From cbfb41ecf1c79a557ff20580a7ab5e95c90e4124 Mon Sep 17 00:00:00 2001 From: Galeone <galeov97@login.curta.zedat.fu-berlin.de> Date: Tue, 12 Sep 2023 13:38:16 +0200 Subject: [PATCH] moving files from old repo --- README.md | 540 ++++++++- SUBMIT_CONFIG/default/config.yaml | 8 + SUBMIT_CONFIG/slurm/cluster.yaml | 355 ++++++ SUBMIT_CONFIG/slurm/config.yaml | 40 + Snakefile | 303 +++++ configuration/config.yaml | 17 + configuration/define_resources.yaml | 180 +++ .../build_hifi_example.tsv | 3 + .../build_illumina_example.tsv | 6 + .../exampleSampleSheets/runEval_example.tsv | 4 + envs/AUXILIARY_PYTHON_SCRIPTS.yaml | 14 + envs/AUXILIARY_R_SCRIPTS.yaml | 12 + envs/BUSCO.yaml | 19 + envs/HiC_CONTACT_MAPS.yaml | 9 + envs/MERYL_MERQURY.yaml | 18 + envs/SMUDGEPLOT_MERQURY.yaml | 12 + envs/UNZIP_and_QC.yaml | 12 + installGEP.yaml | 12 + rules/build_hifi.smk | 178 +++ rules/build_illumina.smk | 310 +++++ rules/evaluate.smk | 1039 +++++++++++++++++ scripts/ALT_missing.fasta | 0 scripts/addFullTableForReport.py | 26 + scripts/assembly_stats/stats.py | 368 ++++++ scripts/colouredHeatmap_legend.tsv | 5 + .../colouredHeatmap_legend_metrics.tsv | 6 + .../fullTable_heatmap_external_metrics.R | 144 +++ ...Table_heatmap_internalComparison_metrics.R | 147 +++ .../internalComparison_legend_metrics.tsv | 5 + scripts/digest.py | 26 + scripts/filter_five_end.pl | 110 ++ scripts/fullTable_heatmap_external.R | 132 +++ .../fullTable_heatmap_internalComparison.R | 140 +++ scripts/internalComparison_legend.tsv | 3 + scripts/makePDF_indivMD.py | 0 scripts/pretextMapsToMarkdown.py | 6 + scripts/process_HiC/filter_five_end.pl | 110 ++ scripts/process_HiC/two_read_bam_combiner.pl | 124 ++ scripts/report/addFullTableForReport.py | 26 + .../addFullTableForReport_all_metrics.py | 8 + scripts/report/makeAllMetricsTable.sh | 47 + scripts/report/makePDF_indivMD.py | 262 +++++ scripts/report/make_erga_assembly_report.py | 80 ++ scripts/report/pretextMapsToMarkdown.py | 6 + scripts/report/reportLandingPage.md | 19 + scripts/report/tableOnSamePage.css | 3 + scripts/reportLandingPage.md | 19 + scripts/reportTABLELandingPage.md | 19 + scripts/standIN_files/ALT_missing.fasta | 0 scripts/standIN_files/snakemake_missing.txt | 0 scripts/stats.py | 368 ++++++ scripts/tableOnSamePage.css | 3 + scripts/two_read_bam_combiner.pl | 124 ++ 53 files changed, 5370 insertions(+), 57 deletions(-) create mode 100644 SUBMIT_CONFIG/default/config.yaml create mode 100644 SUBMIT_CONFIG/slurm/cluster.yaml create mode 100644 SUBMIT_CONFIG/slurm/config.yaml create mode 100644 Snakefile create mode 100644 configuration/config.yaml create mode 100644 configuration/define_resources.yaml create mode 100644 configuration/exampleSampleSheets/build_hifi_example.tsv create mode 100644 configuration/exampleSampleSheets/build_illumina_example.tsv create mode 100644 configuration/exampleSampleSheets/runEval_example.tsv create mode 100644 envs/AUXILIARY_PYTHON_SCRIPTS.yaml create mode 100644 envs/AUXILIARY_R_SCRIPTS.yaml create mode 100644 envs/BUSCO.yaml create mode 100644 envs/HiC_CONTACT_MAPS.yaml create mode 100644 envs/MERYL_MERQURY.yaml create mode 100644 envs/SMUDGEPLOT_MERQURY.yaml create mode 100644 envs/UNZIP_and_QC.yaml create mode 100644 installGEP.yaml create mode 100644 rules/build_hifi.smk create mode 100644 rules/build_illumina.smk create mode 100644 rules/evaluate.smk create mode 100644 scripts/ALT_missing.fasta create mode 100644 scripts/addFullTableForReport.py create mode 100644 scripts/assembly_stats/stats.py create mode 100644 
scripts/colouredHeatmap_legend.tsv
 create mode 100644 scripts/compare_results/colouredHeatmap_legend_metrics.tsv
 create mode 100644 scripts/compare_results/fullTable_heatmap_external_metrics.R
 create mode 100644 scripts/compare_results/fullTable_heatmap_internalComparison_metrics.R
 create mode 100644 scripts/compare_results/internalComparison_legend_metrics.tsv
 create mode 100644 scripts/digest.py
 create mode 100644 scripts/filter_five_end.pl
 create mode 100644 scripts/fullTable_heatmap_external.R
 create mode 100644 scripts/fullTable_heatmap_internalComparison.R
 create mode 100644 scripts/internalComparison_legend.tsv
 create mode 100644 scripts/makePDF_indivMD.py
 create mode 100644 scripts/pretextMapsToMarkdown.py
 create mode 100644 scripts/process_HiC/filter_five_end.pl
 create mode 100644 scripts/process_HiC/two_read_bam_combiner.pl
 create mode 100644 scripts/report/addFullTableForReport.py
 create mode 100644 scripts/report/addFullTableForReport_all_metrics.py
 create mode 100755 scripts/report/makeAllMetricsTable.sh
 create mode 100644 scripts/report/makePDF_indivMD.py
 create mode 100644 scripts/report/make_erga_assembly_report.py
 create mode 100644 scripts/report/pretextMapsToMarkdown.py
 create mode 100644 scripts/report/reportLandingPage.md
 create mode 100644 scripts/report/tableOnSamePage.css
 create mode 100644 scripts/reportLandingPage.md
 create mode 100644 scripts/reportTABLELandingPage.md
 create mode 100644 scripts/standIN_files/ALT_missing.fasta
 create mode 100644 scripts/standIN_files/snakemake_missing.txt
 create mode 100644 scripts/stats.py
 create mode 100644 scripts/tableOnSamePage.css
 create mode 100644 scripts/two_read_bam_combiner.pl

diff --git a/README.md b/README.md
index f18e7e5..a4f0df0 100644
--- a/README.md
+++ b/README.md
@@ -1,92 +1,518 @@
-# GEP
+# README
+# Genome Evaluation Pipeline (GEP)
+<br>
-## Getting started
+* User-friendly and **all-in-one quality control and evaluation** Snakemake pipeline for genome assemblies
-To make it easy for you to get started with GitLab, here's a list of recommended next steps.
+* Run **multiple genome evaluations** in parallel (as many as you want!)
-Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
+* **Scales to server and HPC environments**
-## Add your files
+* Required **software stack automatically deployed** using Conda
-- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
-- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command:
+---
+
+
+
+# The GEP Workflow - Two *Modes*
+
+
+
+### 1. **Build Mode**:
+
+Builds meryl k-mer databases from sequencing reads. If you've already generated these databases, skip to **Evaluate Mode**.
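+
+If you are unsure whether an existing database is intact, a quick sanity check is sketched below (this assumes the `meryl` (v1.3+) binary from the GEP environments is on your `PATH`; the database path is a placeholder):
+
+```
+# Print summary statistics (distinct, unique and total k-mer counts)
+# for an existing meryl database; errors here point to a broken build.
+meryl statistics /<pathTo>/existingDB.meryl | head
+```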
+
+---
+#### Requires:
+ - Raw Illumina short-insert whole-genome sequencing reads
+
+and/or
+
+ - Raw PacBio HiFi sequencing reads
+---
+#### *Process* and **tools**:
+ - *(Optionally) trim raw sequencing reads*
+   - **Trim Galore** (remove Illumina adapters) and **Trimmomatic** (remove 10x barcodes if present) for Illumina reads
+   - **Cutadapt** (remove SMRTbell adapters) for HiFi reads
+ - *(Optionally) perform quality check of raw sequencing reads*
+   - **FastQC** and **MultiQC**
+ - *Build k-mer databases from raw sequencing reads to be used for evaluation*
+   - **Meryl**
+<br>
+
+
+
+
+
+<br>
+
+---
+
+### 2. **Evaluate Mode:**
+
+Runs numerous whole-genome assembly evaluation tools, generating a user-friendly report for easy viewing and distribution (as well as the individual outputs normally generated by each tool, in a well-defined folder structure).
+
+---
+Requires:
+ - Meryl k-mer databases (as produced by **Build Mode**)
+ - Genome assembly (or assemblies) to evaluate, optionally including a phased alternate assembly
+ - (Optionally) Hi-C libraries
+ - (Optionally) Estimated genome sizes
+
+---
+*Process* and **tools**:
+ - *K-mer completeness/correctness, QV score, Spectra-CN (k-mer multiplicity) plots*
+   - **Merqury**
+ - *Genome profiling based on raw sequencing reads*
+   - **GenomeScope2**
+ - *Extensive contiguity statistics such as N# and L# stats, scaffold and contig counts, GC content, etc.*
+   - **Python script**
+ - *Gene completeness/correctness (i.e. recovered complete, duplicated, missing BUSCOs)*
+   - **BUSCO5**
+ - *(Optionally) Hi-C contact maps*
+   - **BWA-MEM2** and **samtools** for aligning Hi-C data
+   - **Pretext** for generating contact maps
+<br>
+
+
+
+
+
+---
+
+
+## **Downloading the workflow**
+
+
+To clone the repository, use the following command:
+```
+git clone https://git.imp.fu-berlin.de/cmazzoni/GEP.git
+```
+
+---
+
+
+
+## Installing Conda
+
+- Conda (v4.11+) *but may work on older versions*
+
+If you already have conda installed on your system, please skip to [Creating our Snakemake conda environment](#creating-our-snakemake-conda-environment)
+
+Download the Linux Miniconda3 installer from the following URL: https://docs.conda.io/en/latest/miniconda.html
+
+
+Run the Miniconda3 installation and check that it worked:
+
+```
+bash /<your_path_to>/Miniconda3-latest-Linux-x86_64.sh
+##Follow miniconda3 installation instructions##
+
+source ~/.bashrc
+
+conda update conda
+```
+
+If you see `conda: command not found`, close and re-open your terminal for the conda installation to take effect, and then update.
+
+---
+<div id="creating-our-snakemake-conda-environment"></div>
+
+## Creating our GEP (Snakemake) Conda environment
+
+The pipeline requires the following software to run (versions may not need to match exactly):
+- snakemake (v7.30.2)
+- beautifulsoup4 (v4.9.3)
+- pandas (v1.4.2)
+- numpy (v1.22.4)
+- python (v3.10)
+- mamba (v0.25)
+
+
+The easiest method to install this software stack is to create a GEP conda environment with the provided `installGEP.yaml` (see **Note** below)
+
+
+```
+conda env create -f /<your_path_to>/GEP/installGEP.yaml
+
+conda activate GEP
+
+##check snakemake installed correctly
+
+snakemake --version
+```
+
+You may need to close and reopen your terminal after the `conda env create ...` command, or use `source ~/.bashrc`, depending on your system.
+
+
+
+**Note:** *If you already have a snakemake (or suitable Python) installation and would like to avoid installing again, ensure that all of the above software is in your `PATH`. If you do this instead of installing from the provided GEP environment (`installGEP.yaml`), you will still need at least the base conda installed/activated - as it's required to handle the software dependencies of all the tools used within the workflow itself.*
+<br>
+
+---
+
+
+
+<div id="configure-sample"></div>
+
+## **Configure Sample Sheet `.tsv`s**
+
+**IMPORTANT:** GEP can run with *only one* of the below sample sheets at a given time. A sample sheet *can*, however, include multiple different samples, datasets, and assemblies for a given run.
+
+**Depending on which sample sheet is provided, GEP will automatically run in one of the following two modes:**
+
+1. **Build**
+
+   - This mode is run if either the [Illumina Sample Sheet `.tsv`](#illumina-sample) or the [PacBio HiFi Sample Sheet `.tsv`](#hifi-sample) is provided to GEP.
+
+2. **Evaluate**
+   - This mode is run if the [Assembly Evaluation Sample Sheet `.tsv`](#assembly-eval) is provided.
+
+
+
+If you already have meryl k-mer databases [(see the Merqury wiki for more details)](https://github.com/marbl/merqury/wiki/1.-Prepare-meryl-dbs) for the genomes you wish to evaluate, you can skip **Build** mode.
+
+
+<br>
+
+---
+
+<div id="illumina-sample"></div>
+
+#### Illumina Sample Sheet `.tsv` - for **Build** mode
+
+
+(see example [GEP/configuration/exampleSampleSheets/build_illumina_example.tsv](configuration/exampleSampleSheets/build_illumina_example.tsv))
+
+
+|sample|Library_R1|Library_R2|meryl_kmer_size| trim10X| trimAdapters| fastQC|
+|:----|:---|:---|:---|:---|:----|:---|
+|Your preferred identifier (Results will include this sample name in output files) <br /><br />**`string`** | **Full path** to Forward (R1) read of PE library/s **in `fastq` format. Can be `.gz`ipped** | **Full path** to Reverse (R2) read of PE library/s **in `fastq` format. Can be `.gz`ipped**| Your choice of k-mer size used to count k-mers in the reads. <br /><br /> **Recommended for Illumina reads is `21`**| Remove the first 23bp from R1 of the library. Only use this if your reads were sequenced on a 10x Genomics platform.<br /><br /> **Possible options are `True` or `False`** | Check for and remove any other sequencing adapters that may still be present in the reads.<br /><br /> **Possible options are `True` or `False`** |Run FastQC on the library pair provided in the `Library_R1` and `Library_R2` columns. <br /><br /> **Possible options are `True` or `False`**|
+
+<br>
+
+Additional info in case of multiple PE libraries for a single sample:
+ - **sample**: Provide the same sample ID for each of the pairs of a sample (the final output will be a single `.meryl` database consisting of k-mers from all PE libraries given for a sample, with this identifier as a prefix). Every line with the same sample ID will be considered as coming from the same sample.
+ - **Library_R1** and **Library_R2**: Each library pair is provided as one line in the tsv. If you have three PE libraries, then you will have three lines in the tsv for this sample.
+ - **meryl_kmer_size**: This should be consistent for libraries that have the same sample ID.
+
+
+If you are uncertain about which short-read libraries you should use, see [How to choose your Illumina libraries](#how-to-choose-your-illumina-libraries) further down the page.
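+
+For reference, the shipped example sheet linked above boils down to the sketch below (columns are whitespace/tab-separated, since the pipeline reads any whitespace; the sample names and paths are placeholders to replace with your own). Note how `organismX` appears on two lines - one per PE library pair - so both pairs feed a single k-mer database:
+
+```
+sample     Library_R1                             Library_R2                             meryl_kmer_size  trim10X  trimAdapters  fastQC
+organismX  /<pathto>/organismX_Library1_R1.fq.gz  /<pathto>/organismX_Library1_R2.fq.gz  21               False    False         True
+organismX  /<pathto>/organismX_Library2_R1.fq.gz  /<pathto>/organismX_Library2_R2.fq.gz  21               False    False         True
+```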
+
+<br>
+
+---
+<div id="hifi-sample"></div>
+
+#### PacBio HiFi Sample Sheet `.tsv` - for **Build** mode
+(see example [GEP/configuration/exampleSampleSheets/build_hifi_example.tsv](configuration/exampleSampleSheets/build_hifi_example.tsv))
+
+|sample|hifi_reads|meryl_kmer_size| trimSMRTbell| fastQC|
+|:----|:---|:---|:----|:---|
+|Your preferred identifier (Results will include this sample name in output files) <br /><br />**`string`** | **Full path** to PacBio HiFi library **in `fastq` format. Can be `.gz`ipped** | Your choice of k-mer size used to count k-mers in the reads. <br /><br /> **Recommended for PacBio HiFi is `31`**| Check for and remove any SMRTbell adapter sequences. <br /><br /> **Possible options are `True` or `False`** | Run FastQC on the library provided in the `hifi_reads` column. <br /><br /> **Possible options are `True` or `False`** |
+
+
+
+<br>
+
+Additional info in case of multiple HiFi libraries for a single sample:
+ - **sample**: Provide the same sample ID for each HiFi library of a sample (the final output will be a single `.meryl` database consisting of k-mers from all HiFi libraries given for a sample, with this identifier as a prefix). Every line with the same sample ID will be considered as coming from the same sample.
+ - **hifi_reads**: Each library is provided as one line in the tsv. If you have three HiFi libraries for the same sample, then you will have three lines in the tsv for this sample.
+ - **meryl_kmer_size**: This should be consistent for libraries that have the same sample ID.
+
+<br>
+
+---
+**Note:** You may also wish to concatenate the libraries of a sample together prior to building the database. In this case, you will only need to provide one line per sample in the respective sample sheets. However, the execution run-time will be longer, as the pipeline is designed to process multiple libraries in parallel.
+
+---
+
+<div id="assembly-eval"></div>
+
+#### Assembly Evaluation Sample Sheet `.tsv` - for **Evaluate** mode
+
+See [GEP/configuration/exampleSampleSheets/runEval_example.tsv](configuration/exampleSampleSheets/runEval_example.tsv) for an idea of how to set up the evaluation sample sheet.
+
+
+|ID|ASM_LEVEL |busco_lineage| PRI_asm|ALT_asm| merylDB| merylDB_kmer |genomeSize|HiC_R1|HiC_R2|
+|:----|:---|:---|:---|:---|:----|:---|:---|:---|:------------------|
+|Identifier for results and reporting| Level that genome is assembled to. <br /><br /> **Possible options are `chr` (chromosome), `scaff` (scaffold), or `cont` (contig)** | BUSCO lineage to evaluate against. Provide either a lineage name to be downloaded automatically (e.g. `vertebrata`) or a full path to an already downloaded lineage folder (see the `busco_lineage` notes below) |Full path to primary assembly you wish to evaluate in `fasta` format (any extension allowed). <br /><br /> **Can be `.gz`ipped** | Full path to alternate assembly (haplotype) in `fasta` format (any extension allowed). <br /><br /> **Can be `.gz`ipped.** <br /><br /> **If you do not have one, write** `None` | Full path to `.meryl` database. <br /><br /> **Must have `.meryl` extension**| The k-mer size used to build the provided corresponding `.meryl` database. | Provide a size estimate (in bp and as an `integer`) for the corresponding assembly/species. <br /><br /> **If you do not have one, write** `auto` |Full path to Forward (R1) reads of HiC library* in `fastq` format. <br /><br /> **Can be `.gz`ipped** |Full path to Reverse (R2) read of HiC library* in `fastq` format. <br /><br /> **Can be `.gz`ipped**|
+
+*Please concatenate all HiC libraries for a sample into a single library pair (one file for R1 and one file for R2), and provide this concatenated pair to the sample sheet.*
+
+#### Difference between `sample` column in Build Mode sample sheets and `ID` column in Evaluate Mode sample sheet
+In practice, `sample` from the Build Mode sample sheets and `ID` from the Evaluate Mode sample sheet may often be synonymous; however, they are defined differently for cases where multiple assemblies (each given a unique `ID` in the sample sheet) are evaluated using the same k-mer database from a single `sample`, and so the results need to be differentiated.
+
+
+
+---
+
+
+
+### Configuration `.yaml`
+
+For both **Build** and **Evaluate** modes, you will need to configure the `config.yaml` found in the [GEP/configuration](configuration/config.yaml) folder.
+
+```
+Results: "<path>"
+
+samplesTSV: "<path>"
+
+resources: "configuration/define_resources.yaml"
+
+EAR: False
+
+smudgeplot: True
+```
+
+##### `Results:`
+Provide a path where all your results for the run are stored. It does not have to be inside the project folder (it can be any location accessible by the user). You also do not need to create this folder yourself; Snakemake will create it if it does not yet exist (together with any non-existent parent folders).
+
+
+##### `samplesTSV:`
+This is where you provide the full path for **one** of the sample sheets after you have filled out the necessary information. Depending on which kind of sample sheet you provide, GEP will run the corresponding mode (**Build** - Illumina, **Build** - HiFi, or **Evaluate**).
+
+##### `EAR:` and `smudgeplot:`
+With `EAR: True`, an ERGA Assembly Report is additionally generated from the analysis (in this case, all rows of the sample sheet must point to the same BUSCO lineage and the same meryl database). With `smudgeplot: True`, the optional Smudgeplot k-mer analysis is also run.
+
+##### `busco_lineage` - Only required for **Evaluate** mode
+BUSCO needs a lineage database to be able to run; it is set per assembly in the `busco_lineage` column of the evaluation sample sheet. Here you have a couple of different options.
+
+Have a look at all the BUSCO databases here: https://busco-data.ezlab.org/v5/data/lineages/.
+
+**You can simply provide the taxonomy name that you wish to use, and the latest database matching the provided name will be automatically downloaded prior to execution**, e.g.:
+
+```
+vertebrata
+```
+
+**OR** you can manually download and unpack your desired BUSCO lineage (or you may already have the necessary BUSCO database on your system and do not wish for GEP to re-download it). In this case, provide the full path to the already existing BUSCO database:
 ```
-cd existing_repo
-git remote add origin https://git.imp.fu-berlin.de/begendiv/gep.git
-git branch -M main
-git push -uf origin main
+/<PathTo>/manuallyDownloaded/vertebrata_odb10
+```
+
+
+
+*If a BUSCO database matching the given name already exists in the `GEP/buscoLineage/` folder, either from a manual download or from a previous automatic download (from previously executed runs), then the pipeline will skip re-downloading.*
+
+---
+
+
+
+
+### Configure run (some more `.yaml`'s)
+
+
+The user will also need to configure a separate `config.yaml` file pertaining to the execution of Snakemake, depending on whether or not GEP will be executed on HPC infrastructure.
+
+#### Default (non-HPC) configuration
+Modify the Snakemake parameters in the file located at [GEP/SUBMIT_CONFIG/**default**/config.yaml](GEP/SUBMIT_CONFIG/default/config.yaml).
+
+Here the user can provide a key-value pair for any Snakemake parameter, such as the maximum number of threads or jobs to utilise at any given time for the entire execution. Some commonly used parameters are already included in the provided file, and you can add any other Snakemake parameter to it (run `snakemake --help` for possible options).
+
+```
+# Example
+
+cores: 64
+dry-run: False
+use-conda: True
+latency-wait: 60
+rerun-incomplete: True
+printshellcmds: False
+keep-going: False
+```
+
+
+#### HPC (SLURM) configuration
+
+Modify the Snakemake parameters in the file located at [GEP/SUBMIT_CONFIG/**slurm**/config.yaml](GEP/SUBMIT_CONFIG/slurm/config.yaml). Similar to the default execution, any Snakemake-specific parameter can be configured here. Additionally, the user can provide default resources to be used by every job submitted to the Slurm scheduler, such as `--partition` and `--qos`.
+
+---
+
+## `cores`, `jobs`, `resources`, and parallelisation
+
+Memory (in Mb), wall-time, and number of threads for non-locally executed jobs are defined in the file [GEP/configuration/define_resources.yaml](GEP/configuration/define_resources.yaml). The current values are likely overestimates and should allow for executing GEP without any issues; increase or decrease them as required. **If running on non-HPC infrastructure, the wall-time will be ignored.**
+
+The Snakemake parameter `cores` (as found in `SUBMIT_CONFIG/<default_OR_slurm>/config.yaml`) is the maximum number of cores (synonymous with threads in this case) GEP will utilise at any time during execution. **For example, during Evaluate Mode, if a single BUSCO job uses 16 threads (as currently set in define_resources.yaml) and the maximum number of cores is set to 64, GEP will run up to (64/16 =) 4 BUSCO jobs in parallel. For other steps that require fewer than 16 threads, the number of jobs will be scaled up to utilise as many of the total requested cores as possible without exceeding the maximum.**
+
+Similarly, the user can provide a maximum number of jobs (e.g. `jobs: 10`). **GEP will never exceed either the maximum number of cores or the maximum number of jobs, whichever is reached first.** In the above example, **if the user were to provide `cores: 64` AND `jobs: 2`, then GEP would only ever run two jobs in parallel, regardless of whether the maximum number of cores has been reached.** The user needs to consider how many samples there are (and therefore how many instances of a single step will be run), as well as how many resources are available.
+
+---
+# Running GEP
+
+Checklist before running:
+
+- [x] Installed and activated the GEP Conda environment
+- [x] Filled out the respective sample sheet correctly
+- [x] Filled out the run configuration (i.e. providing a results folder and the path to the sample sheet) in [GEP/configuration/config.yaml](configuration/config.yaml)
+- [x] Changed the maximum number of `cores` and `jobs`, as well as any other Snakemake-specific parameters you wish to use, in the file [GEP/SUBMIT_CONFIG/**default**/config.yaml](GEP/SUBMIT_CONFIG/default/config.yaml), or - for executing on an HPC (Slurm) infrastructure - in the file [GEP/SUBMIT_CONFIG/**slurm**/config.yaml](GEP/SUBMIT_CONFIG/slurm/config.yaml)
+- [x] Modified the per-job resources (i.e. threads, memory, and wall-time) in the file [GEP/configuration/define_resources.yaml](GEP/configuration/define_resources.yaml)
+
+*For simplicity, make sure you are inside the main GEP folder before running.*
+
+For both local and HPC (Slurm) execution, it is good practice to run GEP as a *dry run* first, **before any full execution**. This will give you an idea of whether the sample sheet and `configuration/config.yaml` are filled out correctly (by checking that the paths exist), and whether there is anything wrong with your installation.
+
+
+
+```
+snakemake --cores 1 --dry-run
+```
+
+
+### Default Execution
+
+```
+snakemake --profile SUBMIT_CONFIG/default/
+```
+
+### HPC (Slurm) Execution
+
+```
+snakemake --profile SUBMIT_CONFIG/slurm/
+```
+
+### Run in background
+You can run GEP with `nohup` and `&` added to the command (both default and Slurm). `nohup` will run the Snakemake command in a way that cannot be interrupted when you lose connection to the server/cluster. The trailing `&` will simply run the command in the background of your current terminal, allowing you to freely use the terminal instance in the meantime.
+```
+nohup snakemake --profile SUBMIT_CONFIG/slurm/ &
+```
+
+---
+
+
+## More Useful Tips
+
+The user can directly override any of the Snakemake parameters (and paths) set in the respective configuration files through the command line when executing GEP. For example, to override the path to the main results folder set in [GEP/configuration/config.yaml](configuration/config.yaml) without having to edit the file itself:
+
+```
+snakemake --profile SUBMIT_CONFIG/default/ --config Results=/Alternative/ResultsFolder
+```
+Or to override the path to the sample sheet, for example:
+```
+snakemake --profile SUBMIT_CONFIG/default/ --config samplesTSV=/<PathTo>/alternative_sampleSheet.tsv
+```
+
+
+To override values for the maximum number of cores and jobs, as well as setting the flag for printing the shell commands (a Snakemake-specific parameter) in the submission configuration as found in [GEP/SUBMIT_CONFIG/default/config.yaml](GEP/SUBMIT_CONFIG/default/config.yaml):
+
+```
+snakemake --profile SUBMIT_CONFIG/default/ --cores 144 --jobs 20 --printshellcmds
+```
+
+The user can also run GEP only up to and including specific rules in the pipeline if they do not wish to run the full evaluation:
+
+```
+snakemake --profile SUBMIT_CONFIG/default/ --until <ruleName>
+```
+
+In such cases, all steps that the given rule depends on will be run, but the final PDF report will not be created.
+
+---
+
+# Results
+
+## Folder Structure of results
+
+### **Build Mode**
+
+
+### **Evaluate Mode**
+
+
+
+
+## PDF Report
+
+
+
+**One page per assembly for key results**
+
+
+
+
+
+- **A:** Name of the assembly and k-mer size used for databases.
+- **B:** Key contiguity statistics. Full contiguity statistics can be viewed in the respective output files for that step.
+- **C:** GenomeScope2 profiles (untransformed linear and log versions).
+- **D:** QV Score and K-mer Completeness tables.
+- **E:** BUSCO results, including the name of the BUSCO clade.
+- **F:** K-mer multiplicity plots for the primary assembly only (top) and both primary and alternate assemblies together (bottom). If no alternate assembly is provided, the same plot will be present in both positions.
+
+---
+Followed by **up to six (2 by 3) Hi-C contact maps per page**
+
+
+
+---
+
+
+
+
+Followed by a page with **an aggregated table (coloured as a heatmap) for all assemblies evaluated in a given run**
+
+
+
+---
+Followed by a page with **an aggregated table coloured according to tiers, also for all assemblies evaluated in a given run**. The threshold values for the different tiers were loosely based on the Earth Biogenome Project's [TABLE OF PROPOSED EBP METRICS](https://www.earthbiogenome.org/assembly-standards).
+ + +--- + +#### Tables in different formats -## Integrate with your tools -- [ ] [Set up project integrations](https://git.imp.fu-berlin.de/begendiv/gep/-/settings/integrations) +*The above tables can also be found in the results folder: `/<pathToResults>/1_evaluation/finalResults/tables/` in various formats (i.e. `.tsv`, `.md`, `.html`).* ## Collaborate with your team +--- -- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/) -- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html) -- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically) -- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/) -- [ ] [Set auto-merge](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html) +#### All result files ## Test and Deploy +All result files, as output by each evaluation tool, can also be found in their respective folders. Use the built-in continuous integration in GitLab. +--- +# How to choose your Illumina libraries -- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html) -- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing(SAST)](https://docs.gitlab.com/ee/user/application_security/sast/) -- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html) -- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/) -- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html) -*** +Variations in sequencing methods/protocols can introduce bias into the corresponding raw sequencing libraries. Sequencing a biological sample often produces both mate-pair/long-insert (e.g. insert sizes of 5k, 10k, 20k bp) and short-insert (e.g. insert sizes of 180, 250, 500, 800 bp) paired-end libraries. -# Editing this README +Usually you can deduce the insert sizes and library types from the metadata found within the NCBI or SRA archives. To keep bias as low as possible whilst maintaining decent coverage, you should ideally use only short-insert paired-end libraries for this evaluation pipeline. -When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thank you to [makeareadme.com](https://www.makeareadme.com/) for this template. +--- -## Suggestions for a good README -Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information. +## Citations -## Name -Choose a self-explaining name for your project. +####################################################################### -## Description -Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with.
A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors. +The software/tools used as part of our genome evaluation are as follows. Please see [GEP/envs](envs) for exact versions of tools, as they may be updated frequently: -## Badges -On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge. +#### Pre-Processing: +* **Trimmomatic** (*Bolger, A. M., Lohse, M., & Usadel, B. (2014)*. http://www.usadellab.org/cms/?page=trimmomatic) +* **Trim Galore** (*Felix Krueger*, bioinformatics.babraham.ac.uk) +* **Cutadapt** (*Martin, M. (2011)*. https://doi.org/10.14806/ej.17.1.200) +* **FastQC** (*Simon Andrews*, https://github.com/s-andrews/FastQC) +* **MultiQC** (*Ewels, P., Magnusson, M., Lundin, S., Käller, M. (2016)*. https://doi.org/10.1093/bioinformatics/btw354) -## Visuals -Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method. +#### Reference-free Genome Profiling +* **GenomeScope2** (*Ranallo-Benavidez, T.R., Jaron, K.S. & Schatz, M.C. (2020)* https://github.com/tbenavi1/genomescope2.0) -## Installation -Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection. +#### K-mer distribution analysis +* **meryl** and **merqury** (*Rhie, A., Walenz, B.P., Koren, S. et al. (2020)*. https://doi.org/10.1186/s13059-020-02134-9) -## Usage -Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README. -## Support -Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc. -## Roadmap -If you have ideas for releases in the future, it is a good idea to list them in the README. +#### Assessing quality and annotation completeness with Benchmarking Universal Single-Copy Orthologs (BUSCOs) +* **BUSCOv5** (*Seppey M., Manni M., Zdobnov E.M. (2019)* https://busco.ezlab.org/) -## Contributing -State if you are open to contributions and what your requirements are for accepting them. -For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self. +#### Hi-C Alignment and Contact Maps +* **samtools** (*Danecek, P. (2021)* https://doi.org/10.1093/gigascience/giab008) +* **BWA** (*Li H. and Durbin R.
(2009)* https://doi.org/10.1093/bioinformatics/btp324) +* Pretext (https://github.com/wtsi-hpag/PretextMap) -You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser. +#### Generate PDF reports from markdown files +* **Pandoc** (https://pandoc.org/MANUAL.html) -## Authors and acknowledgment -Show your appreciation to those who have contributed to the project. -## License -For open source projects, say how it is licensed. +#### Scaffold/contig statistics: N# and L# stats, scaffold metrics, sequence counts, GC content, Estimated genome size +* Python scripts (*Mike Trizna. assembly_stats 0.1.4 (Version 0.1.4). Zenodo. (2020)*. http://doi.org/10.5281/zenodo.3968775 ) (This script was used as a foundation, being modified and improved upon for inclusion in GEP) -## Project status -If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers. +####################################################################### diff --git a/SUBMIT_CONFIG/default/config.yaml b/SUBMIT_CONFIG/default/config.yaml new file mode 100644 index 0000000..9f2b668 --- /dev/null +++ b/SUBMIT_CONFIG/default/config.yaml @@ -0,0 +1,8 @@ +cores: 256 +dry-run: False +use-conda: True +latency-wait: 60 +rerun-incomplete: True +printshellcmds: False +keep-going: False +#use-singularity: False diff --git a/SUBMIT_CONFIG/slurm/cluster.yaml b/SUBMIT_CONFIG/slurm/cluster.yaml new file mode 100644 index 0000000..3098c7c --- /dev/null +++ b/SUBMIT_CONFIG/slurm/cluster.yaml @@ -0,0 +1,355 @@ +# __default__: +# jobname: 'GEP.{rule}' +# partition: begendiv,main +# # nCPUs: "{threads}" +# qos: 'standard' +# + + +#### PRETEXT #### + +unzipHiC_R1: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 500 + time: "01:30:05" + threads: 4 + +# symlinkUnzippedHiC_R1: +# jobname: GEP.{rule}.{wildcards.asmID} +# mem_mb: 10 +# time: "00:05:00" +# threads: 1 + +unzipHiC_R2: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 500 + time: "01:30:05" + threads: 4 + +# symlinkUnzippedHiC_R2: +# jobname: GEP.{rule}.{wildcards.asmID} +# mem_mb: 10 +# time: "00:05:00" +# threads: 1 + + + +pretext_index_PRI_asm: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 72000 + time: "02:30:00" + threads: 1 + +pretext_fastq2bam_R1: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 32000 + time: "08:00:00" + threads: 12 + + +pretext_fastq2bam_R2: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 32000 + time: "08:00:00" + threads: 12 + +pretext_filter_5primeEnd_R1: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 200 + time: "08:00:00" + threads: 4 + +pretext_filter_5primeEnd_R2: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 200 + time: "08:00:00" + threads: 4 + + +pretext_filtered_combine: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 9500 + time: "08:30:00" + threads: 12 + +pretext_map: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 32000 + time: "08:30:00" + threads: 12 + +pretext_snapshot: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 500 + time: "00:10:00" + threads: 1 + +# pretextMaps2md: +# jobname: 
GEP.{rule}.{wildcards.asmID} +# mem_mb: 500 +# time: "00:10:00" +# threads: 1 + + +unzipFasta_PRI: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 500 + time: "00:15:00" + threads: 4 + +# symlinkUnzippedFasta_PRI: +# jobname: GEP.{rule}.{wildcards.asmID} +# mem_mb: 500 +# time: "00:15:00" +# threads: 1 + + +unzipFasta_ALT: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 500 + time: "00:15:00" + threads: 4 + + + +# symlinkUnzippedFasta_ALT: +# jobname: GEP.{rule}.{wildcards.asmID} +# mem_mb: 500 +# time: "00:05:05" +# threads: 1 + + +merqury: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 40000 + time: "08:00:00" + threads: 8 + +busco5: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 150000 + time: "1-00:00:00" + threads: 16 + + + +# moveBuscoOutputs: +# jobname: GEP.{rule}.{wildcards.asmID} +# mem_mb: 500 +# time: "00:05:05" +# threads: 1 + + + + + + + + +genomescope2: + jobname: GEP.{rule}.{wildcards.asmID}.{wildcards.kmer} + mem_mb: 500 + time: "00:15:00" + threads: 1 + + +assemblyStats: + jobname: GEP.{rule}.{wildcards.asmID} + mem_mb: 500 + time: "00:15:00" + threads: 1 + +# saveConfiguration_and_getKeyValues_kmer: +# jobname: GEP.{rule}.{wildcards.asmID}.{wildcards.kmer} +# mem_mb: 500 +# time: "00:05:00" +# threads: 1 +# +# +# saveConfiguration_and_getKeyValues: +# jobname: GEP.{rule}.{wildcards.asmID} +# mem_mb: 500 +# time: "00:05:05" +# threads: 1 +# +# +# aggregateAllAssemblies: +# jobname: GEP.{rule} +# mem_mb: 500 +# time: "00:05:05" +# threads: 1 +# +# makeReport: +# jobname: GEP.{rule}.{wildcards.asmID}.{wildcards.kmer} +# mem_mb: 500 +# time: "00:05:05" +# threads: 1 +# +# +# addFullTable: +# jobname: GEP.{rule} +# mem_mb: 500 +# time: "00:05:05" +# threads: 1 +# +# aggregateReport: +# jobname: GEP.{rule} +# mem_mb: 500 +# time: "00:05:05" +# threads: 1 + + +makePDF: + jobname: GEP.{rule} + mem_mb: 1000 + time: "00:10:05" + threads: 1 + +###### Rules for illuminadb building ########## + +unzipFastq_R1: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.trim10x}.{wildcards.trimAdapters} + mem_mb: 96000 + time: "01:45:05" + threads: 4 + + +unzipFastq_R2: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.trim10x}.{wildcards.trimAdapters} + mem_mb: 96000 + time: "01:45:05" + threads: 4 + +# symlinkUnzippedFastq_R1: +# jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.trim10x}.{wildcards.trimAdapters} +# mem_mb: 500 +# time: "00:01:35" +# threads: 1 +# +# symlinkUnzippedFastq_R2: +# jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.trim10x}.{wildcards.trimAdapters} +# mem_mb: 500 +# time: "00:01:35" +# threads: 1 + + + + +# symLink_trim10xbarcodes_notrimAdapt: +# jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter} +# mem_mb: 500 +# time: "00:01:35" +# threads: 1 +# +# symlinks_no10xOrAdaptTrim: +# jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter} +# mem_mb: 500 +# time: "00:01:35" +# threads: 1 +# +# +# symlinks_no10xwithAdaptTrim: +# jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter} +# mem_mb: 500 +# time: "00:01:35" +# threads: 1 +# +# +# symlink_trim10xbarcodesR2: +# jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.trimAdapters} +# mem_mb: 500 +# time: "00:01:35" +# threads: 1 + + +trim10xbarcodes: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.trimAdapters} + mem_mb: 500 + time: "02:30:05" + threads: 4 + + + +trimAdapters: + jobname: 
GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.trim10x} + mem_mb: 500 + time: "02:30:05" + threads: 8 + + +fastqc_Illumina: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.trimAdapters}.{wildcards.trim10x} + mem_mb: 1000 + time: "04:00:05" + threads: 4 + +# multiqc_hifi: +# jobname: GEP.{rule}.{wildcards.sample}.{wildcards.trimAdapters}.{wildcards.trim10x} +# mem_mb: 500 +# time: "01:00:05" +# threads: 1 + +meryl_R1: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.trimAdapters}.{wildcards.trim10x}.{wildcards.kmer} + mem_mb: 20000 + time: "01:30:05" + threads: 12 + +meryl_R2: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.trimAdapters}.{wildcards.trim10x}.{wildcards.kmer} + mem_mb: 20000 + time: "01:30:05" + threads: 12 + +meryl_illumina_build: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.trimAdapters}.{wildcards.trim10x}.{wildcards.kmer} + mem_mb: 30000 + time: "02:45:05" + threads: 12 + + + + +###### HIFI BUILD ####### + +unzipHifi: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.smrtornot} + mem_mb: 128000 + time: "00:30:05" + +# symlinkUnzippedHifi: +# jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.smrtornot} +# mem_mb: 500 +# time: "00:05:05" + +# symlinkfornotSmartTrimmed: +# jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter} +# mem_mb: 500 +# time: "00:05:05" + + +fastqc_hifi: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.smrtornot} + mem_mb: 12000 + time: "04:00:05" + +multiqc_hifi: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.smrtornot} + mem_mb: 4000 + time: "01:15:05" + +meryl_hifi_count: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter}.{wildcards.smrtornot}.{wildcards.kmer} + mem_mb: 96000 + time: "02:30:05" + +meryl_hifi_build: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.smrtornot}.{wildcards.kmer} + mem_mb: 96000 + time: "01:30:05" + + +trimSMRTbell: + jobname: GEP.{rule}.{wildcards.sample}.{wildcards.readCounter} + mem_mb: 96000 + time: "01:30:05" diff --git a/SUBMIT_CONFIG/slurm/config.yaml b/SUBMIT_CONFIG/slurm/config.yaml new file mode 100644 index 0000000..2876ae1 --- /dev/null +++ b/SUBMIT_CONFIG/slurm/config.yaml @@ -0,0 +1,40 @@ +### SBATCH JOB THAT WILL BE SUBMITTED FOR EACH RULE IN THE PIPELINE ### + +cluster: + mkdir -p slurm_logs/{rule} && + sbatch + --partition={resources.partition} + --qos={resources.qos} + --cpus-per-task={threads} + --mem={resources.mem_mb} + --job-name=GEP.{rule}.%j + --output=slurm_logs/{rule}/%j.out + --error=slurm_logs/{rule}/%j.err + --time={resources.time} + + + +### DEFAULT RESOURCES THAT WILL BE USED IN THE ABOVE SBATCH COMMAND IF NOT DEFINED IN '../../../GEP/configuration/define_resources.yaml' ### + +default-resources: + - partition=begendiv,main + - qos=standard + # - mem_mb=100000 + # - time="3-00:00:00" + # - nodes=1 + + + +### SNAKEMAKE ARGUMENTS ### + +# restart-times: 3] +# max-jobs-per-second: 100 +#max-status-checks-per-second: 10 +cores: 256 +jobs: 100 +use-conda: True +keep-going: False +rerun-incomplete: True +printshellcmds: False +scheduler: greedy +latency-wait: 60 diff --git a/Snakefile b/Snakefile new file mode 100644 index 0000000..e284fb9 --- /dev/null +++ b/Snakefile @@ -0,0 +1,303 @@ +import numpy as np +from itertools import groupby +import json +import pandas as pd +import yaml +import os +import sys +import csv +from urllib.request import urlopen +from 
bs4 import BeautifulSoup +import argparse, subprocess + +#temporary +from make_erga_assembly_report import create_yaml_file + +container: "docker://gepdocker/gep_dockerimage:latest" +configfile: "configuration/config.yaml" + +### LOAD IN RESOURCES CONFIG ### +with open(config['resources'], 'r') as f: + resource = yaml.safe_load(f) + +### GET BASENAME FOR READS WILDCARDS ### +# def getBasename4Reads(path): +# base=os.path.basename(path) +# return os.path.splitext(base)[0] + +### CHECK IF INPUT IS GZIPPED ### +def gzipped_or_not(path): + trueORfalse=path.endswith('.gz') + return trueORfalse + +### CHECK IF HIC AND/OR ALT_ASM FILES ARE GIVEN, OR VALUE IS NONE ### +def FILE_present_or_not(path): + if path == 'None': + return False + return True + + +### CHECK IF GENOME_SIZE IS PROVIDED, OR VALUE IS AUTO ### +def genomeSize_auto_or_not(given_size): + if given_size == 'auto': + return 0 + return given_size + + + + + # def to_be_smrtTrimmed(userAnswer): + # if userAnswer == 'True': + # string2Add="smrtTrimmed" + # elif userAnswer == 'False': + # string2Add="notsmrtTrimmed" + +samples = pd.read_csv(config['samplesTSV'], dtype=str, index_col=False, delim_whitespace=True, skip_blank_lines=True) +# print(samples) +if set(['sample', 'Library_R1', 'Library_R2', 'meryl_kmer_size', 'trim10X', 'trimAdapters', 'fastQC']).issubset(samples.columns): + whichRule = "rules/build_illumina.smk" + # samples=samples.reset_index() + samples['readCounter'] = samples.groupby(['sample']).cumcount()+1 + samples['readCounter'] = samples['readCounter'].astype(str) + samples['readCounter'] = samples['sample'] + "_LibraryPair" + samples['readCounter'] + + samples['10xtrimorNot'] = np.where(samples['trim10X'] == "True", "10xTrimmed", "not10xTrimmed") + samples['AdpaterTrimorNot'] = np.where(samples['trimAdapters'] == "True", "AdaptTrimmed", "notAdaptTrimmed") + + dictSamples=samples[['sample','meryl_kmer_size', '10xtrimorNot','AdpaterTrimorNot']] + dictSamples=dictSamples.set_index(['sample']).T.to_dict('list') + + testDictQC=samples[['sample', '10xtrimorNot', 'AdpaterTrimorNot', 'fastQC']] + testDictQC = testDictQC[testDictQC['fastQC'] == "True"] + testDictQC=testDictQC.drop_duplicates('sample', keep='first') + testDictQC=testDictQC.set_index(['sample']).T.to_dict('list') + + trimAdapters = samples[samples['trimAdapters'] == "True"] + + dictReadCounter = {} + for i in samples['sample'].unique(): + dictReadCounter[i] = [samples['readCounter'][j] for j in samples[samples['sample']==i].index] + # dkmerSize = {} + samples['gzipped_R1']=samples['Library_R1'].apply(gzipped_or_not) + samples['gzipped_R2']=samples['Library_R2'].apply(gzipped_or_not) + + + noGzip_R1 = samples[samples['gzipped_R1'] == False] + noGzip_R1=noGzip_R1.set_index(['sample','readCounter']) + + + noGzip_R2 = samples[samples['gzipped_R2'] == False] + noGzip_R2=noGzip_R2.set_index(['sample','readCounter']) + + + yesGzip_R1 = samples[samples['gzipped_R1'] == True] + yesGzip_R1=yesGzip_R1.set_index(['sample','readCounter']) + + + yesGzip_R2 = samples[samples['gzipped_R2'] == True] + yesGzip_R2=yesGzip_R2.set_index(['sample','readCounter']) + + samples=samples.set_index(['sample','readCounter']) + + ruleAllQCFiles=[] + if samples['fastQC'].str.contains('True').any(): + ruleAllQCFiles=[expand(os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/QC/multiqc/{sample}.multiqcReport.html"), sample=key) for key, [value1, value2, value3] in testDictQC.items()] + ruleAll=[expand(os.path.join(config['Results'], 
"0_buildDatabases/{sample}/illuminaReads/merylDb/complete_illumina.{sample}.{kmer}.meryl"), sample=key, kmer=value1) for key, [value1, value2, value3] in dictSamples.items()] + +elif set(['sample', 'hifi_reads', 'meryl_kmer_size','trimSMRTbell', 'fastQC']).issubset(samples.columns): + whichRule = "rules/build_hifi.smk" + # samples=samples.reset_index() + samples['readCounter'] = samples.groupby(['sample']).cumcount()+1 + samples['readCounter'] = samples['readCounter'].astype(str) + samples['readCounter'] = samples['sample'] + "_Library" + samples['readCounter'] + samples['smrtornot'] = np.where(samples['trimSMRTbell'] == "True", "smrtTrimmed", "notsmrtTrimmed") + + dictSamples=samples[['sample','meryl_kmer_size', 'smrtornot']] + dictSamples=dictSamples.drop_duplicates('sample', keep='first') + dictSamples=dictSamples.set_index(['sample']).T.to_dict('list') + + + testDictQC=samples[['sample', 'smrtornot', 'fastQC']] + testDictQC = testDictQC[testDictQC['fastQC'] == "True"] + + + testDictQC=testDictQC.set_index(['sample']).T.to_dict('list') + + + dictReadCounter = {} + for i in samples['sample'].unique(): + dictReadCounter[i] = [samples['readCounter'][j] for j in samples[samples['sample']==i].index] + samples['gzipped_hifi']=samples['hifi_reads'].apply(gzipped_or_not) + + + noGzip_hifi = samples[samples['gzipped_hifi'] == False] + noGzip_hifi=noGzip_hifi.set_index(['sample','readCounter']) + + + yesGzip_hifi = samples[samples['gzipped_hifi'] == True] + yesGzip_hifi=yesGzip_hifi.set_index(['sample','readCounter']) + + + samples=samples.set_index(['sample','readCounter']) + ruleAllQCFiles=[] + + if samples['fastQC'].str.contains('True').any(): + ruleAllQCFiles=[expand(os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/QC/multiqc/{sample}.multiqcReport.html"), sample=key) for key, [value1, value2] in testDictQC.items()] + ruleAll=[expand(os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/merylDb/complete_hifi.{sample}.{kmer}.meryl"), sample=key, kmer=value1) for key, [value1, value2] in dictSamples.items()] +elif set(['ID', 'ASM_LEVEL', 'busco_lineage', 'PRI_asm', 'ALT_asm', 'merylDB', 'merylDB_kmer', 'genomeSize', 'HiC_R1', 'HiC_R2']).issubset(samples.columns): + + whichRule = "rules/evaluate.smk" + samples['genomeSize']=samples['genomeSize'].apply(genomeSize_auto_or_not) + +### make new column for whether or not path/file given is gzipped (True or False) + samples['gzipped_PRI']=samples['PRI_asm'].apply(gzipped_or_not) + samples['gzipped_ALT']=samples['ALT_asm'].apply(gzipped_or_not) + samples['gzipped_HiC_R1']=samples['HiC_R1'].apply(gzipped_or_not) + samples['gzipped_HiC_R2']=samples['HiC_R2'].apply(gzipped_or_not) + +### new column for whether or not alt asm is provided or not + samples['ALT_present']=samples['ALT_asm'].apply(FILE_present_or_not) + samples['HiC_R1_present']=samples['HiC_R1'].apply(FILE_present_or_not) + samples['HiC_R2_present']=samples['HiC_R2'].apply(FILE_present_or_not) + samples['merylDB_present']=samples['merylDB'].apply(FILE_present_or_not) + + testDictPRETEXT = samples[['ID', 'HiC_R1_present', 'HiC_R2_present']] + + testDictPRETEXT = testDictPRETEXT[(testDictPRETEXT['HiC_R1_present'] == True) & (testDictPRETEXT['HiC_R2_present'] == True)] + + testDictPRETEXT=testDictPRETEXT.set_index(['ID']).T.to_dict('list') + + noGzip_HiC_R1 = samples[samples['gzipped_HiC_R1'] == False] + noGzip_HiC_R1 = noGzip_HiC_R1.set_index(['ID']) + noGzip_HiC_R2 = samples[samples['gzipped_HiC_R2'] == False] + noGzip_HiC_R2 = noGzip_HiC_R2.set_index(['ID']) + 
+ yesGzip_HiC_R1 = samples[samples['gzipped_HiC_R1'] == True] + yesGzip_HiC_R1 = yesGzip_HiC_R1.set_index(['ID']) + yesGzip_HiC_R2 = samples[samples['gzipped_HiC_R2'] == True] + yesGzip_HiC_R2 = yesGzip_HiC_R2.set_index(['ID']) + + noGzip_asm1 = samples[samples['gzipped_PRI'] == False] + noGzip_asm1 =noGzip_asm1.set_index(['ID']) + + yesGzip_asm1 = samples[samples['gzipped_PRI'] == True] + yesGzip_asm1 = yesGzip_asm1.set_index(['ID']) + + noGzip_asm2 = samples[samples['gzipped_ALT'] == False] + noGzip_asm2 = noGzip_asm2[noGzip_asm2['ALT_present'] == True] + noGzip_asm2 = noGzip_asm2.set_index(['ID']) + noGzip_asm2Dict=noGzip_asm2.T.to_dict('list') + + no_asm2 = samples[samples['gzipped_ALT'] == False] + no_asm2 = no_asm2[no_asm2['ALT_present'] == False] + no_asm2 = no_asm2.set_index(['ID']) + no_asm2Dict=no_asm2.T.to_dict('list') + + yesGzip_asm2 = samples[samples['gzipped_ALT'] == True] + yesGzip_asm2=yesGzip_asm2.set_index(['ID']) + + samples=samples.set_index(['ID']) + + dictSamples=samples.T.to_dict('list') + + samples['merylDB_basename'] = samples['merylDB'].apply(lambda path: os.path.basename(path)) + + #warning: check that unique merylDB path point to unique names for the meryl database + + if len(samples["merylDB_basename"].unique()) != len(samples["merylDB"].unique()): + print ("WARNING: please check that each meryl database is named differently") + + + if config['EAR']: + + if len(samples["busco_lineage"].unique()) != 1: # when EAR runs, only one busco lineage should be given + print ("check tsv file, multiple busco lineages inserted, all rows should point to the same busco lineage") + + if len(samples["merylDB"].unique()) != 1: # when EAR runs, only one merylDB should be given + print ("check tsv file, multiple merylDB inserted, all rows should point to the same database") + + + ruleAllQCFiles=[] + ruleAll=os.path.join(config['Results'],"1_evaluation/finalResults/GEP_FINAL_REPORT.pdf"), \ + os.path.join(config['Results'],"1_evaluation/finalResults/EAR_report.yaml") + + else: + ruleAllQCFiles=[] + ruleAll=os.path.join(config['Results'],"1_evaluation/finalResults/GEP_FINAL_REPORT.pdf") + + + +### DOWNLOAD BUSCO LINEAGE (IF IT DOESN'T ALREADY EXIST) #### + busco5Lineages = samples['busco_lineage'] + args_o=os.path.join(workflow.basedir, "buscoLineage") + args_l=list(set(busco5Lineages)) + # print(args_l) + for i in args_l: + checkLineagePath=args_o + "/" + i + "_odb10" + checkLineageDled=args_o + "/" + i + outDir=args_o + + + if os.path.isdir(i) is True: + # subprocess.call("ln -sf %s %s"%(args.l, args.l), shell=True) + buscoDataBaseName=os.path.basename(i) + buscoDataBaseName=buscoDataBaseName[:-6] + # print("Lineage path given, basename is:", buscoDataBaseName) + elif os.path.isdir(i) is False and os.path.isdir(checkLineagePath) is True: + # subprocess.call("ln -sf %s %s"%(checkLineagePath, checkLineageDled), shell=True) + buscoDataBaseName=os.path.basename(checkLineagePath) + buscoDataBaseName=buscoDataBaseName[:-6] + # print("Database already in buscoLineage directory, basename is:", buscoDataBaseName) + else: + # print("Database will be downloaded") + url = "https://busco-data.ezlab.org/v5/data/lineages/" + html = urlopen(url).read() + soup = BeautifulSoup(html, features="html.parser") + # kill all script and style elements + for script in soup(["script", "style"]): + script.extract() # rip it out + # get text + text = soup.get_text() + # break into lines and remove leading and trailing space on each + lines = (line.strip() for line in text.splitlines()) + linID=None + 
#identify the lineage file + for line in text.splitlines(): + if line.startswith(i): + # print(line) + linID=line.split(" ")[0] + # print(linID) + break + if not linID==None: + linLink=url+linID + # print(linLink) + print('Downloading Busco Database:', i) + subprocess.run("wget -q -P %s %s"%(outDir, linLink), shell=True, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + subprocess.run("tar -xvf %s*.tar.gz -C %s"%(args_o + "/" + i, outDir), shell=True, check=True,stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + subprocess.run("rm %s_*.tar.gz"%(checkLineageDled), shell=True, check=True) + buscoDataBaseName=i + print('Downloading Busco Database:', i, " - COMPLETE") + else: + raise ValueError("Error - could not identify lineage please check busco site for a correct prefix") + # busco5Lineages = samples['busco_lineage'] +else: + raise ValueError('Sample Sheet not recognised. Please make sure you are using the correct sample sheet') + + + + + + +if "Results" not in config: + config["Results"] = "results" + + + +include: whichRule + + +final_target_outputs = ruleAllQCFiles, ruleAll + +rule all: + input: + final_target_outputs diff --git a/configuration/config.yaml b/configuration/config.yaml new file mode 100644 index 0000000..c068eaf --- /dev/null +++ b/configuration/config.yaml @@ -0,0 +1,17 @@ + +### PATH TO WHERE YOU WANT YOUR RESULTS FOR THIS RUN STORED - WILL BE AUTOMATICALLY CREATED IF IT DOESN'T EXIST ### + +Results: "/scratch/galeov97/begendiv/pilot/results5" + +### FULL PATH TO YOUR SAMPLESHEET ### + +samplesTSV: "/scratch/galeov97/begendiv/pilot/runEvaltest_nohic.tsv" + +### PATH TO DEFINE RESOURCES FILE - DO NOT CHANGE UNLESS YOU WISH TO USE IN A DIFFERENT LOCATION #### + +resources: "configuration/define_resources.yaml" + +### With EAR=True the ERGA Assembly Report is automatically generated from the analysis + +EAR: False +smudgeplot: True diff --git a/configuration/define_resources.yaml b/configuration/define_resources.yaml new file mode 100644 index 0000000..367d789 --- /dev/null +++ b/configuration/define_resources.yaml @@ -0,0 +1,180 @@ +# __default__: +# jobname: 'GEP.{rule}' +# partition: begendiv,main +# # nCPUs: "{threads}" +# qos: 'standard' +# + + +#### PRETEXT #### + +unzipFastq_R1_HiC: + mem_mb: 96000 + time: "12:00:00" + threads: 4 + +unzipFastq_R2_HiC: + mem_mb: 96000 + time: "12:00:00" + threads: 4 + +indexFasta_PRI: + mem_mb: 72000 + time: "06:00:00" + threads: 1 + +convertFastqTObam_R1_HiC: + mem_mb: 32000 + time: "24:00:00" + threads: 12 + +convertFastqTObam_R2_HiC: + mem_mb: 32000 + time: "24:00:00" + threads: 12 + +filter5PrimeEnd_R1_HiC: + mem_mb: 2000 + time: "24:00:00" + threads: 4 + +filter5PrimeEnd_R2_HiC: + mem_mb: 2000 + time: "24:00:00" + threads: 4 + + +pairAndCombineFiltered_HiC: + mem_mb: 9500 + time: "24:00:00" + threads: 12 + +pretextMap: + mem_mb: 32000 + time: "24:00:00" + threads: 12 + +pretextSnapshot: + mem_mb: 1000 + time: "02:00:00" + threads: 1 + + +unzipFasta_PRI: + mem_mb: 5000 + time: "12:00:00" + threads: 4 + +unzipFasta_ALT: + mem_mb: 1000 + time: "12:00:00" + threads: 4 + +merylHistogram: + mem_mb: 1000 + time: "02:00:00" + threads: 1 + +merqury: + mem_mb: 40000 + time: "08:00:00" + threads: 8 + +smudgeplot: + mem_mb: 40000 + time: "03:00:00" + threads: 12 + +busco5: + mem_mb: 128000 + time: "1-12:00:00" + threads: 16 + +GenomeScope2Profiles: + mem_mb: 1000 + time: "02:00:00" + threads: 1 + +gfastatsResults: + mem_mb: 2000 + time: "02:00:00" + threads: 8 + +###### Rules for illuminadb building ########## + 
+unzipFastq_R1_illumina: + mem_mb: 96000 + time: "12:00:00" + threads: 4 + + +unzipFastq_R2_illumina: + mem_mb: 96000 + time: "12:00:00" + threads: 4 + + +trim10xBarcodes_illumina: + mem_mb: 5000 + time: "03:00:00" + threads: 4 + +trimSequencingAdapters_illumina: + mem_mb: 5000 + time: "12:00:00" + threads: 8 + + +fastQC_illumina: + mem_mb: 1000 + time: "12:00:00" + threads: 4 + + +merylCount_R1_illumina: + mem_mb: 20000 + time: "12:00:00" + threads: 12 + +merylCount_R2_illumina: + mem_mb: 20000 + time: "12:00:00" + threads: 12 + +merylUnion_illumina: + mem_mb: 30000 + time: "12:00:00" + threads: 12 + + +###### HIFI BUILD ####### + +unzipFastq_hifi: + mem_mb: 128000 + time: "12:00:00" + threads: 4 + +trimSMRTBellAdapters_hifi: + mem_mb: 96000 + time: "12:00:00" + threads: 4 + +fastQC_hifi: + mem_mb: 12000 + time: "12:00:00" + threads: 2 + +multiQC_hifi: + mem_mb: 4000 + time: "06:00:00" + threads: 2 + +merylCount_hifi: + mem_mb: 96000 + time: "12:00:00" + threads: 12 + +merylUnion_hifi: + mem_mb: 96000 + time: "12:00:00" + threads: 12 diff --git a/configuration/exampleSampleSheets/build_hifi_example.tsv b/configuration/exampleSampleSheets/build_hifi_example.tsv new file mode 100644 index 0000000..95a08df --- /dev/null +++ b/configuration/exampleSampleSheets/build_hifi_example.tsv @@ -0,0 +1,3 @@ +sample hifi_reads meryl_kmer_size trimSMRTbell fastQC +organismX /<pathto>/organismX_Hifi_Library1.fq 31 False True +organismX /<pathto>/organismX_Hifi_Library2.fq 31 False True diff --git a/configuration/exampleSampleSheets/build_illumina_example.tsv b/configuration/exampleSampleSheets/build_illumina_example.tsv new file mode 100644 index 0000000..9fc938c --- /dev/null +++ b/configuration/exampleSampleSheets/build_illumina_example.tsv @@ -0,0 +1,6 @@ +sample Library_R1 Library_R2 meryl_kmer_size trim10x trimAdapters fastQC +organismX /<pathto>/organismX_Library1_R1.fq.gz /<pathto>/organismX_Library1_R2.fq.gz 21 False False True +organismX /<pathto>/organismX_Library2_R1.fq.gz /<pathto>/organismX_Library2_R2.fq.gz 21 False False True +organismY /<pathto>/organismY_Library1_R1.fastq /<pathto>/organismY_Library1_R2.fastq 21 True True True +organismY /<pathto>/organismY_Library2_R1.fastq /<pathto>/organismY_Library2_R2.fastq 21 True True True +organismY /<pathto>/organismY_Library3_R1.fastq /<pathto>/organismY_Library3_R2.fastq 21 True True True diff --git a/configuration/exampleSampleSheets/runEval_example.tsv b/configuration/exampleSampleSheets/runEval_example.tsv new file mode 100644 index 0000000..158f742 --- /dev/null +++ b/configuration/exampleSampleSheets/runEval_example.tsv @@ -0,0 +1,4 @@ +ID ASM_LEVEL busco_lineage PRI_asm ALT_asm merylDB merylDB_kmer genomeSize HiC_R1 HiC_R2 +AssemblyX.illu chr <busco_db> /<pathto>/speciesX.fasta None /<pathTo>/speciesX.illuminaDB.meryl 21 auto None None +AssemblyX.HiFi chr <busco_db> /<pathto>/speciesX.fasta None /<pathTo>/speciesX.hifiDB.meryl 31 auto None None +AssemblyY.illu scaff <busco_db> /<pathto>/speciesY.Primary.fa /<pathto>/speciesY.Alternate.fa /<pathTo>/speciesY.illuminaDB.meryl 21 1050000 None None diff --git a/envs/AUXILIARY_PYTHON_SCRIPTS.yaml b/envs/AUXILIARY_PYTHON_SCRIPTS.yaml new file mode 100644 index 0000000..01def04 --- /dev/null +++ b/envs/AUXILIARY_PYTHON_SCRIPTS.yaml @@ -0,0 +1,14 @@ +channels: + - conda-forge + - bioconda + - anaconda +dependencies: + - tabulate=0.8.7 + - beautifulsoup4=4.9 + - pandoc=2.15.* + - tectonic + - wkhtmltopdf + - pandas + - numpy + - ghostscript + - python=3.9.* \ No newline at end of file diff --git 
a/envs/AUXILIARY_R_SCRIPTS.yaml b/envs/AUXILIARY_R_SCRIPTS.yaml
new file mode 100644
index 0000000..724b7f9
--- /dev/null
+++ b/envs/AUXILIARY_R_SCRIPTS.yaml
@@ -0,0 +1,12 @@
+channels:
+  - conda-forge
+  - bioconda
+dependencies:
+  - python >=3.10.4
+  - r-argparse == 2.1.5
+  - r-base >=4.1.3
+  - r-minpack.lm == 1.2_2
+  - genomescope2 == 2.0
+  - r-dplyr==1.0.9
+  - r-formattable==0.2.1
+  - gfastats
diff --git a/envs/BUSCO.yaml b/envs/BUSCO.yaml
new file mode 100644
index 0000000..d986a85
--- /dev/null
+++ b/envs/BUSCO.yaml
@@ -0,0 +1,19 @@
+channels:
+  - conda-forge
+  - bioconda
+  - anaconda
+  - defaults
+dependencies:
+  - augustus >=3.3
+  - biopython
+  - blast >=2.10.1
+  - fonts-conda-ecosystem
+  - hmmer >=3.1b2
+  - metaeuk
+  - pandas
+  - prodigal
+  - python >=3.3
+  - r-base
+  - r-ggplot2 >=2.2.1
+  - sepp >=4.3.10
+  - busco == 5.4.7
diff --git a/envs/HiC_CONTACT_MAPS.yaml b/envs/HiC_CONTACT_MAPS.yaml
new file mode 100644
index 0000000..26f1e7b
--- /dev/null
+++ b/envs/HiC_CONTACT_MAPS.yaml
@@ -0,0 +1,9 @@
+channels:
+  - anaconda
+  - bioconda
+  - conda-forge
+dependencies:
+  - pretextmap == 0.1.9
+  - pretextsnapshot == 0.0.4
+  - bwa-mem2 == 2.2.1
+  - samtools == 1.14
\ No newline at end of file
diff --git a/envs/MERYL_MERQURY.yaml b/envs/MERYL_MERQURY.yaml
new file mode 100644
index 0000000..9d7c900
--- /dev/null
+++ b/envs/MERYL_MERQURY.yaml
@@ -0,0 +1,18 @@
+channels:
+  - bioconda
+  - defaults
+  - conda-forge
+dependencies:
+  - merqury == 1.3
+  - bedtools == 2.29.2
+  - meryl == 1.3
+  - cairo == 1.16
+  - openjdk == 11.0.13
+  - r-argparse == 2.1.5
+  - r-base >=4.0,<4.1.0a0
+  - r-ggplot2=3.3.2=r40hc72bb7e_1
+  - r-scales == 1.2.0
+  - samtools == 1.15.1
+  - xorg-libxrender == 0.9.10
+  - xorg-libxext == 1.3.4
+  - xorg-libxau == 1.0.9
diff --git a/envs/SMUDGEPLOT_MERQURY.yaml b/envs/SMUDGEPLOT_MERQURY.yaml
new file mode 100644
index 0000000..7cd4b36
--- /dev/null
+++ b/envs/SMUDGEPLOT_MERQURY.yaml
@@ -0,0 +1,12 @@
+name: smudgeplot_merqury
+channels:
+  - conda-forge
+  - bioconda
+  - defaults
+dependencies:
+  - r-base
+  - r-ggplot2
+  - merqury == 1.3
+  - bedtools == 2.29.2
+  - meryl == 1.3
+  - smudgeplot == 0.2.5
diff --git a/envs/UNZIP_and_QC.yaml b/envs/UNZIP_and_QC.yaml
new file mode 100644
index 0000000..5afdd23
--- /dev/null
+++ b/envs/UNZIP_and_QC.yaml
@@ -0,0 +1,12 @@
+channels:
+  - anaconda
+  - conda-forge
+  - bioconda
+dependencies:
+  - openssl==3.0.5
+  - python=3.10.5=ha86cf86_0_cpython
+  - pigz=2.6
+  - trim-galore=0.6.7
+  - pandoc=2.11.4
+  - multiqc=1.13
+  - trimmomatic=0.39
diff --git a/installGEP.yaml b/installGEP.yaml
new file mode 100644
index 0000000..368c83b
--- /dev/null
+++ b/installGEP.yaml
@@ -0,0 +1,12 @@
+name: GEP
+channels:
+  - conda-forge
+  - bioconda
+  - anaconda
+dependencies:
+  - snakemake==7.30.2
+  - beautifulsoup4==4.9.3
+  - pandas==1.4.2
+  - numpy==1.22.4
+  - python>=3.10
+  - tabulate==0.8.10
diff --git a/rules/build_hifi.smk b/rules/build_hifi.smk
new file mode 100644
index 0000000..7dca623
--- /dev/null
+++ b/rules/build_hifi.smk
@@ -0,0 +1,178 @@
+
+localrules: symlink_UnzippedFastq_hifi, \
+            symlink_noSMRTBellAdaptTrim_hifi, \
+            multiQC_hifi
+
+
+# def fq_to_trimSMRTbell(wildcards):
+#     return trimSMRTbell.loc[(wildcards.sample, wildcards.readCounter), "hifi_reads"]
+#
+#
+# def fq_to_notTrimSMRTbell(wildcards):
+#     return notrimSMRTbell.loc[(wildcards.sample, wildcards.readCounter), "hifi_reads"]
+
+
+def hifi_gzipped(wildcards):
+    return yesGzip_hifi.loc[(wildcards.sample, wildcards.readCounter), "hifi_reads"]
+
+
+def hifi_notgzipped(wildcards):
+    return 
noGzip_hifi.loc[(wildcards.sample, wildcards.readCounter), "hifi_reads"] + + + +rule unzipFastq_hifi: + input: + fastq=hifi_gzipped, + output: + os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/temp_unzipFastqs/{readCounter}.{smrtornot}.fastq"), + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/logs/pigzUnzip.{readCounter}.{smrtornot}.log") + conda: + os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + threads: + resource['unzipFastq_hifi']['threads'] + resources: + mem_mb=resource['unzipFastq_hifi']['mem_mb'], + time=resource['unzipFastq_hifi']['time'], + shell: + """ + pigz -p {threads} -c -d -k {input.fastq} > {output} 2> {log} + """ + +rule symlink_UnzippedFastq_hifi: + input: + fastq=hifi_notgzipped, + output: + os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/temp_unzipFastqs/{readCounter}.{smrtornot}.fastq"), + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/logs/pigzUnzip.{readCounter}.{smrtornot}.log") + container: + None + shell: + """ + ln -s {input.fastq} {output} + echo "{input.fastq} not gzipped. Symlink created in place of expected decompressed file." > {log} + """ + +rule trimSMRTBellAdapters_hifi: + input: + os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/temp_unzipFastqs/{readCounter}.smrtTrimmed.fastq"), + output: + outputFile=os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/temp_trimReads/{readCounter}.smrtTrimmed.fastq") + threads: + resource['trimSMRTBellAdapters_hifi']['threads'] + resources: + mem_mb=resource['trimSMRTBellAdapters_hifi']['mem_mb'], + time=resource['trimSMRTBellAdapters_hifi']['time'], + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/logs/trimSMRTbell.{readCounter}.log") + priority: + 15 + conda: + os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + shell: + """ + (cutadapt -j {threads} -o {output.outputFile} {input} -b AAAAAAAAAAAAAAAAAATTAACGGAGGAGGAGGA --overlap 35 -b ATCTCTCTCTTTTCCTCCTCCTCCGTTGTTGTTGTTGAGAGAGAT --overlap 45 --revcomp -e 0.1 --discard-trimmed) &> {log} + """ + +rule symlink_noSMRTBellAdaptTrim_hifi: + input: + os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/temp_unzipFastqs/{readCounter}.notsmrtTrimmed.fastq"), + output: + outputFile=os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/temp_trimReads/{readCounter}.notsmrtTrimmed.fastq") + container: + None + shell: + """ + ln -s {input} {output.outputFile} + """ + +rule fastQC_hifi: + input: + os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/temp_trimReads/{readCounter}.{smrtornot}.fastq") + params: + folder2out=os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/QC/fastqc") + output: + os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/QC/fastqc/{readCounter}.{smrtornot}_fastqc.html") + threads: + resource['fastQC_hifi']['threads'] + resources: + mem_mb=resource['fastQC_hifi']['mem_mb'], + time=resource['fastQC_hifi']['time'], + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/logs/fastQC.hifi.{readCounter}.{smrtornot}.log") + conda: + os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + shell: + """ + (fastqc {input} -o {params.folder2out} -t {threads}) &> {log} + """ + +rule multiQC_hifi: + input: + lambda wildcards: expand(os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/QC/fastqc/{readCounter}.{smrtornot}_fastqc.html"), sample=wildcards.sample, 
readCounter=dictReadCounter[wildcards.sample], smrtornot=dictSamples[wildcards.sample][1]) + params: + folder2qc=os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/QC/fastqc/"), + folder2OUT=os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/QC/multiqc/"), + filename="{sample}.multiqcReport.html" + output: + os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/QC/multiqc/{sample}.multiqcReport.html") + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/logs/multiQC.{sample}.log") + conda: + os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + shell: + "(multiqc {params.folder2qc} -o {params.folder2OUT} -n {params.filename}) &> {log}" + + +rule merylCount_hifi: + input: + reads=os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/temp_trimReads/{readCounter}.{smrtornot}.fastq") + params: + kmer = "{kmer}" + threads: + resource['merylCount_hifi']['threads'] + resources: + mem_mb=resource['merylCount_hifi']['mem_mb'], + time=resource['merylCount_hifi']['time'], + output: + temp(directory(os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/merylDb/" + "{readCounter}" + "_hifi_dB.{smrtornot}.{kmer}.meryl"))), + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/logs/meryl_hifi_count.{readCounter}.{kmer}.{smrtornot}.log") + priority: + 10 + conda: + os.path.join(workflow.basedir, "envs/MERYL_MERQURY.yaml") + shell: + """ + (meryl count k={params.kmer} threads={threads} {input.reads} output {output}) &> {log} + """ + +rule merylUnion_hifi: + input: + lambda wildcards: expand(os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/merylDb/{readCounter}_hifi_dB.{smrtornot}.{kmer}.meryl/"), sample=wildcards.sample, readCounter=dictReadCounter[wildcards.sample], kmer=dictSamples[wildcards.sample][0], smrtornot=dictSamples[wildcards.sample][1]) + params: + kmer = "{kmer}", + removeReadDIR_trimmed=os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/temp_trimReads/"), + removeReadDIR_unzipped=os.path.join(config['Results'],"0_buildDatabases/{sample}/hifiReads/temp_unzipFastqs/") + threads: + resource['merylUnion_hifi']['threads'] + resources: + mem_mb=resource['merylUnion_hifi']['mem_mb'], + time=resource['merylUnion_hifi']['time'], + output: + directory(os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/merylDb/complete_hifi.{sample}.{kmer}.meryl")), + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/hifiReads/logs/meryl_hifi_combine.{sample}.{kmer}.log") + priority: + 10 + conda: + os.path.join(workflow.basedir, "envs/MERYL_MERQURY.yaml") + shell: + """ + (meryl union-sum {input} output {output}) &> {log} + rm -r {params.removeReadDIR_trimmed} + rm -r {params.removeReadDIR_unzipped} + """ diff --git a/rules/build_illumina.smk b/rules/build_illumina.smk new file mode 100644 index 0000000..b5c4d41 --- /dev/null +++ b/rules/build_illumina.smk @@ -0,0 +1,310 @@ + + + + +localrules: symlink_UnzippedFastq_R1_illumina,\ + symlink_UnzippedFastq_R2_illumina, \ + symLink_Trim10xBarcodes_noSequencingAdaptTrim_illumina, \ + symlink_No10xWithSequencingAdaptTrim_illumina, \ + symlink_No10xOrSequencingAdaptTrim_illumina, \ + symlink_Trim10xBarcodes_R2_illumina, \ + multiQC_illumina + + + + +def R1_gzipped(wildcards): + return yesGzip_R1.loc[(wildcards.sample, wildcards.readCounter), "Library_R1"] + +def R1_notgzipped(wildcards): + return noGzip_R1.loc[(wildcards.sample, wildcards.readCounter), "Library_R1"] + +def 
R2_gzipped(wildcards): + return yesGzip_R2.loc[(wildcards.sample, wildcards.readCounter), "Library_R2"] + +def R2_notgzipped(wildcards): + return noGzip_R2.loc[(wildcards.sample, wildcards.readCounter), "Library_R2"] + +rule unzipFastq_R1_illumina: + input: + assembly=R1_gzipped, + output: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/temp_unzipFastqs/{readCounter}.{trim10x}.{trimAdapters}_R1.fastq"), + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/logs/{readCounter}.{trim10x}.{trimAdapters}_R1_pigzUnzip.log"), + conda: + os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + threads: + resource['unzipFastq_R1_illumina']['threads'] + resources: + mem_mb=resource['unzipFastq_R1_illumina']['mem_mb'], + time=resource['unzipFastq_R1_illumina']['time'] + shell: + """ + pigz -p {threads} -c -d -k {input.assembly} > {output} 2> {log} + """ + +rule symlink_UnzippedFastq_R1_illumina: + input: + assembly=R1_notgzipped, + output: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/temp_unzipFastqs/{readCounter}.{trim10x}.{trimAdapters}_R1.fastq"), + container: + None + shell: + """ + ln -s {input} {output} + """ + +rule unzipFastq_R2_illumina: + input: + assembly=R2_gzipped, + output: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/temp_unzipFastqs/{readCounter}.{trim10x}.{trimAdapters}_R2.fastq"), + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/logs/{readCounter}.{trim10x}.{trimAdapters}_R2_pigzUnzip.log") + conda: + os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + threads: + resource['unzipFastq_R2_illumina']['threads'] + resources: + mem_mb=resource['unzipFastq_R2_illumina']['mem_mb'], + time=resource['unzipFastq_R2_illumina']['time'] + shell: + """ + pigz -p {threads} -c -d -k {input.assembly} > {output} 2> {log} + """ + +rule symlink_UnzippedFastq_R2_illumina: + input: + assembly=R2_notgzipped, + output: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/temp_unzipFastqs/{readCounter}.{trim10x}.{trimAdapters}_R2.fastq"), + container: + None + shell: + """ + ln -s {input} {output} + """ + + +rule trim10xBarcodes_illumina: + input: + read1=os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/temp_unzipFastqs/{readCounter}.10xTrimmed.{trimAdapters}_R1.fastq"), + output: + read1=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.10xTrimmed.{trimAdapters}_Read1.fastq"), + threads: + resource['trim10xBarcodes_illumina']['threads'] + resources: + mem_mb=resource['trim10xBarcodes_illumina']['mem_mb'], + time=resource['trim10xBarcodes_illumina']['time'] + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/logs/{readCounter}.10xTrimmed.10BarcodeRemoval_Trimmomatic.{trimAdapters}.log") + conda: + os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + shell: + """ + (trimmomatic SE -threads {threads} {input.read1} {output.read1} HEADCROP:23) &> {log} + """ + + +rule symlink_Trim10xBarcodes_R2_illumina: + input: + read2=os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/temp_unzipFastqs/{readCounter}.10xTrimmed.{trimAdapters}_R2.fastq") + output: + read2=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.10xTrimmed.{trimAdapters}_Read2.fastq") + container: + None + shell: + """ + ln -s {input.read2} {output.read2} + """ + + + +rule 
symLink_Trim10xBarcodes_noSequencingAdaptTrim_illumina: + input: + read1=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.10xTrimmed.notAdaptTrimmed_Read1.fastq"), + read2=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.10xTrimmed.notAdaptTrimmed_Read2.fastq"), + output: + read1=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.10xTrimmed.notAdaptTrimmed_val_1.fq"), + read2=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.10xTrimmed.notAdaptTrimmed_val_2.fq") + container: + None + shell: + """ + ln -s {input.read1} {output.read1} + ln -s {input.read2} {output.read2} + """ + +rule symlink_No10xOrSequencingAdaptTrim_illumina: + input: + read1=os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/temp_unzipFastqs/{readCounter}.not10xTrimmed.notAdaptTrimmed_R1.fastq"), + read2=os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/temp_unzipFastqs/{readCounter}.not10xTrimmed.notAdaptTrimmed_R2.fastq") + output: + read1=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.not10xTrimmed.notAdaptTrimmed_val_1.fq"), + read2=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.not10xTrimmed.notAdaptTrimmed_val_2.fq") + container: + None + shell: + """ + ln -s {input.read1} {output.read1} + ln -s {input.read2} {output.read2} + """ + +rule symlink_No10xWithSequencingAdaptTrim_illumina: + input: + read1=os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/temp_unzipFastqs/{readCounter}.not10xTrimmed.AdaptTrimmed_R1.fastq"), + read2=os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/temp_unzipFastqs/{readCounter}.not10xTrimmed.AdaptTrimmed_R2.fastq") + output: + read1=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.not10xTrimmed.AdaptTrimmed_Read1.fastq"), + read2=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.not10xTrimmed.AdaptTrimmed_Read2.fastq") + container: + None + shell: + """ + ln -s {input.read1} {output.read1} + ln -s {input.read2} {output.read2} + """ + +rule trimSequencingAdapters_illumina: + input: + read1= os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.{trim10x}.AdaptTrimmed_Read1.fastq"), + read2= os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.{trim10x}.AdaptTrimmed_Read2.fastq"), + params: + outputDir=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/"), + r1_prefix="{readCounter}.{trim10x}.AdaptTrimmed", + output: + read1=temp(os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.{trim10x}.AdaptTrimmed_val_1.fq")), + read2=temp(os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.{trim10x}.AdaptTrimmed_val_2.fq")) + threads: + resource['trimSequencingAdapters_illumina']['threads'] + resources: + mem_mb=resource['trimSequencingAdapters_illumina']['mem_mb'], + time=resource['trimSequencingAdapters_illumina']['time'], + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/logs/{readCounter}.{trim10x}.AdaptTrimmed_tGalore.log") + conda: + 
os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + shell: + """ + (trim_galore -j {threads} --basename {params.r1_prefix} --dont_gzip --length 65 -o {params.outputDir} --paired {input.read1} {input.read2}) &> {log} + """ + +rule fastQC_illumina: + input: + read1=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.{trim10x}.{trimAdapters}_val_1.fq"), + read2=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.{trim10x}.{trimAdapters}_val_2.fq") + params: + folder2out=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/QC/fastqc") + output: + os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/QC/fastqc/{readCounter}.{trim10x}.{trimAdapters}_val_1_fastqc.html"), + os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/QC/fastqc/{readCounter}.{trim10x}.{trimAdapters}_val_2_fastqc.html") + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/logs/{readCounter}.{trim10x}.{trimAdapters}_fastqc.log") + threads: + resource['fastQC_illumina']['threads'] + resources: + mem_mb=resource['fastQC_illumina']['mem_mb'], + time=resource['fastQC_illumina']['time'], + conda: + os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + shell: + "(fastqc {input} -o {params.folder2out} -t {threads}) &> {log}" + +rule multiQC_illumina: + input: + read1=lambda wildcards: expand(os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/QC/fastqc/{readCounter}.{trim10x}.{trimAdapters}_val_1_fastqc.html"), sample=wildcards.sample, readCounter=dictReadCounter[wildcards.sample], trim10x=dictSamples[wildcards.sample][1], trimAdapters=dictSamples[wildcards.sample][2]), + read2=lambda wildcards: expand(os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/QC/fastqc/{readCounter}.{trim10x}.{trimAdapters}_val_2_fastqc.html"), sample=wildcards.sample, readCounter=dictReadCounter[wildcards.sample], trim10x=dictSamples[wildcards.sample][1], trimAdapters=dictSamples[wildcards.sample][2]) + params: + folder2qc=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/QC/fastqc/"), + folder2out=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/QC/multiqc/"), + filename="{sample}.multiqcReport.html" + output: + os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/QC/multiqc/{sample}.multiqcReport.html") + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/logs/{sample}.multiqc.log") + conda: + os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + shell: + "(multiqc {params.folder2qc} -o {params.folder2out} -n {params.filename}) &> {log}" + + + +rule merylCount_R1_illumina: + input: + read1= os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.{trim10x}.{trimAdapters}_val_1.fq") + params: + kmer = "{kmer}", + threads: + resource['merylCount_R1_illumina']['threads'] + resources: + mem_mb=resource['merylCount_R1_illumina']['mem_mb'], + time=resource['merylCount_R1_illumina']['time'], + output: + temp(directory(os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/merylDb/{readCounter}.{trim10x}.{trimAdapters}_R1.{kmer}.meryl"))) + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/logs/{readCounter}.{trim10x}.{trimAdapters}_meryl_R1.{kmer}.log") + priority: + 10 + conda: + os.path.join(workflow.basedir, "envs/MERYL_MERQURY.yaml") + 
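+    # Counting and merging are deliberately split: `meryl count` builds one
+    # k-mer database per read file (so libraries are counted in parallel), and
+    # `meryl union-sum` later adds the per-file counts into the single database
+    # that the evaluation mode consumes. A minimal sketch of the equivalent
+    # CLI calls (illustrative paths, not taken from the sample sheet):
+    #     meryl count k=21 threads=12 lib1_R1.fq output lib1_R1.21.meryl
+    #     meryl union-sum lib1_R1.21.meryl lib1_R2.21.meryl output complete.21.meryl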
shell: + """ + (meryl count k={params.kmer} threads={threads} {input.read1} output {output}) &> {log} + """ + +rule merylCount_R2_illumina: + input: + read2= os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.{trim10x}.{trimAdapters}_val_2.fq") + params: + kmer = "{kmer}", + threads: + resource['merylCount_R2_illumina']['threads'] + resources: + mem_mb=resource['merylCount_R2_illumina']['mem_mb'], + time=resource['merylCount_R2_illumina']['time'], + output: + temp(directory(os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/merylDb/{readCounter}.{trim10x}.{trimAdapters}_R2.{kmer}.meryl"))) + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/logs/{readCounter}.{trim10x}.{trimAdapters}_meryl_R2.{kmer}.log") + priority: + 10 + conda: + os.path.join(workflow.basedir, "envs/MERYL_MERQURY.yaml") + shell: + """ + (meryl count k={params.kmer} threads={threads} {input.read2} output {output}) &> {log} + """ + + +rule merylUnion_illumina: + input: + # removeReads1=lambda wildcards: expand(os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.{trim10x}.{trimAdapters}_val_1.fq"), sample=wildcards.sample, readCounter=dictReadCounter[wildcards.sample], trim10x=dictSamples[wildcards.sample][1], trimAdapters=dictSamples[wildcards.sample][2]), + # removeReads2=lambda wildcards: expand(os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/{readCounter}.{trim10x}.{trimAdapters}_val_2.fq"), sample=wildcards.sample, readCounter=dictReadCounter[wildcards.sample], trim10x=dictSamples[wildcards.sample][1], trimAdapters=dictSamples[wildcards.sample][2]), + read1=lambda wildcards: expand(os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/merylDb/{readCounter}.{trim10x}.{trimAdapters}_R1.{kmer}.meryl"), sample=wildcards.sample, readCounter=dictReadCounter[wildcards.sample], trim10x=dictSamples[wildcards.sample][1], kmer=dictSamples[wildcards.sample][0], trimAdapters=dictSamples[wildcards.sample][2]), + read2=lambda wildcards: expand(os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/merylDb/{readCounter}.{trim10x}.{trimAdapters}_R2.{kmer}.meryl"), sample=wildcards.sample, readCounter=dictReadCounter[wildcards.sample], trim10x=dictSamples[wildcards.sample][1], kmer=dictSamples[wildcards.sample][0], trimAdapters=dictSamples[wildcards.sample][2]) + params: + removeReadDIR_trimmed=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_trimReads/"), + removeReadDIR_unzipped=os.path.join(config['Results'],"0_buildDatabases/{sample}/illuminaReads/temp_unzipFastqs/"), + kmer = "{kmer}", + path= os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/merylDb/") + threads: + resource['merylUnion_illumina']['threads'] + resources: + mem_mb=resource['merylUnion_illumina']['mem_mb'], + time=resource['merylUnion_illumina']['time'], + output: + directory(os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/merylDb/complete_illumina.{sample}.{kmer}.meryl")), + log: + os.path.join(config['Results'], "0_buildDatabases/{sample}/illuminaReads/logs/{sample}.illumina_meryl.{kmer}.log") + priority: + 10 + conda: + os.path.join(workflow.basedir, "envs/MERYL_MERQURY.yaml") + shell: + """ + (meryl union-sum {input.read1} {input.read2} output {output}) &> {log} + rm -r {params.removeReadDIR_trimmed} + rm -r {params.removeReadDIR_unzipped} + """ diff --git 
a/rules/evaluate.smk b/rules/evaluate.smk
new file mode 100644
index 0000000..5b52808
--- /dev/null
+++ b/rules/evaluate.smk
@@ -0,0 +1,1039 @@
+localrules: symlink_UnzippedFastq_R1_HiC, \
+            symlink_UnzippedFastq_R2_HiC, \
+            symlink_UnzippedFasta_asm1, \
+            symlink_UnzippedFasta_asm2, \
+            KeyResults_GenomescopeProfiles, \
+            KeyResults_statistics, \
+            KeyResults_merqury, \
+            KeyResults_busco, \
+            KeyResults_smudgeplot, \
+            all_metrics_table_asm1, \
+            all_metrics_table_asm2, \
+            IndividualResults_md, \
+            PretextMaps_md, \
+            Reports_md, \
+            Reports_pdf, \
+            ColouredTable_html, \
+            HeatmapTable_html, \
+            BothTables_pdf, \
+            ConcatAll_pdfs, \
+            symlink_genomescope, \
+            symlink_smudgeplot, \
+            all_metrics_final_table, \
+            Table_md_all_metrics, \
+            make_ear
+
+def HiC_R1_gzipped(wildcards):
+    return yesGzip_HiC_R1.loc[(wildcards.asmID), "HiC_R1"]
+
+rule unzipFastq_R1_HiC:
+    input:
+        fastq=HiC_R1_gzipped,
+    output:
+        temp(os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R1.fastq")),
+    log:
+        os.path.join(config['Results'], "1_evaluation/{asmID}/logs/pigzUnzip.HiC.R1.{asmID}.log")
+    conda:
+        os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml")
+    threads:
+        resource['unzipFastq_R1_HiC']['threads']
+    resources:
+        mem_mb=resource['unzipFastq_R1_HiC']['mem_mb'],
+        time=resource['unzipFastq_R1_HiC']['time'],
+    shell:
+        """
+        pigz -p {threads} -c -d -k {input.fastq} > {output} 2> {log}
+        """
+
+def HiC_R1_unzipped(wildcards):
+    return noGzip_HiC_R1.loc[(wildcards.asmID), "HiC_R1"]
+
+rule symlink_UnzippedFastq_R1_HiC:
+    input:
+        fastq=HiC_R1_unzipped,
+    output:
+        temp(os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R1.fastq")),
+    container:
+        None
+    shell:
+        """
+        ln -sf {input} {output}
+        """
+
+
+def HiC_R2_gzipped(wildcards):
+    return yesGzip_HiC_R2.loc[(wildcards.asmID), "HiC_R2"]
+
+rule unzipFastq_R2_HiC:
+    input:
+        fastq=HiC_R2_gzipped,
+    output:
+        temp(os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R2.fastq")),
+    log:
+        os.path.join(config['Results'], "1_evaluation/{asmID}/logs/pigzUnzip.HiC.R2.{asmID}.log")
+    conda:
+        os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml")
+    threads:
+        resource['unzipFastq_R2_HiC']['threads']
+    resources:
+        mem_mb=resource['unzipFastq_R2_HiC']['mem_mb'],
+        time=resource['unzipFastq_R2_HiC']['time']
+    shell:
+        """
+        pigz -p {threads} -c -d -k {input.fastq} > {output} 2> {log}
+        """
+
+def HiC_R2_unzipped(wildcards):
+    return noGzip_HiC_R2.loc[(wildcards.asmID), "HiC_R2"]
+
+rule symlink_UnzippedFastq_R2_HiC:
+    input:
+        fastq=HiC_R2_unzipped,
+    output:
+        temp(os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R2.fastq")),
+    container:
+        None
+    shell:
+        """
+        ln -sf {input} {output}
+        """
+
+
+rule indexFasta_asm1:
+    input:
+        assembly_asm1=os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta"),
+    output:
+        os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.0123"),
+        os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.amb"),
+        os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.ann"),
+        os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.bwt.2bit.64"),
+        os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.pac"),
+        os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.fai")
+    log:
os.path.join(config['Results'], "1_evaluation/{asmID}/logs/indexASM.asm1.{asmID}.log") + conda: + os.path.join(workflow.basedir, "envs/HiC_CONTACT_MAPS.yaml") + threads: + resource['indexFasta_PRI']['threads'] + resources: + mem_mb=resource['indexFasta_PRI']['mem_mb'], + time=resource['indexFasta_PRI']['time'] + shell: + """ + (bwa-mem2 index {input.assembly_asm1}) &> {log} + (samtools faidx {input.assembly_asm1}) &>> {log} + """ + +rule convertFastqTObam_R1_HiC: + input: + HiC_R1=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R1.fastq"), + assembly=os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta"), + indexes= [ os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.0123"), + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.amb"), + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.ann"), + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.bwt.2bit.64"), + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.pac"), + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.fai") ] + output: + temp(os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R1.bam")) + conda: + os.path.join(workflow.basedir, "envs/HiC_CONTACT_MAPS.yaml") + threads: + resource['convertFastqTObam_R1_HiC']['threads'] + resources: + mem_mb=resource['convertFastqTObam_R1_HiC']['mem_mb'], + time=resource['convertFastqTObam_R1_HiC']['time'] + log: + os.path.join(config['Results'], "1_evaluation/{asmID}/logs/fastq2bam.HiC.R1.{asmID}.log") + shell: + """ + (bwa-mem2 mem -t {threads} -B8 {input.assembly} {input.HiC_R1} | samtools view -Sb - > {output}) &> {log} + """ + +rule convertFastqTObam_R2_HiC: + input: + HiC_R2=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R2.fastq"), + assembly=os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta"), + indexes= [ os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.0123"), + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.amb"), + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.ann"), + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.bwt.2bit.64"), + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.pac"), + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.fai") ] + output: + temp(os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R2.bam")) + conda: + os.path.join(workflow.basedir, "envs/HiC_CONTACT_MAPS.yaml") + threads: + resource['convertFastqTObam_R2_HiC']['threads'] + resources: + mem_mb=resource['convertFastqTObam_R2_HiC']['mem_mb'], + time=resource['convertFastqTObam_R2_HiC']['time'] + log: + os.path.join(config['Results'], "1_evaluation/{asmID}/logs/fastq2bam.HiC.R2.{asmID}.log") + shell: + """ + (bwa-mem2 mem -t {threads} -B8 {input.assembly} {input.HiC_R2} | samtools view -Sb - > {output}) &> {log} + """ + +rule filter5PrimeEnd_R1_HiC: + input: + HiC_R1_bam=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R1.bam"), + assembly=os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta"), + 
indexes= [ os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.0123"),
+                   os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.amb"),
+                   os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.ann"),
+                   os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.bwt.2bit.64"),
+                   os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.pac"),
+                   os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.fai") ]
+    output:
+        temp(os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R1.FILTERED.bam"))
+    params:
+        script=os.path.join(workflow.basedir, "scripts/process_HiC/filter_five_end.pl")
+    conda:
+        os.path.join(workflow.basedir, "envs/HiC_CONTACT_MAPS.yaml")
+    threads:
+        resource['filter5PrimeEnd_R1_HiC']['threads']
+    resources:
+        mem_mb=resource['filter5PrimeEnd_R1_HiC']['mem_mb'],
+        time=resource['filter5PrimeEnd_R1_HiC']['time']
+    log:
+        os.path.join(config['Results'], "1_evaluation/{asmID}/logs/filtered.HiC.R1.{asmID}.log")
+    shell:
+        """
+        (samtools view -h {input.HiC_R1_bam} | perl {params.script} | samtools view -@{threads} -Sb - > {output}) &> {log}
+        """
+
+rule filter5PrimeEnd_R2_HiC:
+    input:
+        HiC_R2_bam=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R2.bam"),
+        assembly=os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta"),
+        indexes= [ os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.0123"),
+                   os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.amb"),
+                   os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.ann"),
+                   os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.bwt.2bit.64"),
+                   os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.pac"),
+                   os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta.fai") ]
+    output:
+        temp(os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R2.FILTERED.bam"))
+    params:
+        script=os.path.join(workflow.basedir, "scripts/process_HiC/filter_five_end.pl")
+    conda:
+        os.path.join(workflow.basedir, "envs/HiC_CONTACT_MAPS.yaml")
+    threads:
+        resource['filter5PrimeEnd_R2_HiC']['threads']
+    resources:
+        mem_mb=resource['filter5PrimeEnd_R2_HiC']['mem_mb'],
+        time=resource['filter5PrimeEnd_R2_HiC']['time']
+    log:
+        os.path.join(config['Results'], "1_evaluation/{asmID}/logs/filtered.HiC.R2.{asmID}.log")
+    shell:
+        """
+        (samtools view -h {input.HiC_R2_bam} | perl {params.script} | samtools view -@{threads} -Sb - > {output}) &> {log}
+        """
+
+
+rule pairAndCombineFiltered_HiC:
+    input:
+        R1=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R1.FILTERED.bam"),
+        R2=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.R2.FILTERED.bam")
+    output:
+        temp(os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.COMBINED.FILTERED.bam"))
+    params:
+        script=os.path.join(workflow.basedir, "scripts/process_HiC/two_read_bam_combiner.pl")
+    conda:
+        os.path.join(workflow.basedir, "envs/HiC_CONTACT_MAPS.yaml")
+    threads:
+        resource['pairAndCombineFiltered_HiC']['threads']
+    resources:
+        mem_mb=resource['pairAndCombineFiltered_HiC']['mem_mb'],
+        time=resource['pairAndCombineFiltered_HiC']['time']
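+    # Arima-style Hi-C pre-processing: each mate was mapped single-ended and
+    # clipped to its 5'-most alignment by filter_five_end.pl; this rule then
+    # re-pairs the two filtered BAMs with two_read_bam_combiner.pl into a
+    # single BAM that pretextMap downstream turns into a contact map.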
log: + os.path.join(config['Results'], "1_evaluation/{asmID}/logs/combine.filtered.HiC.{asmID}.log") + shell: + """ + (perl {params.script} {input.R1} {input.R2} | samtools view -@{threads} -Sb > {output}) &>{log} + """ + +rule pretextMap: + input: + HiC_alignment=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.COMBINED.FILTERED.bam") + output: + pretextFile=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.COMBINED.FILTERED.pretext") + conda: + os.path.join(workflow.basedir, "envs/HiC_CONTACT_MAPS.yaml") + threads: + resource['pretextMap']['threads'] + resources: + mem_mb=resource['pretextMap']['mem_mb'], + time=resource['pretextMap']['time'] + log: + os.path.join(config['Results'], "1_evaluation/{asmID}/logs/PretextMap.{asmID}.log") + shell: + """ + (samtools view -h {input.HiC_alignment} | PretextMap -o {output.pretextFile} --sortby length --mapq 10) &> {log} + """ + +rule pretextSnapshot: + input: + pretextFile=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.COMBINED.FILTERED.pretext") + output: + pretextSnapshotFULL=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.COMBINED.FILTERED_FullMap.png"), + params: + outDirectory=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/") + conda: + os.path.join(workflow.basedir, "envs/HiC_CONTACT_MAPS.yaml") + threads: + resource['pretextSnapshot']['threads'] + resources: + mem_mb=resource['pretextSnapshot']['mem_mb'], + time=resource['pretextSnapshot']['time'] + log: + os.path.join(config['Results'], "1_evaluation/{asmID}/logs/PretextSnapshot.{asmID}.log") + shell: + """ + (PretextSnapshot -m {input.pretextFile} -o {params.outDirectory}) &> {log} + """ + + +####################################################################################################################################### + +def asm1_gzipped(wildcards): + return yesGzip_asm1.loc[(wildcards.asmID), "PRI_asm"] + + +rule unzipFasta_asm1: + input: + assembly=asm1_gzipped, + output: + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta"), + log: + os.path.join(config['Results'], "1_evaluation/{asmID}/logs/pigzUnzip.asm1.{asmID}.log") + conda: + os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + threads: + resource['unzipFasta_PRI']['threads'] + resources: + mem_mb=resource['unzipFasta_PRI']['mem_mb'], + time=resource['unzipFasta_PRI']['time'] + shell: + """ + pigz -p {threads} -c -d -k {input.assembly} > {output} 2> {log} + """ + +def asm1_unzipped(wildcards): + return noGzip_asm1.loc[(wildcards.asmID), "PRI_asm"] + +rule symlink_UnzippedFasta_asm1: + input: + assembly=asm1_unzipped, + output: + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta"), + container: + None + shell: + """ + ln -fs {input} {output} + """ + +################ + +def asm2_gzipped(wildcards): + return yesGzip_asm2.loc[(wildcards.asmID), "ALT_asm"] + +rule unzipFasta_asm2: + input: + assembly=asm2_gzipped, + output: + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm2.fasta"), + log: + os.path.join(config['Results'], "1_evaluation/{asmID}/logs/pigzUnzip.asm2.{asmID}.log") + conda: + os.path.join(workflow.basedir, "envs/UNZIP_and_QC.yaml") + threads: + resource['unzipFasta_ALT']['threads'] + resources: + mem_mb=resource['unzipFasta_ALT']['mem_mb'], + time=resource['unzipFasta_ALT']['time'] + shell: + """ + pigz -p {threads} -c -d -k {input.assembly} > {output} 2> {log} + """ + + +def 
asm2_unzipped(wildcards): + return noGzip_asm2.loc[(wildcards.asmID), "ALT_asm"] + +rule symlink_UnzippedFasta_asm2: + input: + assembly=asm2_unzipped, + output: + os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm2.fasta"), + container: + None + shell: + """ + ln -s {input} {output} + """ + + +#################### + + +def asm2File(wildcards): + if samples.loc[(wildcards.asmID), "ALT_present"] == True: + return os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm2.fasta") + else: + return os.path.join(workflow.basedir, "scripts/standIN_files/ALT_missing.fasta") + +def merylDB(wildcards): + return samples.loc[(wildcards.asmID), "merylDB"] + +rule merqury: + input: + assembly1=os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta"), + assembly2=asm2File, + merylDB_provided=merylDB + params: + outFile= "{asmID}" + "_merqOutput", + outPath= os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA"), + symlink_merylDB=directory(os.path.join(config['Results'], "1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/merylDB_providedFor_{asmID}.meryl")) + threads: + resource['merqury']['threads'] + resources: + mem_mb=resource['merqury']['mem_mb'], + time=resource['merqury']['time'] + output: + os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.qv"), + os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.completeness.stats"), + os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.{asmID}.asm1.spectra-cn.st.png"), + os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.{asmID}.asm1.spectra-cn.fl.png"), + os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.spectra-cn.st.png"), + os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.spectra-cn.fl.png"), + os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/merylDB_providedFor_{asmID}.hist") + log: + os.path.join(config['Results'],"1_evaluation/{asmID}/logs/merqury.{asmID}.log") + priority: + 3 + conda: + os.path.join(workflow.basedir, "envs/SMUDGEPLOT_MERQURY.yaml") + shell: + """ + ln -fs {input.merylDB_provided} {params.symlink_merylDB} + cd {params.outPath} + export OMP_NUM_THREADS={threads} + (merqury.sh {params.symlink_merylDB} {input.assembly1} {input.assembly2} {params.outFile}) &> {log} + """ + + + +####################################################################################################################################### + +def busco_lineage_path(wildcards): + lineage = samples.loc[(wildcards.asmID), "busco_lineage"] + path = os.path.join(workflow.basedir, "buscoLineage/" + lineage + "_odb10") + return path + +def busco_output(wildcards): + lineage = samples.loc[(wildcards.asmID), "busco_lineage"] + path = os.path.join(config['Results'], "1_evaluation/" + wildcards.asmID + "/BUSCOs/" + wildcards.asmID + "/short_summary.specific." + lineage + "_odb10." + wildcards.asmID + ".txt") + return path + +def busco_output_asm(wildcards, asm): + lineage = samples.loc[(wildcards.asmID), "busco_lineage"] + path = os.path.join(config['Results'], "1_evaluation/" + wildcards.asmID + "/BUSCOs/" + asm + "/short_summary.specific." + lineage + "_odb10." 
+ asm + ".txt") + return path + +def busco_lineage(wildcards): + return samples.loc[(wildcards.asmID), "busco_lineage"] + + +rule busco5: + input: + assembly1=os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta"), + lineage_path = busco_lineage_path + params: + chngDir = os.path.join(config['Results'], "1_evaluation/{asmID}/BUSCOs") + threads: + resource['busco5']['threads'] + resources: + mem_mb=resource['busco5']['mem_mb'], + time=resource['busco5']['time'] + output: + os.path.join(config['Results'], "1_evaluation/{asmID}/BUSCOs/asm1/busco.done") + conda: + os.path.join(workflow.basedir, "envs/BUSCO.yaml") + log: + os.path.join(config['Results'], "1_evaluation/{asmID}/logs/busco5.{asmID}_asm1.log") + priority: + 20 + shell: + """ + cd {params.chngDir} + (busco -m genome --offline --in {input.assembly1} -o asm1 -l {input.lineage_path} -c {threads} -f --limit 5) &> {log} + + touch {output} + """ + +rule busco5_asm2: + input: + assembly2 = asm2File, + lineage_path = busco_lineage_path + params: + chngDir = os.path.join(config['Results'], "1_evaluation/{asmID}/BUSCOs") + threads: + resource['busco5']['threads'] + resources: + mem_mb=resource['busco5']['mem_mb'], + time=resource['busco5']['time'] + output: + os.path.join(config['Results'], "1_evaluation/{asmID}/BUSCOs/asm2/busco.done") + conda: + os.path.join(workflow.basedir, "envs/BUSCO.yaml") + log: + os.path.join(config['Results'], "1_evaluation/{asmID}/logs/busco5.{asmID}_asm2.log") + priority: + 20 + shell: + """ + cd {params.chngDir} + (busco -m genome --offline --in {input.assembly2} -o asm2 -l {input.lineage_path} -c {threads} -f --limit 5) &> {log} + + touch {output} + """ + + + +rule make_kmer_histogram: + output: + hist=os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/merylDB.hist") + params: + meryl_path=lambda wildcards: samples[samples['merylDB_basename'] == wildcards.name]['merylDB'].values[0] + conda: + os.path.join(workflow.basedir, "envs/SMUDGEPLOT_MERQURY.yaml") + log: + os.path.join(config['Results'],"1_evaluation/kmer_profiling/{name}/logs/make_kmer_histogram.log") + threads: + resource['merylHistogram']['threads'] + resources: + mem_mb=resource['merylHistogram']['mem_mb'], + time=resource['merylHistogram']['time'] + shell: + """ + (meryl histogram {params.meryl_path} > {output.hist}) &> {log} + """ + +rule genomescope2: + input: + hist=os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/merylDB.hist") + output: + summary=os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/genomescope/results_summary.txt"), + logPlot=os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/genomescope/results_log_plot.png"), + linearPlot=os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/genomescope/results_linear_plot.png"), + estimatedSize=os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/genomescope/results_sizeEst.txt") + params: + outFolder=os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/genomescope/"), + name_file="results", + kmer=lambda wildcards: samples[samples['merylDB_basename'] == wildcards.name]['merylDB_kmer'].values[0], + cpHist=os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/merylDB_10000.hist") + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_R_SCRIPTS.yaml") + log: + os.path.join(config['Results'],"1_evaluation/kmer_profiling/{name}/logs/genomescope2.log") + threads: + resource['GenomeScope2Profiles']['threads'] + resources: + 
mem_mb=resource['GenomeScope2Profiles']['mem_mb'],
+        time=resource['GenomeScope2Profiles']['time']
    shell:
+        """
+        head -n 10000 {input.hist} > {params.cpHist}
+        (genomescope2 -i {params.cpHist} -o {params.outFolder} -k {params.kmer} -n {params.name_file}) &> {log}
+        grep "Genome Haploid Length" {output.summary} | awk {{'print $6'}} | sed 's/,//g' > {output.estimatedSize}
+        rm {params.cpHist}
+        """
+
+
+rule smudgeplot:
+    input:
+        hist=os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/merylDB.hist")
+    output:
+        plot1=os.path.join(config['Results'],"1_evaluation/kmer_profiling/{name}/smudgeplot/results_smudgeplot.png"),
+        plot2=os.path.join(config['Results'],"1_evaluation/kmer_profiling/{name}/smudgeplot/results_smudgeplot_log10.png"),
+        summary_table=os.path.join(config['Results'],"1_evaluation/kmer_profiling/{name}/smudgeplot/results_summary_table.tsv")
+    params:
+        output_path=os.path.join(config['Results'],"1_evaluation/kmer_profiling/{name}/smudgeplot"),
+        meryl_path=lambda wildcards: samples[samples['merylDB_basename'] == wildcards.name]['merylDB'].values[0],
+        kmer=lambda wildcards: samples[samples['merylDB_basename'] == wildcards.name]['merylDB_kmer'].values[0]
+    conda:
+        os.path.join(workflow.basedir, "envs/SMUDGEPLOT_MERQURY.yaml")
+    threads:
+        resource['smudgeplot']['threads']
+    resources:
+        mem_mb=resource['smudgeplot']['mem_mb'],
+        time=resource['smudgeplot']['time']
+    log:
+        os.path.join(config['Results'],"1_evaluation/kmer_profiling/{name}/logs/smudgeplot.log")
+    shell:
+        """
+        L=$(smudgeplot.py cutoff {input.hist} L)
+        U=$(smudgeplot.py cutoff {input.hist} U)
+
+        (meryl print less-than ${{U}} greater-than ${{L}} threads={threads} memory={resources.mem_mb} {params.meryl_path} | sort > {params.output_path}/meryl_L${{L}}_U${{U}}.dump) &> {log}
+
+        (smudgeplot.py hetkmers -o {params.output_path}/meryl_L${{L}}_U${{U}} --middle {params.output_path}/meryl_L${{L}}_U${{U}}.dump) &>> {log}
+
+        (smudgeplot.py plot -o {params.output_path}/results {params.output_path}/meryl_L${{L}}_U${{U}}_coverages.tsv -k {params.kmer}) &>> {log}
+        """
+
+
+def merylDB_basename():
+    unique_names = samples["merylDB_basename"].unique()
+    return unique_names
+
+rule symlink_genomescope:
+    input:
+        gscope=lambda wildcards: expand(os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/genomescope/results_summary.txt"), name = merylDB_basename())
+    output:
+        os.path.join(config['Results'],"1_evaluation/{asmID}/KMER_PROFILING/genomescope/results_summary.txt"),
+        os.path.join(config['Results'], "1_evaluation/{asmID}/KMER_PROFILING/genomescope/results_log_plot.png"),
+        os.path.join(config['Results'], "1_evaluation/{asmID}/KMER_PROFILING/genomescope/results_linear_plot.png"),
+        os.path.join(config['Results'], "1_evaluation/{asmID}/KMER_PROFILING/genomescope/results_sizeEst.txt")
+    params:
+        merylDB_path=lambda wildcards: os.path.join(config['Results'], "1_evaluation/kmer_profiling", samples.loc[(wildcards.asmID), "merylDB_basename"]),
+        output_path=lambda wildcards: os.path.join(config['Results'], "1_evaluation", wildcards.asmID)
+    shell:
+        """
+        # Create the target directories if they don't exist
+        mkdir -p {params.output_path}/KMER_PROFILING/genomescope

+        # Remove the symlinks if they already exist
+        rm -rf {params.output_path}/KMER_PROFILING/genomescope/*
+
+        # Symlink the directories
+        ln -sf {params.merylDB_path}/genomescope/* {params.output_path}/KMER_PROFILING/genomescope/
+        """
+
+rule symlink_smudgeplot:
+    input:
+        
smudgeplot=expand(os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/smudgeplot/results_summary_table.tsv"), name = merylDB_basename()) + output: + os.path.join(config['Results'],"1_evaluation/{asmID}/KMER_PROFILING/smudgeplot/results_summary_table.tsv"), + os.path.join(config['Results'],"1_evaluation/{asmID}/KMER_PROFILING/smudgeplot/results_smudgeplot.png") + params: + merylDB_path=lambda wildcards: os.path.join(config['Results'], "1_evaluation/kmer_profiling", samples.loc[(wildcards.asmID), "merylDB_basename"]), + output_path=lambda wildcards: os.path.join(config['Results'], "1_evaluation", wildcards.asmID), + shell: + """ + # Create the target directories if they don't exist + mkdir -p {params.output_path}/KMER_PROFILING/smudgeplot + + # Remove the symlinks if they already exist + rm -rf {params.output_path}/KMER_PROFILING/smudgeplot/* + + # Symlink the directories + ln -sf {params.merylDB_path}/smudgeplot/* {params.output_path}/KMER_PROFILING/smudgeplot/ + """ + + +def readfile(file_path): + with open(file_path) as f: + ff=f.readlines() + return ff[0][:-1] + + +rule gfastatsResults_asm1: + input: + assembly = os.path.join(config['Results'], "1_evaluation/{asmID}/ASSEMBLY_FASTAS/{asmID}.asm1.fasta"), + estGenomeFile = os.path.join(config['Results'], "1_evaluation/{asmID}/KMER_PROFILING/genomescope/results_sizeEst.txt") + output: + gfaResults=os.path.join(config['Results'],"1_evaluation/{asmID}/ASSEMBLY_STATISTICS/asm1/{asmID}_gfastats.txt") + params: + estGenome = lambda wildcards: dictSamples[wildcards.asmID][6] if dictSamples[wildcards.asmID][6] != 0 else readfile(os.path.join(config['Results'], f"1_evaluation/{wildcards.asmID}/KMER_PROFILING/genomescope/results_sizeEst.txt")) + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_R_SCRIPTS.yaml") + log: + os.path.join(config['Results'],"1_evaluation/{asmID}/logs/assemblyStats_gfastats_asm1.{asmID}.log") + threads: + resource['gfastatsResults']['threads'] + resources: + mem_mb=resource['gfastatsResults']['mem_mb'], + time=resource['gfastatsResults']['time'] + shell: + """ + (gfastats --nstar-report -j {threads} {input.assembly} {params.estGenome} > {output.gfaResults}) &> {log} + """ + +rule gfastatsResults_asm2: + input: + assembly = asm2File, + estGenomeFile = os.path.join(config['Results'], "1_evaluation/{asmID}/KMER_PROFILING/genomescope/results_sizeEst.txt") + output: + gfaResults=os.path.join(config['Results'],"1_evaluation/{asmID}/ASSEMBLY_STATISTICS/asm2/{asmID}_gfastats.txt") + params: + estGenome = lambda wildcards: dictSamples[wildcards.asmID][6] if dictSamples[wildcards.asmID][6] != 0 else readfile(os.path.join(config['Results'], f"1_evaluation/{wildcards.asmID}/KMER_PROFILING/genomescope/results_sizeEst.txt")) + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_R_SCRIPTS.yaml") + log: + os.path.join(config['Results'],"1_evaluation/{asmID}/logs/assemblyStats_gfastats_asm2.{asmID}.log") + threads: + resource['gfastatsResults']['threads'] + resources: + mem_mb=resource['gfastatsResults']['mem_mb'], + time=resource['gfastatsResults']['time'] + shell: + """ + (gfastats --nstar-report -j {threads} {input.assembly} {params.estGenome} > {output.gfaResults}) &> {log} + """ + + +rule all_metrics_table_asm1: + input: + gfaStats1=os.path.join(config['Results'],"1_evaluation/{asmID}/ASSEMBLY_STATISTICS/asm1/{asmID}_gfastats.txt"), + qv=os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.qv"), + busco_done = os.path.join(config['Results'], 
"1_evaluation/{asmID}/BUSCOs/asm1/busco.done"), + completeness=os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.completeness.stats") + output: + all_metrics_table = os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_all_metrics_asm1.tsv") + container: + None + params: + busco_asm1=lambda wildcards: busco_output_asm(wildcards, "asm1"), + script = os.path.join(workflow.basedir, "scripts/report/makeAllMetricsTable.sh") + shell: + """ + {params.script} {wildcards.asmID} {input.gfaStats1} {input.qv} {params.busco_asm1} {input.completeness} 1 {output.all_metrics_table} + """ + + +rule all_metrics_table_asm2: + input: + gfaStats1=os.path.join(config['Results'],"1_evaluation/{asmID}/ASSEMBLY_STATISTICS/asm2/{asmID}_gfastats.txt"), + qv=os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.qv"), + busco_done = os.path.join(config['Results'], "1_evaluation/{asmID}/BUSCOs/asm2/busco.done"), + completeness=os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.completeness.stats") + output: + all_metrics_table = os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_all_metrics_asm2.tsv") + container: + None + params: + busco_asm2=lambda wildcards: busco_output_asm(wildcards, "asm2"), + script = os.path.join(workflow.basedir, "scripts/report/makeAllMetricsTable.sh") + shell: + """ + {params.script} {wildcards.asmID} {input.gfaStats1} {input.qv} {params.busco_asm2} {input.completeness} 2 {output.all_metrics_table} + """ + +rule KeyResults_GenomescopeProfiles: + input: + gscopeSum=os.path.join(config['Results'], "1_evaluation/{asmID}/KMER_PROFILING/genomescope/results_summary.txt"), + gscopeLog=os.path.join(config['Results'], "1_evaluation/{asmID}/KMER_PROFILING/genomescope/results_log_plot.png"), + gscopeLin=os.path.join(config['Results'], "1_evaluation/{asmID}/KMER_PROFILING/genomescope/results_linear_plot.png") + output: + gscopeSum=os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/{asmID}_genomescope_summary.txt"), + gscopeLog=os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/{asmID}_genomescope_log_plot.png"), + gscopeLin=os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/{asmID}_genomescope_linear_plot.png") + container: + None + shell: + """ + cp {input.gscopeSum} {output.gscopeSum} + cp {input.gscopeLog} {output.gscopeLog} + cp {input.gscopeLin} {output.gscopeLin} + """ + +rule KeyResults_busco: + input: + busco_asm1 = os.path.join(config['Results'], "1_evaluation/{asmID}/BUSCOs/asm1/busco.done"), + busco_asm2 = lambda wildcards: os.path.join(config['Results'], "1_evaluation/{asmID}/BUSCOs/asm2/busco.done") if samples.loc[wildcards.asmID, "ALT_present"] else "scripts/standIN_files/ALT_missing.fasta", + params: + busco_asm1=lambda wildcards: busco_output_asm(wildcards, "asm1"), + busco_asm2=lambda wildcards: busco_output_asm(wildcards, "asm2") if samples.loc[wildcards.asmID, "ALT_present"] else "scripts/standIN_files/ALT_missing.fasta", + asm2provided=lambda wildcards: "true" if samples.loc[(wildcards.asmID), "ALT_present"] else "false", + buscoScores_asm2=os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/only_buscoScores_{asmID}_asm2.txt"), + busco_asm2_output=os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/short_summary.specific.{asmID}_asm2.txt") + output: + busco_asm1=os.path.join(config['Results'], 
"1_evaluation/{asmID}/KEY_RESULTS/short_summary.specific.{asmID}_asm1.txt"), + buscoScores_asm1=os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/only_buscoScores_{asmID}_asm1.txt") + container: + None + shell: + """ + head -n 15 {params.busco_asm1} | tail -n 7 | sed "s/^['\t']*//" > {output.buscoScores_asm1} + cp {params.busco_asm1} {output.busco_asm1} + + if {params.asm2provided}; then + head -n 15 {params.busco_asm2} | tail -n 7 | sed "s/^['\t']*//" > {params.buscoScores_asm2} + cp {params.busco_asm2} {params.busco_asm2_output} + fi + """ + +rule KeyResults_merqury: + input: + qv=os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.qv"), + completeness=os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.completeness.stats"), + spectraStacked_asm1=os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.{asmID}.asm1.spectra-cn.st.png"), + spectraFlat_asm1=os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.{asmID}.asm1.spectra-cn.fl.png"), + spectraStacked_both=os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.spectra-cn.st.png"), + spectraFlat_both=os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.spectra-cn.fl.png") + output: + qv=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_merqOutput.qv"), + completeness=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_merqOutput.completeness.stats"), + spectraStacked_asm1=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_merqOutput.{asmID}.asm1.spectra-cn.st.png"), + spectraFlat_asm1=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_merqOutput.{asmID}.asm1.spectra-cn.fl.png"), + spectraStacked_both=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_merqOutput.spectra-cn.st.png"), + spectraFlat_both=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_merqOutput.spectra-cn.fl.png") + container: + None + shell: + """ + cp {input.completeness} {output.completeness} + cp {input.qv} {output.qv} + cp {input.spectraStacked_asm1} {output.spectraStacked_asm1} + cp {input.spectraFlat_asm1} {output.spectraFlat_asm1} + cp {input.spectraFlat_both} {output.spectraFlat_both} + cp {input.spectraStacked_both} {output.spectraStacked_both} + """ +rule KeyResults_smudgeplot: + input: + smudgeplot=os.path.join(config['Results'],"1_evaluation/{asmID}/KMER_PROFILING/smudgeplot/results_summary_table.tsv"), + smudgeplotPlot=os.path.join(config['Results'],"1_evaluation/{asmID}/KMER_PROFILING/smudgeplot/results_smudgeplot.png") + output: + smudgeplot=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/smudgeplot_summary_table.tsv"), + smudgeplotPlot=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/results_smudgeplot.png") + shell: + """ + cp {input.smudgeplot} {output.smudgeplot} + cp {input.smudgeplotPlot} {output.smudgeplotPlot} + """ + +rule KeyResults_statistics: + input: + gscopeSum=os.path.join(config['Results'], "1_evaluation/{asmID}/KMER_PROFILING/genomescope/results_summary.txt"), + all_metrics_table=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_all_metrics_asm1.tsv"), + all_metrics_asm2 = lambda wildcards: os.path.join(config['Results'], 
"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_all_metrics_asm2.tsv") if samples.loc[wildcards.asmID, "ALT_present"] else "scripts/standIN_files/ALT_missing.fasta", + gfastats_asm1=os.path.join(config['Results'],"1_evaluation/{asmID}/ASSEMBLY_STATISTICS/asm1/{asmID}_gfastats.txt"), + gfastats_asm2=lambda wildcards: os.path.join(config['Results'],"1_evaluation/{asmID}/ASSEMBLY_STATISTICS/asm2/{asmID}_gfastats.txt") if samples.loc[wildcards.asmID, "ALT_present"] else "scripts/standIN_files/ALT_missing.fasta", + params: + asm2provided=lambda wildcards: "true" if samples.loc[(wildcards.asmID), "ALT_present"] else "false", + run_smudgeplot= "true" if config['smudgeplot'] else "false", + gfastats_asm2=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_gfastats_asm2.tsv"), + busco=lambda wildcards: busco_output_asm(wildcards, "asm1"), + temp_all_metrics1=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_all_metrics_temp1.tsv"), + temp_all_metrics2=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_all_metrics_temp2.tsv") + output: + gfastats_asm1=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_gfastats_asm1.tsv"), + all_metrics_table = os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_all_metrics.tsv"), + all_metrics_table_values = os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_all_metrics_only_values.tsv") + container: + None + shell: + """ + cp {input.gfastats_asm1} {output.gfastats_asm1} + cp {input.all_metrics_table} {output.all_metrics_table} + + if {params.asm2provided}; then + cp {input.gfastats_asm2} {params.gfastats_asm2} + awk -F'\t' 'BEGIN {{OFS=FS}} NR == 1 {{$2 = $2 ".asm1"}} {{print}}' {input.all_metrics_table} > {params.temp_all_metrics1} + awk -F'\t' 'BEGIN {{OFS=FS}} NR == 1 {{$2 = $2 ".asm2"}} {{print}}' {input.all_metrics_asm2} > {params.temp_all_metrics2} + paste {params.temp_all_metrics1} <(cut -f 2- {params.temp_all_metrics2}) > {output.all_metrics_table} + rm {params.temp_all_metrics1} + rm {params.temp_all_metrics2} + fi + + cut -f 2- {output.all_metrics_table} > {output.all_metrics_table_values} + """ + +rule all_metrics_final_table: + input: + all_metrics_table=expand(os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_all_metrics_only_values.tsv"), asmID=list(dictSamples.keys())), + sampleSheet= config['samplesTSV'], + config=os.path.join(workflow.basedir, "configuration/config.yaml") + params: + temp_header=os.path.join(config['Results'],"1_evaluation/finalResults/tables/temp.tsv") + output: + results=os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_all_metrics.tsv"), + newSampleSheet=os.path.join(config['Results'],"1_evaluation/finalResults/saved_configuration/savedSampleSheet.tsv"), + newConfigFile=os.path.join(config['Results'],"1_evaluation/finalResults/saved_configuration/savedConfig.yaml") + container: + None + shell: + """ + + cp {input.sampleSheet} {output.newSampleSheet} + cp {input.config} {output.newConfigFile} + + # Create a header for the concatenated file + echo -e "ASM_ID\nTotal_bp\nGC_%\nScaf\nCont\nGaps\nGaps_bp\nGaps%\nGaps/Gbp\nLongest_scaf\nScaf_auN\nScaf_N50\nScaf_L50\nScaf_N90\nScaf_L90\nLongest_cont\nCont_auN\nCont_N50\nCont_L50\nCont_N90\nCont_L90\nqv\nKmer_Compl\nBUSCO_genes\nBUSCO_C\nBUSCO_S\nBUSCO_D\nBUSCO_F\nBUSCO_D" > {params.temp_header} + paste {params.temp_header} {input.all_metrics_table} > {output.results} + rm {params.temp_header} + """ + + +rule 
IndividualResults_md: + input: + os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_gfastats_asm1.tsv"), + os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/{asmID}_genomescope_linear_plot.png"), + os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/results_smudgeplot.png") if config["smudgeplot"] else os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/{asmID}_genomescope_log_plot.png"), + os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_merqOutput.qv"), + os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_merqOutput.completeness.stats"), + os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/only_buscoScores_{asmID}_asm1.txt"), + os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_merqOutput.spectra-cn.fl.png"), + os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_merqOutput.spectra-cn.st.png"), + os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_merqOutput.{asmID}.asm1.spectra-cn.st.png"), + os.path.join(config['Results'], "1_evaluation/{asmID}/KMER_PROFILING/genomescope/results_summary.txt") + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_PYTHON_SCRIPTS.yaml") + params: + "{asmID}", + "{kmer}", + busco_lineage, + lambda wildcards: (samples.loc[(wildcards.asmID), "ALT_present"] == True), + os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/{asmID}_gfastats_asm2.tsv"), + os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/only_buscoScores_{asmID}_asm2.txt"), + config['smudgeplot'] + output: + os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_k{kmer}_markdownForReport.md") + script: + os.path.join(workflow.basedir, "scripts/report/makePDF_indivMD.py") + + +rule PretextMaps_md: + input: + PretextMap=os.path.join(config['Results'], "1_evaluation/{asmID}/HiC_MAPS/{asmID}.HiC.COMBINED.FILTERED_FullMap.png") + output: + pretextCP2keyResults=os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/{asmID}.pretext_hiC_FullMap.png"), + IndividualPretextMD=os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_PRETEXT.md") + params: + assemblyName='{asmID}' + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_PYTHON_SCRIPTS.yaml") + script: + os.path.join(workflow.basedir, "scripts/report/pretextMapsToMarkdown.py") + + +rule Table_md_all_metrics: + input: + results=os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_all_metrics.tsv") + output: + os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_transpose.tsv") + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_PYTHON_SCRIPTS.yaml") + script: + os.path.join(workflow.basedir, "scripts/report/addFullTableForReport_all_metrics.py") + +rule ColouredTable_html: + input: + os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_transpose.tsv") + params: + os.path.join(workflow.basedir, "scripts/compare_results/colouredHeatmap_legend_metrics.tsv") + output: + os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_COLOURED.html"), + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_R_SCRIPTS.yaml") + script: + os.path.join(workflow.basedir, "scripts/compare_results/fullTable_heatmap_external_metrics.R") + +rule HeatmapTable_html: + input: + os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_transpose.tsv") + params: + 
os.path.join(workflow.basedir, "scripts/compare_results/internalComparison_legend_metrics.tsv") + output: + os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_GRADIENT.html") + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_R_SCRIPTS.yaml") + script: + os.path.join(workflow.basedir, "scripts/compare_results/fullTable_heatmap_internalComparison_metrics.R") + +rule BothTables_pdf: + input: + coloured=os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_COLOURED.html"), + gradient=os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_GRADIENT.html") + params: + css=os.path.join(workflow.basedir, "scripts/report/tableOnSamePage.css") + log: + os.path.join(config['Results'], "1_evaluation/logs/ComparisonTables_createPDF.log") + output: + coloured=os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_COLOURED.pdf"), + gradient=os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_GRADIENT.pdf") + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_PYTHON_SCRIPTS.yaml") + shell: + """ + (pandoc -V papersize:a3 -V margin-top=1.5cm -V margin-left=1.5cm -V margin-right=0 -V margin-bottom=0 -c {params.css} -o {output.coloured} --pdf-engine=wkhtmltopdf {input.coloured}) &>> {log} + (pandoc -V papersize:b3 -V margin-top=1.5cm -V margin-left=1.5cm -V margin-right=0 -V margin-bottom=0 -c {params.css} \ + -o {output.gradient} --pdf-engine=wkhtmltopdf --pdf-engine-opt="-O" --pdf-engine-opt="Landscape" {input.gradient}) &>> {log} + """ + +rule Reports_md: + input: + indivMD=[expand(os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_k{kmer}_markdownForReport.md"), asmID=key, kmer=value6) for key, [value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11, value12, value13, value14, value15, value16, value17] in dictSamples.items()], + landingPage=os.path.join(workflow.basedir, "scripts/report/reportLandingPage.md"), + IndividualPretextMD=[expand(os.path.join(config['Results'],"1_evaluation/{asmID}/KEY_RESULTS/{asmID}_PRETEXT.md"), asmID=list(testDictPRETEXT.keys()))] + output: + FullMarkdown=os.path.join(config['Results'],"1_evaluation/finalResults/individual_reports/ALL_individual_REPORTS.md") + container: + None + shell: + """ + cat {input.landingPage} {input.indivMD} {input.IndividualPretextMD} >> {output.FullMarkdown} + """ + + +rule Reports_pdf: + input: + md_report=os.path.join(config['Results'],"1_evaluation/finalResults/individual_reports/ALL_individual_REPORTS.md") + output: + pdf_report=os.path.join(config['Results'],"1_evaluation/finalResults/individual_reports/ALL_individual_REPORTS.pdf") + log: + os.path.join(config['Results'], "1_evaluation/logs/ReportWithoutComparisonTables_createPDF.log") + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_PYTHON_SCRIPTS.yaml") + shell: + """ + (pandoc -o {output.pdf_report} {input.md_report} --pdf-engine=tectonic) &>> {log} + """ + + +rule ConcatAll_pdfs: + input: + pdf_report=os.path.join(config['Results'],"1_evaluation/finalResults/individual_reports/ALL_individual_REPORTS.pdf"), + coloured=os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_COLOURED.pdf"), + gradient=os.path.join(config['Results'],"1_evaluation/finalResults/tables/TABLE_OF_RESULTS_GRADIENT.pdf") + output: + pdf_report=os.path.join(config['Results'],"1_evaluation/finalResults/GEP_FINAL_REPORT.pdf"), + log: + os.path.join(config['Results'], 
"1_evaluation/logs/ConcatAll_pdfs.log") + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_PYTHON_SCRIPTS.yaml") + shell: + """ + (gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile={output.pdf_report} {input.pdf_report} {input.gradient} {input.coloured}) &>> {log} + """ + +rule make_ear: + input: + expand(os.path.join(config['Results'], "1_evaluation/kmer_profiling/{name}/genomescope/results_summary.txt"), name = merylDB_basename()), + [expand(os.path.join(config['Results'], "1_evaluation/{asmID}/KEY_RESULTS/short_summary.specific.{asmID}_asm1.txt"), asmID = key) for key, values in dictSamples.items()], + [expand(os.path.join(config['Results'],"1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA/{asmID}_merqOutput.qv"), asmID = key) for key, values in dictSamples.items()], + [expand(os.path.join(config['Results'],"1_evaluation/{asmID}/ASSEMBLY_STATISTICS/asm1/{asmID}_gfastats.txt"), asmID = key) for key, values in dictSamples.items()] + output: + os.path.join(config['Results'],"1_evaluation/finalResults/EAR_report.yaml") + conda: + os.path.join(workflow.basedir, "envs/AUXILIARY_PYTHON_SCRIPTS.yaml") + params: + samples_tsv_path = config['samplesTSV'], + merylDB = samples["merylDB_basename"].unique()[0], + busco = samples["busco_lineage"].unique()[0], + results_folder = config['Results'], + script = os.path.join(workflow.basedir, "scripts/report/make_erga_assembly_report.py"), + smudgeplot = config['smudgeplot'] + log: + os.path.join(config['Results'], "1_evaluation/logs/ear_report.log") + shell: + """ + python {params.script} {output} {params.samples_tsv_path} {params.merylDB} {params.busco} {params.results_folder} {params.smudgeplot} + """ diff --git a/scripts/ALT_missing.fasta b/scripts/ALT_missing.fasta new file mode 100644 index 0000000..e69de29 diff --git a/scripts/addFullTableForReport.py b/scripts/addFullTableForReport.py new file mode 100644 index 0000000..6ac763b --- /dev/null +++ b/scripts/addFullTableForReport.py @@ -0,0 +1,26 @@ +import pandas as pd +from tabulate import tabulate +samples=pd.read_csv(snakemake.input.results, dtype=str, index_col=0, delim_whitespace=True, skip_blank_lines=True) +# samples=samples.reset_index +turn2FloatAndMb=['Bases_Mb','Est_Size_Mb','Longest_Scaff_Mb','Scaff_N50_Mb','Scaff_NG50_Mb','Scaff_N95_Mb','Scaff_NG95_Mb','Longest_Cont_Mb','Cont_N50_Mb','Cont_NG50_Mb','Cont_N95_Mb','Cont_NG95_Mb'] +roundDecimals=['Comp_BUSCOs_%','Comp_Single_BUSCOs_%','Het_%','GC_%','QV','Completeness','Bases_Mb','Est_Size_Mb','Longest_Scaff_Mb','Scaff_N50_Mb','Scaff_NG50_Mb','Scaff_N95_Mb','Scaff_NG95_Mb','Longest_Cont_Mb','Cont_N50_Mb','Cont_NG50_Mb','Cont_N95_Mb','Cont_NG95_Mb'] +print('this is samples',samples) +sampleTransposed=samples.T +print('this is sampleTransposed',sampleTransposed) +for header in turn2FloatAndMb: + sampleTransposed[header]=sampleTransposed[header].astype(float).div(1000000) +for i in range(0,4): + sampleTransposed[roundDecimals[i]]=sampleTransposed[roundDecimals[i]].str.replace('%','') +for roundHeader in roundDecimals: + sampleTransposed[roundHeader]=sampleTransposed[roundHeader].astype(float).round(2) +with open(snakemake.output[0], "w") as out_markdown: + print("", file=out_markdown) + print("\\blandscape", file=out_markdown) + print("", file=out_markdown) + print("\\tiny", file=out_markdown) + print(tabulate(sampleTransposed.rename_axis('ASM_ID'), headers='keys',tablefmt="pipe", showindex=True), file=out_markdown) + print("\\elandscape", file=out_markdown) + print("", file=out_markdown) + +with open(snakemake.output[1], "w") 
as out_plain: + print(tabulate(sampleTransposed.rename_axis('ASM_ID'), headers='keys',tablefmt="plain", showindex=True), file=out_plain) diff --git a/scripts/assembly_stats/stats.py b/scripts/assembly_stats/stats.py new file mode 100644 index 0000000..497b6e8 --- /dev/null +++ b/scripts/assembly_stats/stats.py @@ -0,0 +1,368 @@ +import numpy as np +from itertools import groupby +import json +import sys +import csv +import os +import pandas as pd +from tabulate import tabulate + +#pd.set_option('display.float_format', lambda x: '%,g' % x) +with open(sys.argv[2]) as f: + ff=f.readlines() + +# estSizeFile = open() +# +# size_bp = estSizeFile.readlines() + +estSize=int(float(sys.argv[4])) + +if estSize == 0: + size_bp = ff[0][:-1] + print("No estimated Genome Size provided, using estimation from Genomescope2: ", size_bp) +else: + print("Estimated Genome Size provided as:", estSize, " using this size for NG and LG stats") + size_bp = estSize + +def fasta_iter(fasta_file): + """ + Takes a FASTA file, and produces a generator of Header and Sequences. + This is a memory-efficient way of analyzing a FASTA files -- without + reading the entire file into memory. + + Parameters + ---------- + fasta_file : str + The file location of the FASTA file + + Returns + ------- + header: str + The string contained in the header portion of the sequence record + (everything after the '>') + seq: str + The sequence portion of the sequence record + """ + + fh = open(fasta_file) + fa_iter = (x[1] for x in groupby(fh, lambda line: line[0] == ">")) + for header in fa_iter: + # drop the ">" + header = next(header)[1:].strip() + # join all sequence lines to one. + seq = "".join(s.upper().strip() for s in next(fa_iter)) + yield header, seq + + +def read_genome(fasta_file): + """ + Takes a FASTA file, and produces 2 lists of sequence lengths. It also + calculates the GC Content, since this is the only statistic that is not + calculated based on sequence lengths. + + Parameters + ---------- + fasta_file : str + The file location of the FASTA file + + Returns + ------ + contig_lens: list + A list of lengths of all contigs in the genome. + scaffold_lens: list + A list of lengths of all scaffolds in the genome. + gc_cont: float + The percentage of total basepairs in the genome that are either G or C. 
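+
+    Note: contig lengths are obtained by splitting each scaffold wherever
+    "NN" occurs, so the gap count reported downstream is simply the contig
+    count minus the scaffold count.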
+ """ + + gc = 0 + total_len = 0 + contig_lens = [] + scaffold_lens = [] + for _, seq in fasta_iter(fasta_file): + scaffold_lens.append(len(seq)) + if "NN" in seq: + contig_list = seq.split("NN") + else: + contig_list = [seq] + for contig in contig_list: + if len(contig): + gc += contig.count('G') + contig.count('C') + total_len += len(contig) + contig_lens.append(len(contig)) + gc_cont = (gc / total_len) * 100 +# print("this is gc_content", gc_cont) + return contig_lens, scaffold_lens, gc_cont + + +def calculate_stats(seq_lens, gc_cont): + naming = sys.argv[3] + stats = {} + seq_array = np.array(seq_lens) +# stats['Assembly:']=naming + stats['sequence_count'] = seq_array.size + testsize=stats['sequence_count'] + stats['number_of_gaps'] = 0 +# print("this is the count",naming," ", testsize) + stats['gc_content (%)'] = gc_cont + sorted_lens = seq_array[np.argsort(-seq_array)] + stats['longest (bp)'] = int(sorted_lens[0]) + testlongest= stats['longest (bp)'] +# print("this is the longest", naming," ",testlongest) + stats['shortest (bp)'] = int(sorted_lens[-1]) +# stats['median'] = np.median(sorted_lens) +# stats['mean'] = np.mean(sorted_lens) + stats['total_bps (bp)'] = int(np.sum(sorted_lens)) + testprint=stats['total_bps (bp)'] +# print("total_bp is", naming," ",testprint) + stats['estimated_size (bp)'] = int(size_bp) + csum = np.cumsum(sorted_lens) + # if stats['total_bps (bp)'] < stats['estimated_size (bp)']: + # csum_ng = np.append(csum, stats['estimated_size (bp)']) + # else: + # csum_ng=csum + for level in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 96, 97, 98, 99, 100]: + nx = int(stats['total_bps (bp)'] * (level / 100)) + csumn = min(csum[csum >= nx]) + l_level = int(np.where(csum == csumn)[0]) + 1 + stats['L' + str(level)] = l_level + for level in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 96, 97, 98, 99, 100]: +# print("the totalbps are:", stats['total_bps (bp)']) + nx = int(stats['total_bps (bp)'] * (level / 100)) +# print("this is the nx", nx) +# print("this is the csum", csum) + csumn = min(csum[csum >= nx]) +# print("this is the csumn", csumn) + l_level = int(np.where(csum == csumn)[0]) + n_level = int(sorted_lens[l_level]) + stats['N' + str(level)] = n_level +# print(level, " ", n_level) + for level in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 96, 97, 98, 99, 100]: +# print("the estbps are:", stats['estimated_size (bp)']) + ngx = int(stats['estimated_size (bp)'] * (level / 100)) +# print("this is the ngx", ngx) +# print("this is the csum", csum_ng) +# print("this is the csum", csum) +# print("this is the [csum >= ngx]", np.array(csum >= ngx)) + new_array=np.array(csum >= ngx) + # print(np.any(new_array)) + if np.any(new_array) == False: + csumng = csum[seq_array.size-1] + # print("this is the csumng", csumng) + lg_level = int(np.where(csum == csumng)[0]) + 1 + stats['LG' + str(level)] = lg_level + elif np.any(new_array) == True: + csumng = min(csum[csum >= ngx]) + # print("this is the csumng", csumng) + lg_level = int(np.where(csum == csumng)[0]) + 1 + stats['LG' + str(level)] = lg_level + for level in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 96, 97, 98, 99, 100]: + ngx = int(stats['estimated_size (bp)'] * (level / 100)) + new_array=np.array(csum >= ngx) + # print(np.any(new_array)) + if np.any(new_array) == False: + csumng = csum[seq_array.size-1] + # print("this is the csumng", csumng) + lg_level = int(np.where(csum == csumng)[0]) + ng_level 
= int(sorted_lens[lg_level]) + stats['NG' + str(level)] = ng_level + elif np.any(new_array) == True: + csumng = min(csum[csum >= ngx]) + # print("this is the csumng", csumng) + lg_level = int(np.where(csum == csumng)[0]) + ng_level = int(sorted_lens[lg_level]) + stats['NG' + str(level)] = ng_level + return stats + + +if __name__ == "__main__": +# print(size_bp) +# print(type(size_bp)) + naming = sys.argv[3] + infilename = sys.argv[1] + contig_lens, scaffold_lens, gc_cont = read_genome(infilename) +# contig_stats = calculate_stats(contig_lens, gc_cont) + scaffold_stats = calculate_stats(scaffold_lens, gc_cont) + rounded_gc = round(gc_cont, 2) +# print("this is the rounded_gc: ", rounded_gc) + contig_stats = calculate_stats(contig_lens, gc_cont) + gaps=contig_stats.get('sequence_count') - scaffold_stats.get('sequence_count') + scaffold_stats['number_of_gaps'] = gaps + contig_stats['number_of_gaps'] = gaps +# print("this is the scaffold_stats: ", scaffold_stats) +# print(scaffold_stats) +# df_scaffold_all= pd.DataFrame.from_dict(scaffold_stats, orient= 'index') +# print(df_scaffold_all) +# df2 = pd.DataFrame([scaffold_stats]).T +# print(df2) +# s = pd.Series(scaffold_stats, name=naming) +# s.index.name = 'Assembly:' +# s.reset_index() +# print(s) + scaff_seq=pd.DataFrame(scaffold_stats.items(), columns=['Assembly:', naming]) +# print("this is the scaff_seq: ", scaff_seq) + df_scaffold_top=scaff_seq.iloc[0:7,0:2] +# print("this is GC CONTENT", gc_cont) + df_scaffold_top[naming]=df_scaffold_top[naming].astype('int64').apply('{:,}'.format) +# print("this is the top: ", df_scaffold_top) + df_scaffold_top[naming][2] = rounded_gc +# print("this is df_scaffold_top[naming][2]", df_scaffold_top[naming][2]) +# print("this is the top: ", df_scaffold_top) + # df_scaffold_top=df_scaffold_top.style.hide_index() +# df_scaffold_top[naming].round(decimals=0) + # df_contig_all=pd.DataFrame(data=contig_stats) + # df_contig_top=df_contig_all.iloc[0:6,0:2] + df_scaffold_Nxx=pd.DataFrame(scaffold_stats.items(), columns=['Nxx Level (%)', 'Length of Nxx Scaffold (bp)']) + df_scaffold_Nxx=df_scaffold_Nxx.iloc[31:55,0:2] + df_scaffold_Nxx=df_scaffold_Nxx.reset_index() +# print(df_scaffold_Nxx) + df_scaffold_NGxx=pd.DataFrame(scaffold_stats.items(), columns=['NGxx Level (%)', 'Length of NGxx Scaffold (bp)']) + df_scaffold_NGxx=df_scaffold_NGxx.iloc[79:104,0:2] + df_scaffold_NGxx=df_scaffold_NGxx.reset_index() +# print(df_scaffold_NGxx) + df_scaffold_N_NG=pd.concat([df_scaffold_Nxx,df_scaffold_NGxx], axis=1) + df_scaffold_N_NG=df_scaffold_N_NG.drop(df_scaffold_N_NG.columns[[0,3]], axis = 1) + df_scaffold_N_NG['Length of Nxx Scaffold (bp)']=df_scaffold_N_NG['Length of Nxx Scaffold (bp)'].astype('int64').apply('{:,}'.format) + df_scaffold_N_NG['Length of NGxx Scaffold (bp)']=df_scaffold_N_NG['Length of NGxx Scaffold (bp)'].astype('int64').apply('{:,}'.format) + # df_scaffold_N_NG=df_scaffold_N_NG.style.hide_index() + df_scaffold_Lxx=pd.DataFrame(scaffold_stats.items(), columns=['Lxx Level (%)', 'Count of scaffolds (for Lxx Level)']) + df_scaffold_Lxx=df_scaffold_Lxx.iloc[7:31,0:2] + df_scaffold_Lxx=df_scaffold_Lxx.reset_index() +# print(df_scaffold_Nxx) + df_scaffold_LGxx=pd.DataFrame(scaffold_stats.items(), columns=['LGxx Level (%)', 'Count of scaffolds (for LGxx Level)']) + df_scaffold_LGxx=df_scaffold_LGxx.iloc[55:79,0:2] + df_scaffold_LGxx=df_scaffold_LGxx.reset_index() +# print(df_scaffold_NGxx) + df_scaffold_L_LG=pd.concat([df_scaffold_Lxx,df_scaffold_LGxx], axis=1) + 
df_scaffold_L_LG=df_scaffold_L_LG.drop(df_scaffold_L_LG.columns[[0,3]], axis = 1) + df_scaffold_L_LG['Count of scaffolds (for Lxx Level)']=df_scaffold_L_LG['Count of scaffolds (for Lxx Level)'].astype('int64').apply('{:,}'.format) + df_scaffold_L_LG['Count of scaffolds (for LGxx Level)']=df_scaffold_L_LG['Count of scaffolds (for LGxx Level)'].astype('int64').apply('{:,}'.format) +################################################################################################################ + + contig_seq=pd.DataFrame(contig_stats.items(), columns=['Assembly:', naming]) + df_contig_top=contig_seq.iloc[0:7,0:2] + df_contig_top[naming]=df_contig_top[naming].astype('int64').apply('{:,}'.format) + df_contig_top[naming][2] = rounded_gc + # df_scaffold_top=df_scaffold_top.style.hide_index() +# df_scaffold_top[naming].round(decimals=0) + # df_contig_all=pd.DataFrame(data=contig_stats) + # df_contig_top=df_contig_all.iloc[0:6,0:2] + df_contig_Nxx=pd.DataFrame(contig_stats.items(), columns=['Nxx Level (%)', 'Length of Nxx contig (bp)']) + df_contig_Nxx=df_contig_Nxx.iloc[31:55,0:2] + df_contig_Nxx=df_contig_Nxx.reset_index() +# print(df_scaffold_Nxx) + df_contig_NGxx=pd.DataFrame(contig_stats.items(), columns=['NGxx Level (%)', 'Length of NGxx contig (bp)']) + df_contig_NGxx=df_contig_NGxx.iloc[79:104,0:2] + df_contig_NGxx=df_contig_NGxx.reset_index() +# print(df_scaffold_NGxx) + df_contig_N_NG=pd.concat([df_contig_Nxx,df_contig_NGxx], axis=1) + df_contig_N_NG=df_contig_N_NG.drop(df_contig_N_NG.columns[[0,3]], axis = 1) + df_contig_N_NG['Length of Nxx contig (bp)']=df_contig_N_NG['Length of Nxx contig (bp)'].astype('int64').apply('{:,}'.format) + df_contig_N_NG['Length of NGxx contig (bp)']=df_contig_N_NG['Length of NGxx contig (bp)'].astype('int64').apply('{:,}'.format) + # df_scaffold_N_NG=df_scaffold_N_NG.style.hide_index() + df_contig_Lxx=pd.DataFrame(contig_stats.items(), columns=['Lxx Level (%)', 'Count of contig (for Lxx Level)']) + df_contig_Lxx=df_contig_Lxx.iloc[7:31,0:2] + df_contig_Lxx=df_contig_Lxx.reset_index() +# print(df_scaffold_Nxx) + df_contig_LGxx=pd.DataFrame(contig_stats.items(), columns=['LGxx Level (%)', 'Count of contig (for LGxx Level)']) + df_contig_LGxx=df_contig_LGxx.iloc[55:79,0:2] + df_contig_LGxx=df_contig_LGxx.reset_index() +# print(df_scaffold_NGxx) + df_contig_L_LG=pd.concat([df_contig_Lxx,df_contig_LGxx], axis=1) + df_contig_L_LG=df_contig_L_LG.drop(df_contig_L_LG.columns[[0,3]], axis = 1) + df_contig_L_LG['Count of contig (for Lxx Level)']=df_contig_L_LG['Count of contig (for Lxx Level)'].astype('int64').apply('{:,}'.format) + df_contig_L_LG['Count of contig (for LGxx Level)']=df_contig_L_LG['Count of contig (for LGxx Level)'].astype('int64').apply('{:,}'.format) + # df_scaffold_L_LG=df_scaffold_L_LG.style.hide_index() + # print(df_contig_top) +# print(scaffold_stats) +# stat_output = {'Contig Stats': contig_stats, +# 'Scaffold Stats': scaffold_stats} +# print(json.dumps(stat_output, indent=2, sort_keys=False)) +# scaffold_out = {scaffold_stats} +# contig_out = {contig_stats} + + +# print(json.dumps(scaffold_stats, indent=2, sort_keys=False)) +# print(json.dumps(contig_stats, indent=2, sort_keys=False)) + # dict_writer = csv.DictWriter(stat_output, fieldnames=None, delimiter='\t') + # print(dict_writer) +# df_raw = pd.DataFrame.from_dict(data, orient='index') + # print(df_raw) +# with open('statdata.txt', 'w') as outfile: +# json.dump(scaffold_stats, outfile) +# with open('contigdata.txt', 'w') as out2file: +# json.dump(contig_stats, out2file) + # scaff = 
csv.writer(open(naming + "_scaffold_stats.tsv", "w"), delimiter='\t') + # for key, val in scaffold_stats.items(): + # scaff.writerow([key, val]) + # + # contig = csv.writer(open(naming + "_contig_stats.tsv", "w"), delimiter='\t') + # for key, val in contig_stats.items(): + # + with open(sys.argv[5], 'w') as outputfile: +# # print('#' + libraryName, file=outputfile) +# # print("Total Reads Processed (Paired): " + total_processed + " ( 100 %)", file=outputfile) +# # print("Discarded reads (Paired): " + discarded + " ( "+str(discarded_perc)+"%)", file=outputfile) +# # print("Successfully Processed reads (Paired): " + successfull + " ( "+str(successfull_perc)+"%)", file=outputfile) + print(df_scaffold_top.to_string(index=False), file=outputfile) + print("", file=outputfile) + print(df_scaffold_N_NG.to_string(index=False), file=outputfile) + print("", file=outputfile) + print(df_scaffold_L_LG.to_string(index=False), file=outputfile) + + with open(sys.argv[6], 'w') as outputfile2: +# # print('#' + libraryName, file=outputfile) +# # print("Total Reads Processed (Paired): " + total_processed + " ( 100 %)", file=outputfile) +# # print("Discarded reads (Paired): " + discarded + " ( "+str(discarded_perc)+"%)", file=outputfile) +# # print("Successfully Processed reads (Paired): " + successfull + " ( "+str(successfull_perc)+"%)", file=outputfile) + print(df_contig_top.to_string(index=False), file=outputfile2) + print("", file=outputfile2) + print(df_contig_N_NG.to_string(index=False), file=outputfile2) + print("", file=outputfile2) + print(df_contig_L_LG.to_string(index=False), file=outputfile2) + + # with open(sys.argv[4], 'w') as outputRst: + # print(tabulate(df_scaffold_top, headers='keys',tablefmt="rst", showindex=False), file=outputRst) + # print("", file=outputRst) + # print(tabulate(df_scaffold_N_NG, headers='keys',tablefmt="rst", showindex=False), file=outputRst) + # print("", file=outputRst) + # print(tabulate(df_scaffold_L_LG, headers='keys',tablefmt="rst", showindex=False), file=outputRst) + # print("", file=outputRst) + # # + # with open(sys.argv[5], 'w') as outputRst2: + # print(tabulate(df_contig_top, headers='keys',tablefmt="rst", showindex=False), file=outputRst2) + # print("", file=outputRst2) + # print(tabulate(df_contig_N_NG, headers='keys',tablefmt="rst", showindex=False), file=outputRst2) + # print("", file=outputRst2) + # print(tabulate(df_contig_L_LG, headers='keys',tablefmt="rst", showindex=False), file=outputRst2) + # print("", file=outputRst2) + + # with open(sys.argv[4], 'w') as outputRst: + # print(tabulate(df_scaffold_top, headers='keys',tablefmt="pipe", showindex=False), file=outputRst) + # print("", file=outputRst) + # print(tabulate(df_scaffold_N_NG, headers='keys',tablefmt="pipe", showindex=False), file=outputRst) + # print("", file=outputRst) + # print(tabulate(df_scaffold_L_LG, headers='keys',tablefmt="pipe", showindex=False), file=outputRst) + # print("", file=outputRst) + # # + # with open(sys.argv[5], 'w') as outputRst2: + # print(tabulate(df_contig_top, headers='keys',tablefmt="pipe", showindex=False), file=outputRst2) + # print("", file=outputRst2) + # print(tabulate(df_contig_N_NG, headers='keys',tablefmt="pipe", showindex=False), file=outputRst2) + # print("", file=outputRst2) + # print(tabulate(df_contig_L_LG, headers='keys',tablefmt="pipe", showindex=False), file=outputRst2) + # print("", file=outputRst2) + # list_of_dfs=[df_scaffold_top,df_scaffold_N_NG,df_scaffold_L_LG] + # for df in list_of_dfs: + # with open('all_dfs.tsv','a') as f: + # df.to_csv(f) + # 
f.write("\n") +# with open("testok.tsv", 'w') as outputfile: +# # print('#' + libraryName, file=outputfile) +# # print("Total Reads Processed (Paired): " + total_processed + " ( 100 %)", file=outputfile) +# # print("Discarded reads (Paired): " + discarded + " ( "+str(discarded_perc)+"%)", file=outputfile) +# # print("Successfully Processed reads (Paired): " + successfull + " ( "+str(successfull_perc)+"%)", file=outputfile) +# print(tabulate(loadinTop, headers='keys',tablefmt="rst", showindex=False), file=outputfile) +# # print('', file=outputfile) +# print(tabulate(result, headers='keys', tablefmt="psql", showindex=False), file=outputfile) +# print('', file=outputfile) diff --git a/scripts/colouredHeatmap_legend.tsv b/scripts/colouredHeatmap_legend.tsv new file mode 100644 index 0000000..526c788 --- /dev/null +++ b/scripts/colouredHeatmap_legend.tsv @@ -0,0 +1,5 @@ +Gaps_per_Gb Scaff_NG50_Mb Cont_NG50_Mb QV Completeness Comp_Single_BUSCOs_% +'< 200' '> 100Mbp' '> 10Mbp' '> 50' '> 95%' '> 95%' +'200 - 1000' '10Mbp - 100Mbp' '1Mbp - 10Mbp' '40 - 50' '90% - 95%' '90% - 95%' +'1000 - 10000' '0.1Mbp - 10Mbp' '0.01Mbp - 1Mbp' '35 - 40' '80% - 90%' '80% - 90%' +'> 10000' '< 0.1Mbp' '< 0.01Mbp' '< 35' '< 80%' '< 80%' diff --git a/scripts/compare_results/colouredHeatmap_legend_metrics.tsv b/scripts/compare_results/colouredHeatmap_legend_metrics.tsv new file mode 100644 index 0000000..8775535 --- /dev/null +++ b/scripts/compare_results/colouredHeatmap_legend_metrics.tsv @@ -0,0 +1,6 @@ +Gaps_per_Gb Scaf_N50 Cont_N50 qv Kmer_Compl Comp_Single_BUSCOs_% +'< 200' '> 100Mbp' '> 10Mbp' '> 50' '> 95%' '> 95%' +'200 - 1000' '10Mbp - 100Mbp' '1Mbp - 10Mbp' '40 - 50' '90% - 95%' '90% - 95%' +'1000 - 10000' '0.1Mbp - 10Mbp' '0.01Mbp - 1Mbp' '35 - 40' '80% - 90%' '80% - 90%' +'> 10000' '< 0.1Mbp' '< 0.01Mbp' '< 35' '< 80%' '< 80%' + diff --git a/scripts/compare_results/fullTable_heatmap_external_metrics.R b/scripts/compare_results/fullTable_heatmap_external_metrics.R new file mode 100644 index 0000000..b57d3cd --- /dev/null +++ b/scripts/compare_results/fullTable_heatmap_external_metrics.R @@ -0,0 +1,144 @@ +suppressMessages(library(dplyr)) +suppressMessages(library(formattable)) + +fullTableOfStats<-read.table(file = snakemake@input[[1]], sep = "", header = TRUE, row.names=NULL, check.names = FALSE) + +fullTableOfStats$Gaps_per_Gb <- ((fullTableOfStats$Gaps / fullTableOfStats$Total_bp) *1000000000) +fullTableOfStats$Gaps_per_Gb <- as.integer(fullTableOfStats$Gaps_per_Gb) + + +customPurple='#c699e8' +customGreen='#8fc773' + +selectionOfStats_colouredHeatmap <- fullTableOfStats %>% + select(c('ASM_ID', + 'Gaps_per_Gb', 'Scaf_N50', + 'Cont_N50','qv', + 'Kmer_Compl', "Comp_Single_BUSCOs_%" = BUSCO_S, "BUSCO_genes")) %>% + mutate("Comp_Single_BUSCOs_%" = as.numeric(sub("%", "", as.character(`Comp_Single_BUSCOs_%`)))) + +sink(file = snakemake@output[[1]]) + + +#temp + +ASM_LEVEL = 'scaff' +format_table(selectionOfStats_colouredHeatmap, + align =c("l","c","c","c","c", "c", "c", "c", "c"), + list(Gaps_per_Gb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(ASM_LEVEL == 'cont', "#666666", + ifelse(between(Gaps_per_Gb,1001, 10000), "#FFCC99", + ifelse(between(Gaps_per_Gb,201 , 1000), customGreen, + ifelse(Gaps_per_Gb <= 200, customPurple, "#FF9999")))))), + Scaf_N50 = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(between(Scaf_N50,0.1, 9.99999), 
"#FFCC99", + ifelse(between(Scaf_N50,10, 99.99999), customGreen, + ifelse(Scaf_N50 >= 100, customPurple, "#FF9999"))))), + Scaf_L50 = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(between(Scaf_L50,0.1, 9.99999), "#FFCC99", + ifelse(between(Scaf_L50,10, 99.99999), customGreen, + ifelse(Scaf_L50 >= 100, customPurple, "#FF9999"))))), + Cont_N50 = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(between(Cont_N50,0.01, 0.99999), "#FFCC99", + ifelse(between(Cont_N50,1, 9.999999), customGreen, + ifelse(Cont_N50 >= 10, customPurple, "#FF9999"))))), + Kmer_Compl = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(between(Kmer_Compl,80, 89.9999), "#FFCC99", + ifelse(between(Kmer_Compl,90, 94.99999), customGreen, + ifelse(Kmer_Compl >= 95, customPurple, "#FF9999"))))), + qv = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(between(qv,35, 39.9999999999), "#FFCC99", + ifelse(between(qv,40, 49.999999999), customGreen, + ifelse(qv >= 50, customPurple, "#FF9999"))))), + + `Comp_Single_BUSCOs_%` = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(between(`Comp_Single_BUSCOs_%`,80, 89.9999), "#FFCC99", + ifelse(between(`Comp_Single_BUSCOs_%`,90, 94.99999), customGreen, + ifelse(`Comp_Single_BUSCOs_%` >= 95, customPurple, "#FF9999"))))))) + +cat('<br>') +cat("\n") +cat('<br>') + +legendTable<-read.table(file = snakemake@params[[1]], sep = "", header = TRUE, row.names=NULL, check.names = FALSE) + +format_table(legendTable, + align =c("c","c","c", "c", "c", "c", "c"), + list(Gaps_per_Gb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '150px', + `background-color` = ifelse(Gaps_per_Gb == '> 10000',"#FF9999", + ifelse(Gaps_per_Gb == '1000 - 10000',"#FFCC99", + ifelse(Gaps_per_Gb == '200 - 1000',customGreen, customPurple))))), + Scaf_N50 = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '150px', + `background-color` = ifelse(Scaf_N50 == '< 0.1Mbp',"#FF9999", + ifelse(Scaf_N50 == '0.1Mbp - 10Mbp',"#FFCC99", + ifelse(Scaf_N50 == '10Mbp - 100Mbp',customGreen, customPurple))))), + Cont_N50 = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '150px', + `background-color` = ifelse(Cont_N50 == '< 0.01Mbp',"#FF9999", + ifelse(Cont_N50 == '0.01Mbp - 1Mbp',"#FFCC99", + ifelse(Cont_N50 == '1Mbp - 10Mbp',customGreen, customPurple))))), + qv = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '100px', + `background-color` = ifelse(qv == '< 35',"#FF9999", + ifelse(qv == '35 - 40',"#FFCC99", + ifelse(qv == '40 - 50',customGreen, customPurple))))), + Kmer_Compl = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '150px', + `background-color` = ifelse(Kmer_Compl == '< 80%',"#FF9999", + ifelse(Kmer_Compl == '80% - 90%',"#FFCC99", + ifelse(Kmer_Compl == '90% - 95%',customGreen, 
customPurple))))), + `Comp_Single_BUSCOs_%` = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '220px', + `background-color` = ifelse(`Comp_Single_BUSCOs_%` == '< 80%',"#FF9999", + ifelse(`Comp_Single_BUSCOs_%` == '80% - 90%',"#FFCC99", + ifelse(`Comp_Single_BUSCOs_%` == '90% - 95%',customGreen, customPurple))))))) +sink(file = NULL) + + diff --git a/scripts/compare_results/fullTable_heatmap_internalComparison_metrics.R b/scripts/compare_results/fullTable_heatmap_internalComparison_metrics.R new file mode 100644 index 0000000..edef08f --- /dev/null +++ b/scripts/compare_results/fullTable_heatmap_internalComparison_metrics.R @@ -0,0 +1,147 @@ +suppressMessages(library(dplyr)) +suppressMessages(library(formattable)) + +fullTableOfStats<-read.table(file = snakemake@input[[1]], sep = "", header = TRUE, row.names=NULL, check.names = FALSE) +fullTableOfStats$Gaps_per_Gb <- ((fullTableOfStats$Gaps / fullTableOfStats$Total_bp) *1000000000) +fullTableOfStats$Gaps_per_Gb <- as.integer(fullTableOfStats$Gaps_per_Gb) + + +selectionOfStats_internalComparisonHeatmap <- fullTableOfStats %>% + select(c('ASM_ID', 'Total_bp','GC_%', 'Gaps_per_Gb', 'Scaf', 'Cont', + 'Longest_scaf','Scaf_N50', 'Scaf_L50', 'Scaf_N90', + 'Longest_cont', 'Cont_N50', 'Cont_L50','Cont_N90', + 'qv', 'Kmer_Compl', "Comp_BUSCOs_%" = BUSCO_C, + "Comp_Single_BUSCOs_%" = BUSCO_S)) %>% + mutate("Comp_BUSCOs_%" = as.numeric(sub("%", "", as.character(`Comp_BUSCOs_%`)))) %>% + mutate("Comp_Single_BUSCOs_%" = as.numeric(sub("%", "", as.character(`Comp_Single_BUSCOs_%`)))) + + +customBlue_max = "#2e96ff" + +customBlue_min = "#dcecfc" + +customGray_max = "#8c8c8c" + +customGray_min = "#e6e6e6" + + +sink(file = snakemake@output[[1]]) + +format_table(selectionOfStats_internalComparisonHeatmap, + align =c("l","c","c","c","c", "c", "c", "c", "c"), + list(Total_bp = color_tile(customGray_min, customGray_max), + `GC_%` = color_tile(customGray_min, customGray_max), + Gaps_per_Gb = color_tile(customBlue_max, customBlue_min), + Scaf = color_tile(customBlue_max,customBlue_min), + Cont = color_tile(customBlue_max,customBlue_min), + Longest_scaf = color_tile(customBlue_min, customBlue_max), + Scaf_N50 = color_tile(customBlue_min, customBlue_max), + Scaf_L50 = color_tile(customBlue_min, customBlue_max), + Scaf_N90 = color_tile(customBlue_min, customBlue_max), + Longest_cont = color_tile(customBlue_min, customBlue_max), + Cont_N50 = color_tile(customBlue_min, customBlue_max), + Cont_L50 = color_tile(customBlue_min, customBlue_max), + Cont_N90 = color_tile(customBlue_min, customBlue_max), + qv = color_tile(customBlue_min, customBlue_max), + Kmer_Compl = color_tile(customBlue_min, customBlue_max), + `Comp_BUSCOs_%` = color_tile(customBlue_min, customBlue_max), + `Comp_Single_BUSCOs_%` = color_tile(customBlue_min, customBlue_max))) + +cat('<br>') +cat("\n") +cat('<br>') + +legendTable<-read.table(file = snakemake@params[[1]], sep = "", header = TRUE, row.names=NULL, check.names = FALSE) + + +format_table(legendTable, + align =c("c","c","c", "c", "c", "c", "c","c","c","c", "c", "c", "c", "c","c", "c"), + list(Bases_Mb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Bases_Mb == 'Max',customGray_max, customGray_min))), + `GC_%` = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(`GC_%` == 'Max',customGray_max, 
customGray_min))), + Gaps_per_Gb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Gaps_per_Gb == 'Min',customBlue_max, customBlue_min))), + Scaf = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Scaf == 'Min',customBlue_max, customBlue_min))), + Cont = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Cont == 'Min',customBlue_max, customBlue_min))), + Longest_scaf = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Longest_scaf == 'Max',customBlue_max, customBlue_min))), + Scaf_N50 = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Scaf_N50 == 'Max',customBlue_max, customBlue_min))), + Scaf_L50 = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Scaf_L50 == 'Max',customBlue_max, customBlue_min))), + Scaf_N90 = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Scaf_N90 == 'Max',customBlue_max, customBlue_min))), + Longest_cont = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Longest_cont == 'Max',customBlue_max, customBlue_min))), + Cont_N50 = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Cont_N50 == 'Max',customBlue_max, customBlue_min))), + Cont_L50 = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Cont_L50 == 'Max',customBlue_max, customBlue_min))), + Cont_N90 = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Cont_N90 == 'Max',customBlue_max, customBlue_min))), + qv = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(qv == 'Max',customBlue_max, customBlue_min))), + Kmer_Compl = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Kmer_Compl == 'Max',customBlue_max, customBlue_min))), + `Comp_BUSCOs_%` = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(`Comp_BUSCOs_%` == 'Max',customBlue_max, customBlue_min))), + `Comp_Single_BUSCOs_%` = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(`Comp_Single_BUSCOs_%` == 'Max',customBlue_max, customBlue_min))))) + +sink(file = NULL) + + diff --git a/scripts/compare_results/internalComparison_legend_metrics.tsv b/scripts/compare_results/internalComparison_legend_metrics.tsv new file mode 100644 index 0000000..96dac4f --- /dev/null +++ b/scripts/compare_results/internalComparison_legend_metrics.tsv @@ -0,0 +1,5 @@ +Total_bp GC_% Gaps_per_Gb Scaf Cont Longest_scaf Scaf_N50 Scaf_L50 Scaf_N90 Longest_cont Cont_N50 Cont_L50 Cont_N90 qv Kmer_Compl Comp_BUSCOs_% Comp_Single_BUSCOs_% +'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 
'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max'
+'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min'
+
+
diff --git a/scripts/digest.py b/scripts/digest.py
new file mode 100644
index 0000000..ea32be0
--- /dev/null
+++ b/scripts/digest.py
@@ -0,0 +1,26 @@
+from Bio.Seq import Seq
+from Bio import SeqIO
+from Bio.Restriction import *
+
+# Combined restriction batch of the four enzymes (kept for reference; the
+# sequential digests below call each enzyme directly)
+rb = DpnII + HinfI + DdeI + MseI
+
+# First digest: cut every input sequence with DpnII
+dpnii_dig=[]
+for record in SeqIO.parse("GCA_905146935.1_idScaPyra1.1_genomic.fna", "fasta"):
+    my_seq = Seq(str(record.seq))
+    dpnii_dig.append(DpnII.catalyse(my_seq))
+
+# Flatten the per-sequence fragment tuples into one list
+test_1=list(sum(dpnii_dig, ()))
+
+# Second digest: cut each DpnII fragment with HinfI
+hinfi_dig=[]
+for i in range(len(test_1)):
+    newSeq = test_1[i]
+    hinfi_dig.append(HinfI.catalyse(newSeq))
+print(hinfi_dig)
+
+test_2=list(sum(hinfi_dig, ()))
+
+# Third digest: cut each HinfI fragment with DdeI
+ddei_dig=[]
+for i in range(len(test_2)):
+    newSeq = test_2[i]
+    ddei_dig.append(DdeI.catalyse(newSeq))
diff --git a/scripts/filter_five_end.pl b/scripts/filter_five_end.pl
new file mode 100644
index 0000000..79d3205
--- /dev/null
+++ b/scripts/filter_five_end.pl
@@ -0,0 +1,110 @@
+#!/usr/bin/perl
+use strict;
+use warnings;
+
+# Reads a name-grouped SAM stream from STDIN and, for each read, keeps the
+# alignment whose 5' end is mapped; reads without a single usable 5' alignment
+# are re-emitted with the unmapped FLAG bit set (standard Hi-C read filtering).
+
+my $prev_id = "";
+my @five;
+my @three;
+my @unmap;
+my @mid;
+my @all;
+my $counter = 0;
+
+
+while (<STDIN>){
+    chomp;
+    if (/^@/){
+        print $_."\n";
+        next;
+    }
+    my ($id, $flag, $chr_from, $loc_from, $mapq, $cigar, $d1, $d2, $d3, $read, $read_qual, @rest) = split /\t/;
+    my $bin = reverse(dec2bin($flag));
+    my @binary = split(//,$bin);
+    if ($prev_id ne $id && $prev_id ne ""){
+        if ($counter == 1){
+            if (@five == 1){
+                print $five[0]."\n";
+            }
+            else{
+                my ($id_1, $flag_1, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1) = split /\t/, $all[0];
+                my $bin_1 = reverse(dec2bin($flag_1));
+                my @binary_1 = split(//,$bin_1);
+                $binary_1[2] = 1; # set FLAG bit 0x4 (read unmapped)
+                my $bin_1_new = reverse(join("",@binary_1));
+                my $flag_1_new = bin2dec($bin_1_new);
+                print(join("\t",$id_1, $flag_1_new, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1)."\n");
+            }
+        }
+        elsif ($counter == 2 && @five == 1){
+            print $five[0]."\n";
+        }
+        else{
+            my ($id_1, $flag_1, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1) = split /\t/, $all[0];
+            my $bin_1 = reverse(dec2bin($flag_1));
+            my @binary_1 = split(//,$bin_1);
+            $binary_1[2] = 1;
+            my $bin_1_new = reverse(join("",@binary_1));
+            my $flag_1_new = bin2dec($bin_1_new);
+            print(join("\t",$id_1, $flag_1_new, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1)."\n");
+        }
+
+        $counter = 0;
+        undef @unmap;
+        undef @five;
+        undef @three;
+        undef @mid;
+        undef @all;
+    }
+
+    $counter++;
+    $prev_id = $id;
+    push @all,$_;
+    if ($binary[2]==1){
+        push @unmap,$_;
+    }
+    elsif ($binary[4]==0 && $cigar =~ m/^[0-9]*M/ || $binary[4]==1 && $cigar =~ m/.*M$/){
+        push @five, $_;
+    }
+    elsif ($binary[4]==1 && $cigar =~ m/^[0-9]*M/ || $binary[4]==0 && $cigar =~ m/.*M$/){
+        push @three, $_;
+    }
+    elsif ($cigar =~ m/^[0-9]*[HS].*M.*[HS]$/){
+        push @mid, $_;
+    }
+}
+
+if ($counter == 1){
+    if (@five == 1){
+        print $five[0]."\n";
+    }
+    else{
+        my ($id_1, $flag_1, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1) = split /\t/, $all[0];
+        my $bin_1 = reverse(dec2bin($flag_1));
+        my @binary_1 = split(//,$bin_1);
+        $binary_1[2] = 1;
+        my $bin_1_new = reverse(join("",@binary_1));
+        my $flag_1_new = bin2dec($bin_1_new);
+        print(join("\t",$id_1, $flag_1_new, 
$chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1)."\n"); + } +} +elsif ($counter == 2 && @five == 1){ + print $five[0]."\n"; +} +else{ + my ($id_1, $flag_1, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1) = split /\t/, $all[0]; + my $bin_1 = reverse(dec2bin($flag_1)); + my @binary_1 = split(//,$bin_1); + $binary_1[2] = 1; + my $bin_1_new = reverse(join("",@binary_1)); + my $flag_1_new = bin2dec($bin_1_new); + print(join("\t",$id_1, $flag_1_new, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1)."\n"); +} + +sub dec2bin { + my $str = unpack("B32", pack("N", shift)); + return $str; +} + +sub bin2dec { + return unpack("N", pack("B32", substr("0" x 32 . shift, -32))); +} diff --git a/scripts/fullTable_heatmap_external.R b/scripts/fullTable_heatmap_external.R new file mode 100644 index 0000000..3d251c8 --- /dev/null +++ b/scripts/fullTable_heatmap_external.R @@ -0,0 +1,132 @@ +suppressMessages(library(dplyr)) +suppressMessages(library(formattable)) +#suppressMessages(library(data.table)) + + +fullTableOfStats<-read.table(file = snakemake@input[[1]], sep = "", header = TRUE, row.names=NULL, check.names = FALSE) + + + +fullTableOfStats$Gaps_per_Gb <- ((fullTableOfStats$Gaps * fullTableOfStats$Est_Size_Mb)/1000) +fullTableOfStats$Gaps_per_Gb <- as.integer(fullTableOfStats$Gaps_per_Gb) + + +customPurple='#c699e8' +customGreen='#8fc773' + +selectionOfStats_colouredHeatmap <- fullTableOfStats %>% + select(c('ASM_ID','ASM_LEVEL', + 'Gaps_per_Gb', 'Scaff_NG50_Mb', + 'Cont_NG50_Mb','QV', + 'Completeness', 'Comp_Single_BUSCOs_%')) + +sink(file = snakemake@output[[1]]) +format_table(selectionOfStats_colouredHeatmap, + align =c("l","c","c","c","c", "c", "c", "c", "c"), + list(Gaps_per_Gb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(ASM_LEVEL == 'cont', "#666666", + ifelse(between(Gaps_per_Gb,1001, 10000), "#FFCC99", + ifelse(between(Gaps_per_Gb,201 , 1000), customGreen, + ifelse(Gaps_per_Gb <= 200, customPurple, "#FF9999")))))), + Scaff_NG50_Mb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(between(Scaff_NG50_Mb,0.1, 9.99999), "#FFCC99", + ifelse(between(Scaff_NG50_Mb,10, 99.99999), customGreen, + ifelse(Scaff_NG50_Mb >= 100, customPurple, "#FF9999"))))), + Cont_NG50_Mb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(between(Cont_NG50_Mb,0.01, 0.99999), "#FFCC99", + ifelse(between(Cont_NG50_Mb,1, 9.999999), customGreen, + ifelse(Cont_NG50_Mb >= 10, customPurple, "#FF9999"))))), + Completeness = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(between(Completeness,80, 89.9999), "#FFCC99", + ifelse(between(Completeness,90, 94.99999), customGreen, + ifelse(Completeness >= 95, customPurple, "#FF9999"))))), + `Comp_Single_BUSCOs_%` = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(between(`Comp_Single_BUSCOs_%`,80, 89.9999), "#FFCC99", + ifelse(between(`Comp_Single_BUSCOs_%`,90, 95), customGreen, + ifelse(`Comp_Single_BUSCOs_%` >= 95, customPurple, "#FF9999"))))), + QV = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + 
`border-radius` = "3px", + `background-color` = ifelse(between(QV,35, 39.9999999999), "#FFCC99", + ifelse(between(QV,40, 49.999999999), customGreen, + ifelse(QV >= 50, customPurple, "#FF9999"))))))) + +cat('<br>') +cat("\n") +cat('<br>') + +legendTable<-read.table(file = snakemake@params[[1]], sep = "", header = TRUE, row.names=NULL, check.names = FALSE) + +format_table(legendTable, + align =c("c","c","c", "c", "c", "c", "c"), + list(Gaps_per_Gb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '150px', + `background-color` = ifelse(Gaps_per_Gb == '> 10000',"#FF9999", + ifelse(Gaps_per_Gb == '1000 - 10000',"#FFCC99", + ifelse(Gaps_per_Gb == '200 - 1000',customGreen, customPurple))))), + Scaff_NG50_Mb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '150px', + `background-color` = ifelse(Scaff_NG50_Mb == '< 0.1Mbp',"#FF9999", + ifelse(Scaff_NG50_Mb == '0.1Mbp - 10Mbp',"#FFCC99", + ifelse(Scaff_NG50_Mb == '10Mbp - 100Mbp',customGreen, customPurple))))), + Cont_NG50_Mb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '150px', + `background-color` = ifelse(Cont_NG50_Mb == '< 0.01Mbp',"#FF9999", + ifelse(Cont_NG50_Mb == '0.01Mbp - 1Mbp',"#FFCC99", + ifelse(Cont_NG50_Mb == '1Mbp - 10Mbp',customGreen, customPurple))))), + QV = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '100px', + `background-color` = ifelse(QV == '< 35',"#FF9999", + ifelse(QV == '35 - 40',"#FFCC99", + ifelse(QV == '40 - 50',customGreen, customPurple))))), + Completeness = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '150px', + `background-color` = ifelse(Completeness == '< 80%',"#FF9999", + ifelse(Completeness == '80% - 90%',"#FFCC99", + ifelse(Completeness == '90% - 95%',customGreen, customPurple))))), + `Comp_Single_BUSCOs_%` = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + "font.size" = "12px", + "width" = '220px', + `background-color` = ifelse(`Comp_Single_BUSCOs_%` == '< 80%',"#FF9999", + ifelse(`Comp_Single_BUSCOs_%` == '80% - 90%',"#FFCC99", + ifelse(`Comp_Single_BUSCOs_%` == '90% - 95%',customGreen, customPurple))))))) +sink(file = NULL) diff --git a/scripts/fullTable_heatmap_internalComparison.R b/scripts/fullTable_heatmap_internalComparison.R new file mode 100644 index 0000000..e1d5ae7 --- /dev/null +++ b/scripts/fullTable_heatmap_internalComparison.R @@ -0,0 +1,140 @@ +suppressMessages(library(dplyr)) +suppressMessages(library(formattable)) + +fullTableOfStats<-read.table(file = snakemake@input[[1]], sep = "", header = TRUE, row.names=NULL, check.names = FALSE) + +fullTableOfStats$Gaps_per_Gb <- ((fullTableOfStats$Gaps * fullTableOfStats$Est_Size_Mb)/1000) +fullTableOfStats$Gaps_per_Gb <- as.integer(fullTableOfStats$Gaps_per_Gb) + + + +selectionOfStats_internalComparisonHeatmap <- fullTableOfStats %>% + select(c('ASM_ID','ASM_LEVEL', 'Bases_Mb', 'Het_%','GC_%', 'Gaps_per_Gb', 'Scaff', 'Cont', + 'Longest_Scaff_Mb','Scaff_NG50_Mb', 'Scaff_NG95_Mb', + 'Longest_Cont_Mb', 'Cont_NG50_Mb','Cont_NG95_Mb', + 'QV', 'Completeness', + 'Comp_BUSCOs_%','Comp_Single_BUSCOs_%')) + + + + +customBlue_max = "#2e96ff" + 
+customBlue_min = "#dcecfc" + + +customGray_max = "#8c8c8c" + +customGray_min = "#e6e6e6" + +sink(file = snakemake@output[[1]]) + +format_table(selectionOfStats_internalComparisonHeatmap, + align =c("l","c","c","c","c", "c", "c", "c", "c"), + list(Bases_Mb = color_tile(customGray_min, customGray_max), + `Het_%` = color_tile(customGray_min, customGray_max), + `GC_%` = color_tile(customGray_min, customGray_max), + Gaps_per_Gb = color_tile(customBlue_max, customBlue_min), + Scaff = color_tile(customBlue_max,customBlue_min), + Cont = color_tile(customBlue_max,customBlue_min), + Longest_Scaff_Mb = color_tile(customBlue_min, customBlue_max), + Scaff_NG50_Mb = color_tile(customBlue_min, customBlue_max), + Scaff_NG95_Mb = color_tile(customBlue_min, customBlue_max), + Longest_Cont_Mb = color_tile(customBlue_min, customBlue_max), + Cont_NG50_Mb = color_tile(customBlue_min, customBlue_max), + Cont_NG95_Mb = color_tile(customBlue_min, customBlue_max), + QV = color_tile(customBlue_min, customBlue_max), + Completeness = color_tile(customBlue_min, customBlue_max), + `Comp_BUSCOs_%` = color_tile(customBlue_min, customBlue_max), + `Comp_Single_BUSCOs_%` = color_tile(customBlue_min, customBlue_max))) + +cat('<br>') +cat("\n") +cat('<br>') + +legendTable<-read.table(file = snakemake@params[[1]], sep = "", header = TRUE, row.names=NULL, check.names = FALSE) + +format_table(legendTable, + align =c("c","c","c", "c", "c", "c", "c","c","c","c", "c", "c", "c", "c","c", "c"), + list(Bases_Mb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Bases_Mb == 'Max',customGray_max, customGray_min))), + `Het_%` = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(`Het_%` == 'Max',customGray_max, customGray_min))), + `GC_%` = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(`GC_%` == 'Max',customGray_max, customGray_min))), + Gaps_per_Gb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Gaps_per_Gb == 'Min',customBlue_max, customBlue_min))), + Scaff = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Scaff == 'Min',customBlue_max, customBlue_min))), + Cont = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Cont == 'Min',customBlue_max, customBlue_min))), + Longest_Scaff_Mb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Longest_Scaff_Mb == 'Max',customBlue_max, customBlue_min))), + Scaff_NG50_Mb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Scaff_NG50_Mb == 'Max',customBlue_max, customBlue_min))), + Scaff_NG95_Mb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Scaff_NG95_Mb == 'Max',customBlue_max, customBlue_min))), + Longest_Cont_Mb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Longest_Cont_Mb == 'Max',customBlue_max, customBlue_min))), + Cont_NG50_Mb = formatter("span", + style = 
~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Cont_NG50_Mb == 'Max',customBlue_max, customBlue_min))), + Cont_NG95_Mb = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Cont_NG95_Mb == 'Max',customBlue_max, customBlue_min))), + QV = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(QV == 'Max',customBlue_max, customBlue_min))), + Completeness = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(Completeness == 'Max',customBlue_max, customBlue_min))), + `Comp_BUSCOs_%` = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(`Comp_BUSCOs_%` == 'Max',customBlue_max, customBlue_min))), + `Comp_Single_BUSCOs_%` = formatter("span", + style = ~style(display = "block", + padding = "0 4px", + `border-radius` = "3px", + `background-color` = ifelse(`Comp_Single_BUSCOs_%` == 'Max',customBlue_max, customBlue_min))))) + +sink(file = NULL) diff --git a/scripts/internalComparison_legend.tsv b/scripts/internalComparison_legend.tsv new file mode 100644 index 0000000..b8e16cc --- /dev/null +++ b/scripts/internalComparison_legend.tsv @@ -0,0 +1,3 @@ +Bases_Mb Het_% GC_% Gaps_per_Gb Scaff Cont Longest_Scaff_Mb Scaff_NG50_Mb Scaff_NG95_Mb Longest_Cont_Mb Cont_NG50_Mb Cont_NG95_Mb QV Completeness Comp_BUSCOs_% Comp_Single_BUSCOs_% +'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' 'Max' +'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' 'Min' diff --git a/scripts/makePDF_indivMD.py b/scripts/makePDF_indivMD.py new file mode 100644 index 0000000..e69de29 diff --git a/scripts/pretextMapsToMarkdown.py b/scripts/pretextMapsToMarkdown.py new file mode 100644 index 0000000..bd87669 --- /dev/null +++ b/scripts/pretextMapsToMarkdown.py @@ -0,0 +1,6 @@ +import shutil +shutil.copyfile(snakemake.input.PretextMap, snakemake.output.pretextCP2keyResults) +with open(snakemake.output.IndividualPretextMD, "w") as out: + print("", file=out) + print("###",snakemake.params.assemblyName," HiC Heatmap", file=out) + print("{ height=30% }", file=out) diff --git a/scripts/process_HiC/filter_five_end.pl b/scripts/process_HiC/filter_five_end.pl new file mode 100644 index 0000000..79d3205 --- /dev/null +++ b/scripts/process_HiC/filter_five_end.pl @@ -0,0 +1,110 @@ +#!/usr/bin/perl +use strict; +use warnings; + +my $prev_id = ""; +my @five; +my @three; +my @unmap; +my @mid; +my @all; +my $counter = 0; + + +while (<STDIN>){ + chomp; + if (/^@/){ + print $_."\n"; + next; + } + my ($id, $flag, $chr_from, $loc_from, $mapq, $cigar, $d1, $d2, $d3, $read, $read_qual, @rest) = split /\t/; + my $bin = reverse(dec2bin($flag)); + my @binary = split(//,$bin); + if ($prev_id ne $id && $prev_id ne ""){ + if ($counter == 1){ + if (@five == 1){ + print $five[0]."\n"; + } + else{ + my ($id_1, $flag_1, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1) = split /\t/, $all[0]; + my $bin_1 = reverse(dec2bin($flag_1)); + my @binary_1 = split(//,$bin_1); + $binary_1[2] = 1; + my $bin_1_new = reverse(join("",@binary_1)); + my $flag_1_new = bin2dec($bin_1_new); + print(join("\t",$id_1, $flag_1_new, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, 
$read_1, $read_qual_1, @rest_1)."\n"); + } + } + elsif ($counter == 2 && @five == 1){ + print $five[0]."\n"; + } + else{ + my ($id_1, $flag_1, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1) = split /\t/, $all[0]; + my $bin_1 = reverse(dec2bin($flag_1)); + my @binary_1 = split(//,$bin_1); + $binary_1[2] = 1; + my $bin_1_new = reverse(join("",@binary_1)); + my $flag_1_new = bin2dec($bin_1_new); + print(join("\t",$id_1, $flag_1_new, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1)."\n"); + } + + $counter = 0; + undef @unmap; + undef @five; + undef @three; + undef @mid; + undef @all; + } + + $counter++; + $prev_id = $id; + push @all,$_; + if ($binary[2]==1){ + push @unmap,$_; + } + elsif ($binary[4]==0 && $cigar =~ m/^[0-9]*M/ || $binary[4]==1 && $cigar =~ m/.*M$/){ + push @five, $_; + } + elsif ($binary[4]==1 && $cigar =~ m/^[0-9]*M/ || $binary[4]==0 && $cigar =~ m/.*M$/){ + push @three, $_; + } + elsif ($cigar =~ m/^[0-9]*[HS].*M.*[HS]$/){ + push @mid, $_; + } +} + +if ($counter == 1){ + if (@five == 1){ + print $five[0]."\n"; + } + else{ + my ($id_1, $flag_1, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1) = split /\t/, $all[0]; + my $bin_1 = reverse(dec2bin($flag_1)); + my @binary_1 = split(//,$bin_1); + $binary_1[2] = 1; + my $bin_1_new = reverse(join("",@binary_1)); + my $flag_1_new = bin2dec($bin_1_new); + print(join("\t",$id_1, $flag_1_new, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1)."\n"); + } +} +elsif ($counter == 2 && @five == 1){ + print $five[0]."\n"; +} +else{ + my ($id_1, $flag_1, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1) = split /\t/, $all[0]; + my $bin_1 = reverse(dec2bin($flag_1)); + my @binary_1 = split(//,$bin_1); + $binary_1[2] = 1; + my $bin_1_new = reverse(join("",@binary_1)); + my $flag_1_new = bin2dec($bin_1_new); + print(join("\t",$id_1, $flag_1_new, $chr_from_1, $loc_from_1, $mapq_1, $cigar_1, $d1_1, $d2_1, $d3_1, $read_1, $read_qual_1, @rest_1)."\n"); +} + +sub dec2bin { + my $str = unpack("B32", pack("N", shift)); + return $str; +} + +sub bin2dec { + return unpack("N", pack("B32", substr("0" x 32 . shift, -32))); +} diff --git a/scripts/process_HiC/two_read_bam_combiner.pl b/scripts/process_HiC/two_read_bam_combiner.pl new file mode 100644 index 0000000..96e6149 --- /dev/null +++ b/scripts/process_HiC/two_read_bam_combiner.pl @@ -0,0 +1,124 @@ +#!/usr/bin/perl + +use strict; + +MAIN : { + + my ($read1_bam, $read2_bam) = @ARGV; + + if ((not defined $read1_bam) || + (not defined $read2_bam)) { + die ("Usage: ./two_read_bam_combiner.pl <read 1 bam> <read 2 bam>\n"); + } + + open(FILE1,"samtools view -h $read1_bam |"); + open(FILE2,"samtools view -h $read2_bam |"); + + my $line1 = <FILE1>; + my $line2 = <FILE2>; + + my $counter = 0; + my $new_counter = 0; + + while (defined $line1) { + ## the line directly below this needed to be modified slightly from the genotyping pipeline ## + if ($line1 =~ /^(\@)SQ/){ + if ($line1 ne $line2){print $line1;print $line2; die ("inconsistent bam header.");} + else{ + print $line1; + } + $line1 = <FILE1>; + $line2 = <FILE2>; + next; + } + + $counter++; + if ($counter == ($new_counter + 1000000)) { + print STDERR $counter . 
"\n"; + $new_counter = $counter; + } + + chomp $line1; + chomp $line2; + + my ($id1, $flag1, $chr_from1, $loc_from1, $mapq1, $cigar1, $d1_1, $d2_1, $d3_1, $read1, $read_qual1, @rest1) = split(/\t/,$line1); + my ($id2, $flag2, $chr_from2, $loc_from2, $mapq2, $cigar2, $d1_2, $d2_2, $d3_2, $read2, $read_qual2, @rest2) = split(/\t/,$line2); + + if ($id1 ne $id2) { + die ("The id's of the two files do not match up at line number $counter\n"); + } + + my $bin1 = reverse(dec2bin($flag1)); + my $bin2 = reverse(dec2bin($flag2)); + + my @binary1 = split(//,$bin1); + my @binary2 = split(//,$bin2); + + my $trouble = 0; + if (($binary1[2] == 1) || ($mapq1 < 10)) { + $trouble = 1; + } + if (($binary2[2]== 1) || ($mapq2 < 10)) { + $trouble = 1; + } + + my $proper_pair1; + my $proper_pair2; + my $dist1; + my $dist2; + + if (($binary1[2] == 0) && ($binary2[2] == 0)) { + $proper_pair1 = 1; + $proper_pair2 = 1; + if ($chr_from1 eq $chr_from2) { + my $dist = abs($loc_from1 - $loc_from2); + if ($loc_from1 >= $loc_from2) { + $dist1 = -1*$dist; + $dist2 = $dist; + } else { + $dist1 = $dist; + $dist2 = -1*$dist; + } + } else { + $dist1 = 0; + $dist2 = 0; + } + } else { + $proper_pair1 = 0; + $proper_pair2 = 0; + $dist1 = 0; + $dist2 = 0; + } + + my $new_bin1 = join("","000000000000000000000",$binary1[10],$binary1[9],"0","0","1",$binary2[4],$binary1[4],$binary2[2],$binary1[2],$proper_pair1,"1"); + my $new_bin2 = join("","000000000000000000000",$binary2[10],$binary2[9],"0","1","0",$binary1[4],$binary2[4],$binary1[2],$binary2[2],$proper_pair2,"1"); + + my $new_flag1 = bin2dec($new_bin1); + my $new_flag2 = bin2dec($new_bin2); + + unless ($trouble == 1) { + + print(join("\t",$id1,$new_flag1,$chr_from1,$loc_from1,$mapq1,$cigar1,$chr_from2,$loc_from2,$dist1,$read1,$read_qual1,@rest1) . "\n"); + print(join("\t",$id2,$new_flag2,$chr_from2,$loc_from2,$mapq2,$cigar2,$chr_from1,$loc_from1,$dist2,$read2,$read_qual2,@rest2) . "\n"); + + } + + $line1 = <FILE1>; + $line2 = <FILE2>; + + } + +} + + + +sub dec2bin { + + my $str = unpack("B32", pack("N", shift)); + return $str; + +} + +sub bin2dec { + return unpack("N", pack("B32", substr("0" x 32 . 
shift, -32))); +} diff --git a/scripts/report/addFullTableForReport.py b/scripts/report/addFullTableForReport.py new file mode 100644 index 0000000..67e52bc --- /dev/null +++ b/scripts/report/addFullTableForReport.py @@ -0,0 +1,26 @@ +import pandas as pd +from tabulate import tabulate +samples=pd.read_csv(snakemake.input.results, dtype=str, index_col=0, delim_whitespace=True, skip_blank_lines=True) +# samples=samples.reset_index +turn2FloatAndMb=['Bases_Mb','Est_Size_Mb','Longest_Scaff_Mb','Scaff_N50_Mb','Scaff_NG50_Mb','Scaff_N95_Mb','Scaff_NG95_Mb','Longest_Cont_Mb','Cont_N50_Mb','Cont_NG50_Mb','Cont_N95_Mb','Cont_NG95_Mb'] +roundDecimals=['Comp_BUSCOs_%','Comp_Single_BUSCOs_%','Het_%','GC_%','QV','Completeness','Bases_Mb','Est_Size_Mb','Longest_Scaff_Mb','Scaff_N50_Mb','Scaff_NG50_Mb','Scaff_N95_Mb','Scaff_NG95_Mb','Longest_Cont_Mb','Cont_N50_Mb','Cont_NG50_Mb','Cont_N95_Mb','Cont_NG95_Mb'] +# print('this is samples',samples) +sampleTransposed=samples.T +# print('this is sampleTransposed',sampleTransposed) +for header in turn2FloatAndMb: + sampleTransposed[header]=sampleTransposed[header].astype(float).div(1000000) +for i in range(0,4): + sampleTransposed[roundDecimals[i]]=sampleTransposed[roundDecimals[i]].str.replace('%','') +for roundHeader in roundDecimals: + sampleTransposed[roundHeader]=sampleTransposed[roundHeader].astype(float).round(2) +with open(snakemake.output[0], "w") as out_markdown: + print("", file=out_markdown) + print("\\blandscape", file=out_markdown) + print("", file=out_markdown) + print("\\tiny", file=out_markdown) + print(tabulate(sampleTransposed.rename_axis('ASM_ID'), headers='keys',tablefmt="pipe", showindex=True), file=out_markdown) + print("\\elandscape", file=out_markdown) + print("", file=out_markdown) + +with open(snakemake.output[1], "w") as out_plain: + print(tabulate(sampleTransposed.rename_axis('ASM_ID'), headers='keys',tablefmt="plain", showindex=True), file=out_plain) diff --git a/scripts/report/addFullTableForReport_all_metrics.py b/scripts/report/addFullTableForReport_all_metrics.py new file mode 100644 index 0000000..93a2f05 --- /dev/null +++ b/scripts/report/addFullTableForReport_all_metrics.py @@ -0,0 +1,8 @@ +import pandas as pd +from tabulate import tabulate +samples=pd.read_csv(snakemake.input.results, dtype=str, index_col=0, delim_whitespace=True, skip_blank_lines=True) + +sampleTransposed=samples.T + +with open(snakemake.output[0], "w") as out_plain: + print(tabulate(sampleTransposed.rename_axis('ASM_ID'), headers='keys',tablefmt="plain", showindex=True), file=out_plain) diff --git a/scripts/report/makeAllMetricsTable.sh b/scripts/report/makeAllMetricsTable.sh new file mode 100755 index 0000000..2ade08b --- /dev/null +++ b/scripts/report/makeAllMetricsTable.sh @@ -0,0 +1,47 @@ +#usage: ASM ID, gfastats, qv, busco, completeness, output + +#asm_id="AssemblyX" +asm_id=$1 +#gfastats="/scratch/galeov97/begendiv/bApuApu/results3/1_evaluation/AssemblyX.illu/ASSEMBLY_STATISTICS/AssemblyX.illu_gfastats.txt" +gfastats=$2 +#qv="/scratch/galeov97/begendiv/bApuApu/results3/1_evaluation/AssemblyX.illu/QV.KMER-COMPLETENESS.CN-SPECTRA/AssemblyX.illu_merqOutput.qv" +qv=$3 +#busco="/scratch/galeov97/begendiv/bApuApu/results3/1_evaluation/AssemblyX.illu/BUSCOs/AssemblyX.illu/short_summary.specific.aves_odb10.AssemblyX.illu.txt" +busco=$4 +#completeness="/scratch/galeov97/begendiv/bApuApu/results3/1_evaluation/AssemblyX.illu/QV.KMER-COMPLETENESS.CN-SPECTRA/AssemblyX.illu_merqOutput.completeness.stats" +completeness=$5 +asm_number=$6 +output=$7 + +echo -e 
"ASM_ID\t$asm_id" >> $output +total_bp="$(grep 'Total scaffold length:' "${gfastats}" | awk '{print $4}')" +echo -e "Total_bp\t$total_bp" >> $output +echo -e "GC_%\t$(grep 'GC content' "${gfastats}" | awk '{print $4}')" >> $output +echo -e "Scaf\t$(grep '# scaffolds' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Cont\t$(grep '# contigs' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Gaps\t$(grep '# gaps' "${gfastats}" | awk '{print $5}')" >> $output +gaps_bp="$(grep 'Total gap length' "${gfastats}" | awk '{print $6}')" +echo -e "Gaps_bp\t$gaps_bp" >> $output +echo -e "Gaps%\t$(awk "BEGIN { printf \"%.3f\", ($gaps_bp / $total_bp) * 100 }")" >> $output +echo -e "Gaps/Gbp\t$(awk "BEGIN { printf \"%.3f\", ($gaps_bp / $total_bp) * 1000000000 }")" >> $output +echo -e "Longest_scaf\t$(grep 'Largest scaffold' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Scaf_auN\t$(grep 'Scaffold auN:' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Scaf_N50\t$(grep -m 1 'Scaffold N50:' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Scaf_L50\t$(grep -m 1 'Scaffold L50:' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Scaf_N90\t$(grep 'Scaffold N90:' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Scaf_L90\t$(grep 'Scaffold L90:' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Longest_cont\t$(grep 'Largest contig' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Cont_auN\t$(grep 'Contig auN:' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Cont_N50\t$(grep -m 1 'Contig N50:' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Cont_L50\t$(grep -m 1 'Contig L50:' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Cont_N90\t$(grep 'Contig N90:' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "Cont_L90\t$(grep 'Contig L90:' "${gfastats}" | awk '{print $3}')" >> $output +echo -e "qv\t$(head -n ${asm_number} ${qv} | tail -n 1 | awk '{print $4}')" >> $output +echo -e "Kmer_Compl\t$(awk {'print $5'} $completeness | head -n 1)" >> $output +echo -e "BUSCO_genes\t$(grep 'Complete BUSCOs' ${busco} | awk '{print $1}')" >> $output +echo -e "BUSCO_C\t$(grep 'C:' ${busco} | awk -F'[:\\[,]' {'print $2'})" >> $output +echo -e "BUSCO_S\t$(grep 'S:' ${busco} | awk -F'[:\\[,]' {'print $4'})" >> $output +echo -e "BUSCO_D\t$(grep 'D:' ${busco} | awk -F'[:\\[,\\]]' {'print $6'})" >> $output +echo -e "BUSCO_F\t$(grep 'F:' ${busco} | awk -F'[:\\[,]' {'print $8'})" >> $output +echo -e "BUSCO_M\t$(grep 'M:' ${busco} | awk -F'[:\\[,]' {'print $10'})" >> $output + diff --git a/scripts/report/makePDF_indivMD.py b/scripts/report/makePDF_indivMD.py new file mode 100644 index 0000000..da1a153 --- /dev/null +++ b/scripts/report/makePDF_indivMD.py @@ -0,0 +1,262 @@ +import os +import numpy as np +import pandas as pd +from tabulate import tabulate +import re + +gfastats_asm1=snakemake.input[0] + +# extract gfastats values +def extract_gfastats_values(content, keys): + return [re.findall(f"{key}: (.+)", content)[0] for key in keys] + +keys = ["Total scaffold length", + "GC content %", + "# gaps in scaffolds", + "Total gap length in scaffolds", + "# scaffolds", + "Largest scaffold", + "Scaffold N50", + "Scaffold L50", + "Scaffold L90", + "# contigs", + "Largest contig", + "Contig N50", + "Contig L50", + "Contig L90", + "Expected genome size", +] + +with open(gfastats_asm1, 'r') as file: + content = file.read() + data = extract_gfastats_values(content, keys) + +gfastats_table = pd.DataFrame({"Metric": keys, "asm1": data}) +expected_genome_size = 
gfastats_table[gfastats_table["Metric"] == "Expected genome size"]["asm1"].values[0] + +gfastats_table = gfastats_table.iloc[:-1] + + +genomescope_linear=snakemake.input[1] + +genomescope_log = "" +smudgeplot_plot = "" +run_smudgeplot = snakemake.params[6] +if run_smudgeplot: + smudgeplot_plot=snakemake.input[2] +else: + genomescope_log=snakemake.input[2] + + +QV_score=snakemake.input[3] +QV_score_table = pd.read_csv(QV_score, dtype=str, delim_whitespace=True, skip_blank_lines=True, names=["assembly_name","Assembly_Only", "Total_Kmers", "QV_Score", "Error"]) +QV_score_table = QV_score_table.drop(['Error'], axis=1).set_index(['assembly_name']) + +kmer_completeness=snakemake.input[4] +kmer_completeness_table = pd.read_csv(kmer_completeness, dtype=str, delim_whitespace=True, skip_blank_lines=True, names=["assembly_name","all","Kmers_Assembly", "Kmers_Reads", "%"]) +kmer_completeness_table = kmer_completeness_table.drop(["all"], axis=1).set_index(['assembly_name']) + +busco_asm1=snakemake.input[5] + +with open(busco_asm1) as scoresOnlyFile: + lines = scoresOnlyFile.readlines() + lines = [line.rstrip() for line in lines] + +#create the busco table: +percentage_data = re.split(r'[,\[\]]', lines[0]) +percentage_values = {item.split(':')[0]: item.split(':')[1].split('[')[0] for item in percentage_data if len(item) > 0} + +names = ["Complete", "Complete and single-copy", "Complete and duplicated", "Fragmented", "Missing", "Total searched groups"] +letters = list(percentage_values.keys()) +numbers = [] +percentages = [] + +# Iterate through the data starting from the second line +i = 0 +for line in lines[1:]: + parts = line.split('\t') + number = int(parts[0]) + + # Extract the corresponding percentage from the dictionary + percentage_key = letters[i] # Using the first letter as the key to match the percentage + percentage = float(percentage_values.get(percentage_key, '0.0').rstrip('%')) + + numbers.append(number) + percentages.append(percentage) + + i += 1 + + # Create a pandas DataFrame +busco_table = pd.DataFrame({ + 'Metric': names, + 'asm1 #': numbers, + 'asm1 %': percentages +}) + +kmer_flat=snakemake.input[6] +kmer_stacked=snakemake.input[7] +kmer_stacked_asm1=snakemake.input[8] +input_gscopeSum=snakemake.input[9] + +with open(input_gscopeSum, "r") as f: + for line in f: + if 'Heterozygous (ab)' in line: + heterozygous_ab_value = float(line.split()[3][:-1]) + heterozygous_ab_value = round(heterozygous_ab_value, 3) + break + + +params_assemblyID=snakemake.params[0] + +params_merylKmer=snakemake.params[1] + +params_buscoDB=snakemake.params[2] + +params_asm2provided=snakemake.params[3] + +number_haploptypes=2 if params_asm2provided else 1 + +if params_asm2provided: + gfastats_asm2=snakemake.params[4] + + with open(gfastats_asm2, 'r') as file: + content = file.read() + data = extract_gfastats_values(content, keys) + + gfastats_asm2_table = pd.DataFrame({"Metric": keys, "asm2": data}) + + gfastats_asm2_table = gfastats_asm2_table.iloc[:-1] + gfastats_table = pd.concat([gfastats_table, gfastats_asm2_table["asm2"]], axis=1) + + busco_asm2=snakemake.params[5] + + with open(busco_asm2) as scoresOnlyFile: + lines = scoresOnlyFile.readlines() + lines = [line.rstrip() for line in lines] + + percentages = [] + numbers = [] + # Iterate through the data starting from the second line + i = 0 + for line in lines[1:]: + parts = line.split('\t') + number = int(parts[0]) + + # Extract the corresponding percentage from the dictionary + percentage_key = letters[i] # Using the first letter as the key to match the 
percentage + percentage = float(percentage_values.get(percentage_key, '0.0').rstrip('%')) + + numbers.append(number) + percentages.append(percentage) + + i += 1 + + busco_table_2 = pd.DataFrame({ + 'Metric': names, + 'asm2 #': numbers, + 'asm2 %': percentages + }) + + busco_table = pd.merge(busco_table, busco_table_2, on='Metric') + print (busco_table) + + +else: + gfastats_asm2="" + busco_asm2="" + +output_file=snakemake.output[0] + +with open(output_file, 'w') as outFile: +# print("---", file=outFile) +# print("geometry: b3paper, margin=1.3cm", file=outFile) +# print("classoption:", file=outFile) +# print(" - table", file=outFile) +# print(" - twocolumn", file=outFile) +# print("urlcolor: blue", file=outFile) +# print("colorlinks: true", file=outFile) +# print("header-includes:", file=outFile) +# print("- |", file=outFile) +# print(" ```{=latex}", file=outFile) +# print(r" \rowcolors{2}{gray!10}{gray!25}", file=outFile) +# print(r" \usepackage{longtable}", file=outFile) +# print(r" \usepackage{supertabular}", file=outFile) +# print(r" \let\longtable\supertabular", file=outFile) +# print(r" \let\endlongtable\endsupertabular", file=outFile) +# print(r" \let\endhead\relax", file=outFile) +# print(r" ```", file=outFile) +# print("output: pdf_document", file=outFile) +# print("---", file=outFile) + print("", file=outFile) + print("\\Large", file=outFile) + print("\\twocolumn", file=outFile) + print("# \\LARGE Assembly ID:", params_assemblyID, file=outFile) + print("\\hrule", file=outFile) # Add a horizontal line + print("Database built with kmer size: ", params_merylKmer, "bp\n", file=outFile) + print("Number of haploptypes/assemblies: ", number_haploptypes, file=outFile) + print("\nExpected genome size: ", expected_genome_size, file=outFile) + print("\nHeterozygous: ", heterozygous_ab_value, " %", file=outFile) + print("", file=outFile) + print("\\large", file=outFile) + print("## QV Score", file=outFile) + print(tabulate(QV_score_table, headers='keys',tablefmt="pipe", showindex=True), file=outFile) + print("", file=outFile) + print("\\large", file=outFile) + print("## Kmer Completeness", file=outFile) + print(tabulate(kmer_completeness_table, headers='keys',tablefmt="pipe", showindex=True), file=outFile) + print("", file=outFile) + print("## BUSCOv5 (database: ", params_buscoDB, ")", file=outFile) + print(tabulate(busco_table, headers='keys',tablefmt="pipe", showindex=False, disable_numparse=True), file=outFile) + print("## Genomescope2 Profile (Linear)", file=outFile) + print("{ width=38% }", file=outFile) + print("", file=outFile) + + if run_smudgeplot: + print("## Smudgeplot Profile", file=outFile) + print("{ width=38% }", file=outFile) + print("", file=outFile) + else: + print("## Genomescope2 Profile (Log)", file=outFile) + print("{ width=38% }", file=outFile) + print("\\", file=outFile) + + print("\\pagebreak", file=outFile) + print("\\", file=outFile) + print("\\Large", file=outFile) + print("", file=outFile) + print("", file=outFile) + print("", file=outFile) + print("## Genome Statistics with gfastats", file=outFile) + print(tabulate(gfastats_table, headers='keys',tablefmt="pipe", showindex=False, disable_numparse=True), file=outFile) + print("", file=outFile) + print("", file=outFile) + print("\\", file=outFile) + print("\\", file=outFile) + print("\\large", file=outFile) + print("", file=outFile) + print("\\large", file=outFile) + print("", file=outFile) + print("\\large", file=outFile) + print("", file=outFile) + + + if not params_asm2provided: + print("## K-mer Multiplicity 
asm1 Only (Flat)", file=outFile) + print("{ width=43% }", file=outFile) + print("\\", file=outFile) + print("\\Large", file=outFile) + print("", file=outFile) + + print("## K-mer Multiplicity asm1 Only (Stacked)", file=outFile) + print("{ width=43% }", file=outFile) + print("\\", file=outFile) + print("\\Large", file=outFile) + print("", file=outFile) + + if params_asm2provided: + print("## K-mer Multiplicity asm1 + asm2 (Stacked)", file=outFile) + print("{ width=43% }", file=outFile) + print("\\", file=outFile) + print("\\Large", file=outFile) + print("", file=outFile) + diff --git a/scripts/report/make_erga_assembly_report.py b/scripts/report/make_erga_assembly_report.py new file mode 100644 index 0000000..f684f7b --- /dev/null +++ b/scripts/report/make_erga_assembly_report.py @@ -0,0 +1,80 @@ +import os +import sys +import pandas as pd + +def create_yaml_file(output_file_path, samples_tsv_path, merylDB, buscoDataBaseName, results_folder, smudgeplot): + genomescope_version = "2.0" + smudgeplot_version = "0.2.5" + + + yaml_content = f'''# This is the yaml file for generating the ERGA Assembly Report (EAR) using the make_EAR.py script (https://github.com/ERGA-consortium/EARs) +# Please complete the required information pointed as #<Insert ...> +# The file [example]rCarCar2_EAR.yaml contains an example of a completed yaml file (https://github.com/ERGA-consortium/EARs) + +# GENERAL INFORMATION +ToLID: #<Insert ToLID> +Species: #<Insert species name> +Submitter: #<Insert submitter full name> +Affiliation: #<Insert affiliation> +Tag: #<Insert tag> # valid tags are ERGA-Pilot, ERGA-BGE, ERGA-Satellite + + +# SEQUENCING DATA +DATA: + - #<Insert data type>: #<insert data coverage> # if coverage is not available, leave it empty + +# GENOME PROFILING DATA +PROFILING: + GenomeScope: + version: {genomescope_version} + results_folder: {results_folder}/1_evaluation/kmer_profiling/{merylDB}/genomescope/ +''' + if smudgeplot: + yaml_content += f''' Smudgeplot: + version: {smudgeplot_version} + results_folder: {results_folder}/1_evaluation/kmer_profiling/{merylDB}/smudgeplot/ +''' + + yaml_content += f'''# ASSEMBLY PIPELINE +''' + + samples = pd.read_csv(samples_tsv_path, dtype=str, index_col=False, delim_whitespace=True, skip_blank_lines=True) + samples=samples.set_index(['ID']) + + for asmID, row in samples.iterrows(): + assembly_level = "" + if row['ASM_LEVEL'] == "cont": + assembly_level = "CONTIGGING" + elif row['ASM_LEVEL'] == "scaf": + assembly_level = "SCAFFOLDING" + + yaml_content += f'''{assembly_level}: + {asmID}: # this name was autocompleted by GEP based on input data, check the correct naming of the tool + version: #<Insert tool version> # if not available, leave it empty + Pri: # this name was autocompleted by GEP, check the correct naming, valid types are hap1 (or hap2 if available), pri (or alt if available), collapsed... + gfastats--nstar-report_txt: {results_folder}/1_evaluation/{asmID}/ASSEMBLY_STATISTICS/asm1/{asmID}_gfastats.txt + busco_short_summary_txt: {results_folder}/1_evaluation/{asmID}/BUSCOs/{asmID}/asm1/short_summary.specific.{buscoDataBaseName}_odb10.{asmID}.txt + merqury_folder: {results_folder}/1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA +''' + + if (row['ALT_asm'] != "None") and (not pd.isna(row['ALT_asm'])): + yaml_content += f''' Alt: # this name was autocompleted by GEP, check the correct naming, valid types are hap1 (or hap2 if available), pri (or alt if available), collapsed... 
+ gfastats--nstar-report_txt: {results_folder}/1_evaluation/{asmID}/ASSEMBLY_STATISTICS/asm2/{asmID}_gfastats.txt + busco_short_summary_txt: {results_folder}/1_evaluation/{asmID}/BUSCOs/{asmID}/asm2/short_summary.specific.{buscoDataBaseName}_odb10.{asmID}.txt + merqury_folder: {results_folder}/1_evaluation/{asmID}/QV.KMER-COMPLETENESS.CN-SPECTRA + +''' + else: yaml_content += "\n" + + with open(output_file_path, 'w') as file: + file.write(yaml_content) + + +if __name__ == "__main__": + output_file_path = sys.argv[1] + samples_tsv_path = sys.argv[2] + merylDB = sys.argv[3] + buscoDataBaseName = sys.argv[4] + results_folder = sys.argv[5] + smudgeplot = True if sys.argv[6] == "True" else False + create_yaml_file(output_file_path, samples_tsv_path, merylDB, buscoDataBaseName, results_folder, smudgeplot) diff --git a/scripts/report/pretextMapsToMarkdown.py b/scripts/report/pretextMapsToMarkdown.py new file mode 100644 index 0000000..bd87669 --- /dev/null +++ b/scripts/report/pretextMapsToMarkdown.py @@ -0,0 +1,6 @@ +import shutil +shutil.copyfile(snakemake.input.PretextMap, snakemake.output.pretextCP2keyResults) +with open(snakemake.output.IndividualPretextMD, "w") as out: + print("", file=out) + print("###",snakemake.params.assemblyName," HiC Heatmap", file=out) + print("{ height=30% }", file=out) diff --git a/scripts/report/reportLandingPage.md b/scripts/report/reportLandingPage.md new file mode 100644 index 0000000..f72f3be --- /dev/null +++ b/scripts/report/reportLandingPage.md @@ -0,0 +1,19 @@ +--- +geometry: b3paper, margin=1.3cm +classoption: + - table + - twocolumn +urlcolor: blue +colorlinks: true +header-includes: +- | + ```{=latex} + \rowcolors{2}{gray!10}{gray!25} + \usepackage{longtable} + \usepackage{supertabular} + \let\longtable\supertabular + \let\endlongtable\endsupertabular + \let\endhead\relax + ``` +output: pdf_document +--- diff --git a/scripts/report/tableOnSamePage.css b/scripts/report/tableOnSamePage.css new file mode 100644 index 0000000..ad4e7ad --- /dev/null +++ b/scripts/report/tableOnSamePage.css @@ -0,0 +1,3 @@ +<style> +.main-container { width: 1200px; max-width:2800px;} +</style> diff --git a/scripts/reportLandingPage.md b/scripts/reportLandingPage.md new file mode 100644 index 0000000..f72f3be --- /dev/null +++ b/scripts/reportLandingPage.md @@ -0,0 +1,19 @@ +--- +geometry: b3paper, margin=1.3cm +classoption: + - table + - twocolumn +urlcolor: blue +colorlinks: true +header-includes: +- | + ```{=latex} + \rowcolors{2}{gray!10}{gray!25} + \usepackage{longtable} + \usepackage{supertabular} + \let\longtable\supertabular + \let\endlongtable\endsupertabular + \let\endhead\relax + ``` +output: pdf_document +--- diff --git a/scripts/reportTABLELandingPage.md b/scripts/reportTABLELandingPage.md new file mode 100644 index 0000000..f3c9eab --- /dev/null +++ b/scripts/reportTABLELandingPage.md @@ -0,0 +1,19 @@ +--- +geometry: b3paper, margin=1.3cm +classoption: + - table + - onecolumn +urlcolor: blue +colorlinks: true +header-includes: +- | + ```{=latex} + \rowcolors{2}{gray!10}{gray!25} + \usepackage{longtable} + \let\endhead\relax + \usepackage{pdflscape} + \newcommand{\blandscape}{\begin{landscape}} + \newcommand{\elandscape}{\end{landscape}} + ``` +output: pdf_document +--- diff --git a/scripts/standIN_files/ALT_missing.fasta b/scripts/standIN_files/ALT_missing.fasta new file mode 100644 index 0000000..e69de29 diff --git a/scripts/standIN_files/snakemake_missing.txt b/scripts/standIN_files/snakemake_missing.txt new file mode 100644 index 0000000..e69de29 diff --git 
a/scripts/stats.py b/scripts/stats.py
new file mode 100644
index 0000000..497b6e8
--- /dev/null
+++ b/scripts/stats.py
@@ -0,0 +1,368 @@
+import numpy as np
+from itertools import groupby
+import json
+import sys
+import csv
+import os
+import pandas as pd
+from tabulate import tabulate
+
+#pd.set_option('display.float_format', lambda x: '%,g' % x)
+with open(sys.argv[2]) as f:
+    ff=f.readlines()
+
+# estSizeFile = open()
+#
+# size_bp = estSizeFile.readlines()
+
+estSize=int(float(sys.argv[4]))
+
+if estSize == 0:
+    size_bp = ff[0][:-1]
+    print("No estimated Genome Size provided, using estimation from Genomescope2: ", size_bp)
+else:
+    print("Estimated Genome Size provided as:", estSize, " using this size for NG and LG stats")
+    size_bp = estSize
+
+def fasta_iter(fasta_file):
+    """
+    Takes a FASTA file and produces a generator of (header, sequence) records.
+    This is a memory-efficient way of analyzing a FASTA file -- without
+    reading the entire file into memory.
+
+    Parameters
+    ----------
+    fasta_file : str
+        The file location of the FASTA file
+
+    Returns
+    -------
+    header: str
+        The string contained in the header portion of the sequence record
+        (everything after the '>')
+    seq: str
+        The sequence portion of the sequence record
+
+    """
+
+    fh = open(fasta_file)
+    fa_iter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
+    for header in fa_iter:
+        # drop the ">"
+        header = next(header)[1:].strip()
+        # join all sequence lines to one.
+        seq = "".join(s.upper().strip() for s in next(fa_iter))
+        yield header, seq
+
+
+def read_genome(fasta_file):
+    """
+    Takes a FASTA file and produces two lists of sequence lengths. It also
+    calculates the GC content, since this is the only statistic that is not
+    calculated based on sequence lengths.
+
+    Parameters
+    ----------
+    fasta_file : str
+        The file location of the FASTA file
+
+    Returns
+    -------
+    contig_lens: list
+        A list of lengths of all contigs in the genome.
+    scaffold_lens: list
+        A list of lengths of all scaffolds in the genome.
+    gc_cont: float
+        The percentage of total basepairs in the genome that are either G or C.
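+
+    Example
+    -------
+    Illustrative only (not produced by the pipeline): for a FASTA holding a
+    single scaffold with sequence "ACGTNNACGT", this returns
+    scaffold_lens == [10], contig_lens == [4, 4] and gc_cont == 50.0,
+    since the two-'N' gap is excluded from both the contig lengths and the
+    GC denominator (4 G/C bases out of 8 contig bases).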
+ """ + + gc = 0 + total_len = 0 + contig_lens = [] + scaffold_lens = [] + for _, seq in fasta_iter(fasta_file): + scaffold_lens.append(len(seq)) + if "NN" in seq: + contig_list = seq.split("NN") + else: + contig_list = [seq] + for contig in contig_list: + if len(contig): + gc += contig.count('G') + contig.count('C') + total_len += len(contig) + contig_lens.append(len(contig)) + gc_cont = (gc / total_len) * 100 +# print("this is gc_content", gc_cont) + return contig_lens, scaffold_lens, gc_cont + + +def calculate_stats(seq_lens, gc_cont): + naming = sys.argv[3] + stats = {} + seq_array = np.array(seq_lens) +# stats['Assembly:']=naming + stats['sequence_count'] = seq_array.size + testsize=stats['sequence_count'] + stats['number_of_gaps'] = 0 +# print("this is the count",naming," ", testsize) + stats['gc_content (%)'] = gc_cont + sorted_lens = seq_array[np.argsort(-seq_array)] + stats['longest (bp)'] = int(sorted_lens[0]) + testlongest= stats['longest (bp)'] +# print("this is the longest", naming," ",testlongest) + stats['shortest (bp)'] = int(sorted_lens[-1]) +# stats['median'] = np.median(sorted_lens) +# stats['mean'] = np.mean(sorted_lens) + stats['total_bps (bp)'] = int(np.sum(sorted_lens)) + testprint=stats['total_bps (bp)'] +# print("total_bp is", naming," ",testprint) + stats['estimated_size (bp)'] = int(size_bp) + csum = np.cumsum(sorted_lens) + # if stats['total_bps (bp)'] < stats['estimated_size (bp)']: + # csum_ng = np.append(csum, stats['estimated_size (bp)']) + # else: + # csum_ng=csum + for level in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 96, 97, 98, 99, 100]: + nx = int(stats['total_bps (bp)'] * (level / 100)) + csumn = min(csum[csum >= nx]) + l_level = int(np.where(csum == csumn)[0]) + 1 + stats['L' + str(level)] = l_level + for level in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 96, 97, 98, 99, 100]: +# print("the totalbps are:", stats['total_bps (bp)']) + nx = int(stats['total_bps (bp)'] * (level / 100)) +# print("this is the nx", nx) +# print("this is the csum", csum) + csumn = min(csum[csum >= nx]) +# print("this is the csumn", csumn) + l_level = int(np.where(csum == csumn)[0]) + n_level = int(sorted_lens[l_level]) + stats['N' + str(level)] = n_level +# print(level, " ", n_level) + for level in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 96, 97, 98, 99, 100]: +# print("the estbps are:", stats['estimated_size (bp)']) + ngx = int(stats['estimated_size (bp)'] * (level / 100)) +# print("this is the ngx", ngx) +# print("this is the csum", csum_ng) +# print("this is the csum", csum) +# print("this is the [csum >= ngx]", np.array(csum >= ngx)) + new_array=np.array(csum >= ngx) + # print(np.any(new_array)) + if np.any(new_array) == False: + csumng = csum[seq_array.size-1] + # print("this is the csumng", csumng) + lg_level = int(np.where(csum == csumng)[0]) + 1 + stats['LG' + str(level)] = lg_level + elif np.any(new_array) == True: + csumng = min(csum[csum >= ngx]) + # print("this is the csumng", csumng) + lg_level = int(np.where(csum == csumng)[0]) + 1 + stats['LG' + str(level)] = lg_level + for level in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 96, 97, 98, 99, 100]: + ngx = int(stats['estimated_size (bp)'] * (level / 100)) + new_array=np.array(csum >= ngx) + # print(np.any(new_array)) + if np.any(new_array) == False: + csumng = csum[seq_array.size-1] + # print("this is the csumng", csumng) + lg_level = int(np.where(csum == csumng)[0]) + ng_level 
= int(sorted_lens[lg_level]) + stats['NG' + str(level)] = ng_level + elif np.any(new_array) == True: + csumng = min(csum[csum >= ngx]) + # print("this is the csumng", csumng) + lg_level = int(np.where(csum == csumng)[0]) + ng_level = int(sorted_lens[lg_level]) + stats['NG' + str(level)] = ng_level + return stats + + +if __name__ == "__main__": +# print(size_bp) +# print(type(size_bp)) + naming = sys.argv[3] + infilename = sys.argv[1] + contig_lens, scaffold_lens, gc_cont = read_genome(infilename) +# contig_stats = calculate_stats(contig_lens, gc_cont) + scaffold_stats = calculate_stats(scaffold_lens, gc_cont) + rounded_gc = round(gc_cont, 2) +# print("this is the rounded_gc: ", rounded_gc) + contig_stats = calculate_stats(contig_lens, gc_cont) + gaps=contig_stats.get('sequence_count') - scaffold_stats.get('sequence_count') + scaffold_stats['number_of_gaps'] = gaps + contig_stats['number_of_gaps'] = gaps +# print("this is the scaffold_stats: ", scaffold_stats) +# print(scaffold_stats) +# df_scaffold_all= pd.DataFrame.from_dict(scaffold_stats, orient= 'index') +# print(df_scaffold_all) +# df2 = pd.DataFrame([scaffold_stats]).T +# print(df2) +# s = pd.Series(scaffold_stats, name=naming) +# s.index.name = 'Assembly:' +# s.reset_index() +# print(s) + scaff_seq=pd.DataFrame(scaffold_stats.items(), columns=['Assembly:', naming]) +# print("this is the scaff_seq: ", scaff_seq) + df_scaffold_top=scaff_seq.iloc[0:7,0:2] +# print("this is GC CONTENT", gc_cont) + df_scaffold_top[naming]=df_scaffold_top[naming].astype('int64').apply('{:,}'.format) +# print("this is the top: ", df_scaffold_top) + df_scaffold_top[naming][2] = rounded_gc +# print("this is df_scaffold_top[naming][2]", df_scaffold_top[naming][2]) +# print("this is the top: ", df_scaffold_top) + # df_scaffold_top=df_scaffold_top.style.hide_index() +# df_scaffold_top[naming].round(decimals=0) + # df_contig_all=pd.DataFrame(data=contig_stats) + # df_contig_top=df_contig_all.iloc[0:6,0:2] + df_scaffold_Nxx=pd.DataFrame(scaffold_stats.items(), columns=['Nxx Level (%)', 'Length of Nxx Scaffold (bp)']) + df_scaffold_Nxx=df_scaffold_Nxx.iloc[31:55,0:2] + df_scaffold_Nxx=df_scaffold_Nxx.reset_index() +# print(df_scaffold_Nxx) + df_scaffold_NGxx=pd.DataFrame(scaffold_stats.items(), columns=['NGxx Level (%)', 'Length of NGxx Scaffold (bp)']) + df_scaffold_NGxx=df_scaffold_NGxx.iloc[79:104,0:2] + df_scaffold_NGxx=df_scaffold_NGxx.reset_index() +# print(df_scaffold_NGxx) + df_scaffold_N_NG=pd.concat([df_scaffold_Nxx,df_scaffold_NGxx], axis=1) + df_scaffold_N_NG=df_scaffold_N_NG.drop(df_scaffold_N_NG.columns[[0,3]], axis = 1) + df_scaffold_N_NG['Length of Nxx Scaffold (bp)']=df_scaffold_N_NG['Length of Nxx Scaffold (bp)'].astype('int64').apply('{:,}'.format) + df_scaffold_N_NG['Length of NGxx Scaffold (bp)']=df_scaffold_N_NG['Length of NGxx Scaffold (bp)'].astype('int64').apply('{:,}'.format) + # df_scaffold_N_NG=df_scaffold_N_NG.style.hide_index() + df_scaffold_Lxx=pd.DataFrame(scaffold_stats.items(), columns=['Lxx Level (%)', 'Count of scaffolds (for Lxx Level)']) + df_scaffold_Lxx=df_scaffold_Lxx.iloc[7:31,0:2] + df_scaffold_Lxx=df_scaffold_Lxx.reset_index() +# print(df_scaffold_Nxx) + df_scaffold_LGxx=pd.DataFrame(scaffold_stats.items(), columns=['LGxx Level (%)', 'Count of scaffolds (for LGxx Level)']) + df_scaffold_LGxx=df_scaffold_LGxx.iloc[55:79,0:2] + df_scaffold_LGxx=df_scaffold_LGxx.reset_index() +# print(df_scaffold_NGxx) + df_scaffold_L_LG=pd.concat([df_scaffold_Lxx,df_scaffold_LGxx], axis=1) + 
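# The stats dict preserves insertion order: entries 0-6 are the summary
+    # block, 7-30 the Lxx counts, 31-54 the Nxx lengths, 55-78 the LGxx
+    # counts and 79-102 the NGxx lengths (7 + 4*24 = 103 entries in total).
+    # The iloc windows used for these tables slice those blocks out; the
+    # drop() below removes the two leftover 'index' columns (positions 0
+    # and 3) introduced by reset_index() before the tables are printed.
+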
df_scaffold_L_LG=df_scaffold_L_LG.drop(df_scaffold_L_LG.columns[[0,3]], axis = 1) + df_scaffold_L_LG['Count of scaffolds (for Lxx Level)']=df_scaffold_L_LG['Count of scaffolds (for Lxx Level)'].astype('int64').apply('{:,}'.format) + df_scaffold_L_LG['Count of scaffolds (for LGxx Level)']=df_scaffold_L_LG['Count of scaffolds (for LGxx Level)'].astype('int64').apply('{:,}'.format) +################################################################################################################ + + contig_seq=pd.DataFrame(contig_stats.items(), columns=['Assembly:', naming]) + df_contig_top=contig_seq.iloc[0:7,0:2] + df_contig_top[naming]=df_contig_top[naming].astype('int64').apply('{:,}'.format) + df_contig_top[naming][2] = rounded_gc + # df_scaffold_top=df_scaffold_top.style.hide_index() +# df_scaffold_top[naming].round(decimals=0) + # df_contig_all=pd.DataFrame(data=contig_stats) + # df_contig_top=df_contig_all.iloc[0:6,0:2] + df_contig_Nxx=pd.DataFrame(contig_stats.items(), columns=['Nxx Level (%)', 'Length of Nxx contig (bp)']) + df_contig_Nxx=df_contig_Nxx.iloc[31:55,0:2] + df_contig_Nxx=df_contig_Nxx.reset_index() +# print(df_scaffold_Nxx) + df_contig_NGxx=pd.DataFrame(contig_stats.items(), columns=['NGxx Level (%)', 'Length of NGxx contig (bp)']) + df_contig_NGxx=df_contig_NGxx.iloc[79:104,0:2] + df_contig_NGxx=df_contig_NGxx.reset_index() +# print(df_scaffold_NGxx) + df_contig_N_NG=pd.concat([df_contig_Nxx,df_contig_NGxx], axis=1) + df_contig_N_NG=df_contig_N_NG.drop(df_contig_N_NG.columns[[0,3]], axis = 1) + df_contig_N_NG['Length of Nxx contig (bp)']=df_contig_N_NG['Length of Nxx contig (bp)'].astype('int64').apply('{:,}'.format) + df_contig_N_NG['Length of NGxx contig (bp)']=df_contig_N_NG['Length of NGxx contig (bp)'].astype('int64').apply('{:,}'.format) + # df_scaffold_N_NG=df_scaffold_N_NG.style.hide_index() + df_contig_Lxx=pd.DataFrame(contig_stats.items(), columns=['Lxx Level (%)', 'Count of contig (for Lxx Level)']) + df_contig_Lxx=df_contig_Lxx.iloc[7:31,0:2] + df_contig_Lxx=df_contig_Lxx.reset_index() +# print(df_scaffold_Nxx) + df_contig_LGxx=pd.DataFrame(contig_stats.items(), columns=['LGxx Level (%)', 'Count of contig (for LGxx Level)']) + df_contig_LGxx=df_contig_LGxx.iloc[55:79,0:2] + df_contig_LGxx=df_contig_LGxx.reset_index() +# print(df_scaffold_NGxx) + df_contig_L_LG=pd.concat([df_contig_Lxx,df_contig_LGxx], axis=1) + df_contig_L_LG=df_contig_L_LG.drop(df_contig_L_LG.columns[[0,3]], axis = 1) + df_contig_L_LG['Count of contig (for Lxx Level)']=df_contig_L_LG['Count of contig (for Lxx Level)'].astype('int64').apply('{:,}'.format) + df_contig_L_LG['Count of contig (for LGxx Level)']=df_contig_L_LG['Count of contig (for LGxx Level)'].astype('int64').apply('{:,}'.format) + # df_scaffold_L_LG=df_scaffold_L_LG.style.hide_index() + # print(df_contig_top) +# print(scaffold_stats) +# stat_output = {'Contig Stats': contig_stats, +# 'Scaffold Stats': scaffold_stats} +# print(json.dumps(stat_output, indent=2, sort_keys=False)) +# scaffold_out = {scaffold_stats} +# contig_out = {contig_stats} + + +# print(json.dumps(scaffold_stats, indent=2, sort_keys=False)) +# print(json.dumps(contig_stats, indent=2, sort_keys=False)) + # dict_writer = csv.DictWriter(stat_output, fieldnames=None, delimiter='\t') + # print(dict_writer) +# df_raw = pd.DataFrame.from_dict(data, orient='index') + # print(df_raw) +# with open('statdata.txt', 'w') as outfile: +# json.dump(scaffold_stats, outfile) +# with open('contigdata.txt', 'w') as out2file: +# json.dump(contig_stats, out2file) + # scaff = 
csv.writer(open(naming + "_scaffold_stats.tsv", "w"), delimiter='\t') + # for key, val in scaffold_stats.items(): + # scaff.writerow([key, val]) + # + # contig = csv.writer(open(naming + "_contig_stats.tsv", "w"), delimiter='\t') + # for key, val in contig_stats.items(): + # + with open(sys.argv[5], 'w') as outputfile: +# # print('#' + libraryName, file=outputfile) +# # print("Total Reads Processed (Paired): " + total_processed + " ( 100 %)", file=outputfile) +# # print("Discarded reads (Paired): " + discarded + " ( "+str(discarded_perc)+"%)", file=outputfile) +# # print("Successfully Processed reads (Paired): " + successfull + " ( "+str(successfull_perc)+"%)", file=outputfile) + print(df_scaffold_top.to_string(index=False), file=outputfile) + print("", file=outputfile) + print(df_scaffold_N_NG.to_string(index=False), file=outputfile) + print("", file=outputfile) + print(df_scaffold_L_LG.to_string(index=False), file=outputfile) + + with open(sys.argv[6], 'w') as outputfile2: +# # print('#' + libraryName, file=outputfile) +# # print("Total Reads Processed (Paired): " + total_processed + " ( 100 %)", file=outputfile) +# # print("Discarded reads (Paired): " + discarded + " ( "+str(discarded_perc)+"%)", file=outputfile) +# # print("Successfully Processed reads (Paired): " + successfull + " ( "+str(successfull_perc)+"%)", file=outputfile) + print(df_contig_top.to_string(index=False), file=outputfile2) + print("", file=outputfile2) + print(df_contig_N_NG.to_string(index=False), file=outputfile2) + print("", file=outputfile2) + print(df_contig_L_LG.to_string(index=False), file=outputfile2) + + # with open(sys.argv[4], 'w') as outputRst: + # print(tabulate(df_scaffold_top, headers='keys',tablefmt="rst", showindex=False), file=outputRst) + # print("", file=outputRst) + # print(tabulate(df_scaffold_N_NG, headers='keys',tablefmt="rst", showindex=False), file=outputRst) + # print("", file=outputRst) + # print(tabulate(df_scaffold_L_LG, headers='keys',tablefmt="rst", showindex=False), file=outputRst) + # print("", file=outputRst) + # # + # with open(sys.argv[5], 'w') as outputRst2: + # print(tabulate(df_contig_top, headers='keys',tablefmt="rst", showindex=False), file=outputRst2) + # print("", file=outputRst2) + # print(tabulate(df_contig_N_NG, headers='keys',tablefmt="rst", showindex=False), file=outputRst2) + # print("", file=outputRst2) + # print(tabulate(df_contig_L_LG, headers='keys',tablefmt="rst", showindex=False), file=outputRst2) + # print("", file=outputRst2) + + # with open(sys.argv[4], 'w') as outputRst: + # print(tabulate(df_scaffold_top, headers='keys',tablefmt="pipe", showindex=False), file=outputRst) + # print("", file=outputRst) + # print(tabulate(df_scaffold_N_NG, headers='keys',tablefmt="pipe", showindex=False), file=outputRst) + # print("", file=outputRst) + # print(tabulate(df_scaffold_L_LG, headers='keys',tablefmt="pipe", showindex=False), file=outputRst) + # print("", file=outputRst) + # # + # with open(sys.argv[5], 'w') as outputRst2: + # print(tabulate(df_contig_top, headers='keys',tablefmt="pipe", showindex=False), file=outputRst2) + # print("", file=outputRst2) + # print(tabulate(df_contig_N_NG, headers='keys',tablefmt="pipe", showindex=False), file=outputRst2) + # print("", file=outputRst2) + # print(tabulate(df_contig_L_LG, headers='keys',tablefmt="pipe", showindex=False), file=outputRst2) + # print("", file=outputRst2) + # list_of_dfs=[df_scaffold_top,df_scaffold_N_NG,df_scaffold_L_LG] + # for df in list_of_dfs: + # with open('all_dfs.tsv','a') as f: + # df.to_csv(f) + # 
f.write("\n") +# with open("testok.tsv", 'w') as outputfile: +# # print('#' + libraryName, file=outputfile) +# # print("Total Reads Processed (Paired): " + total_processed + " ( 100 %)", file=outputfile) +# # print("Discarded reads (Paired): " + discarded + " ( "+str(discarded_perc)+"%)", file=outputfile) +# # print("Successfully Processed reads (Paired): " + successfull + " ( "+str(successfull_perc)+"%)", file=outputfile) +# print(tabulate(loadinTop, headers='keys',tablefmt="rst", showindex=False), file=outputfile) +# # print('', file=outputfile) +# print(tabulate(result, headers='keys', tablefmt="psql", showindex=False), file=outputfile) +# print('', file=outputfile) diff --git a/scripts/tableOnSamePage.css b/scripts/tableOnSamePage.css new file mode 100644 index 0000000..ad4e7ad --- /dev/null +++ b/scripts/tableOnSamePage.css @@ -0,0 +1,3 @@ +<style> +.main-container { width: 1200px; max-width:2800px;} +</style> diff --git a/scripts/two_read_bam_combiner.pl b/scripts/two_read_bam_combiner.pl new file mode 100644 index 0000000..96e6149 --- /dev/null +++ b/scripts/two_read_bam_combiner.pl @@ -0,0 +1,124 @@ +#!/usr/bin/perl + +use strict; + +MAIN : { + + my ($read1_bam, $read2_bam) = @ARGV; + + if ((not defined $read1_bam) || + (not defined $read2_bam)) { + die ("Usage: ./two_read_bam_combiner.pl <read 1 bam> <read 2 bam>\n"); + } + + open(FILE1,"samtools view -h $read1_bam |"); + open(FILE2,"samtools view -h $read2_bam |"); + + my $line1 = <FILE1>; + my $line2 = <FILE2>; + + my $counter = 0; + my $new_counter = 0; + + while (defined $line1) { + ## the line directly below this needed to be modified slightly from the genotyping pipeline ## + if ($line1 =~ /^(\@)SQ/){ + if ($line1 ne $line2){print $line1;print $line2; die ("inconsistent bam header.");} + else{ + print $line1; + } + $line1 = <FILE1>; + $line2 = <FILE2>; + next; + } + + $counter++; + if ($counter == ($new_counter + 1000000)) { + print STDERR $counter . 
"\n"; + $new_counter = $counter; + } + + chomp $line1; + chomp $line2; + + my ($id1, $flag1, $chr_from1, $loc_from1, $mapq1, $cigar1, $d1_1, $d2_1, $d3_1, $read1, $read_qual1, @rest1) = split(/\t/,$line1); + my ($id2, $flag2, $chr_from2, $loc_from2, $mapq2, $cigar2, $d1_2, $d2_2, $d3_2, $read2, $read_qual2, @rest2) = split(/\t/,$line2); + + if ($id1 ne $id2) { + die ("The id's of the two files do not match up at line number $counter\n"); + } + + my $bin1 = reverse(dec2bin($flag1)); + my $bin2 = reverse(dec2bin($flag2)); + + my @binary1 = split(//,$bin1); + my @binary2 = split(//,$bin2); + + my $trouble = 0; + if (($binary1[2] == 1) || ($mapq1 < 10)) { + $trouble = 1; + } + if (($binary2[2]== 1) || ($mapq2 < 10)) { + $trouble = 1; + } + + my $proper_pair1; + my $proper_pair2; + my $dist1; + my $dist2; + + if (($binary1[2] == 0) && ($binary2[2] == 0)) { + $proper_pair1 = 1; + $proper_pair2 = 1; + if ($chr_from1 eq $chr_from2) { + my $dist = abs($loc_from1 - $loc_from2); + if ($loc_from1 >= $loc_from2) { + $dist1 = -1*$dist; + $dist2 = $dist; + } else { + $dist1 = $dist; + $dist2 = -1*$dist; + } + } else { + $dist1 = 0; + $dist2 = 0; + } + } else { + $proper_pair1 = 0; + $proper_pair2 = 0; + $dist1 = 0; + $dist2 = 0; + } + + my $new_bin1 = join("","000000000000000000000",$binary1[10],$binary1[9],"0","0","1",$binary2[4],$binary1[4],$binary2[2],$binary1[2],$proper_pair1,"1"); + my $new_bin2 = join("","000000000000000000000",$binary2[10],$binary2[9],"0","1","0",$binary1[4],$binary2[4],$binary1[2],$binary2[2],$proper_pair2,"1"); + + my $new_flag1 = bin2dec($new_bin1); + my $new_flag2 = bin2dec($new_bin2); + + unless ($trouble == 1) { + + print(join("\t",$id1,$new_flag1,$chr_from1,$loc_from1,$mapq1,$cigar1,$chr_from2,$loc_from2,$dist1,$read1,$read_qual1,@rest1) . "\n"); + print(join("\t",$id2,$new_flag2,$chr_from2,$loc_from2,$mapq2,$cigar2,$chr_from1,$loc_from1,$dist2,$read2,$read_qual2,@rest2) . "\n"); + + } + + $line1 = <FILE1>; + $line2 = <FILE2>; + + } + +} + + + +sub dec2bin { + + my $str = unpack("B32", pack("N", shift)); + return $str; + +} + +sub bin2dec { + return unpack("N", pack("B32", substr("0" x 32 . shift, -32))); +} -- GitLab