diff --git a/Dockerfile b/Dockerfile
index a4198229..779ce883 100755
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,8 +1,8 @@
 FROM nfcore/base
-MAINTAINER Harshil Patel
 LABEL authors="harshil.patel@crick.ac.uk" \
-      description="Docker image containing all requirements for the nfcore/atacseq pipeline"
+      maintainer="harshil.patel@crick.ac.uk" \
+      description="Docker image containing all requirements for the nfcore/atacseq pipeline"
 
 COPY environment.yml /
 RUN conda env create -f /environment.yml && conda clean -a
diff --git a/conf/crick.config b/conf/crick.config
deleted file mode 100755
index aa356e80..00000000
--- a/conf/crick.config
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * -------------------------------------------------
- * Nextflow config file for CAMP HPC @ The Crick
- * -------------------------------------------------
- */
-
-singularity {
-  enabled = true
-  autoMounts = true
-}
-
-process {
-  beforeScript = 'module load Singularity/2.6.0-foss-2016b'
-  executor = 'slurm'
-}
-
-params {
-  max_memory = 224.GB
-  max_cpus = 32
-  max_time = 72.h
-
-  igenomes_base = '/camp/svc/reference/Genomics/iGenomes'
-}
diff --git a/conf/test.config b/conf/test.config
index 57d0d02a..3e8f2e74 100755
--- a/conf/test.config
+++ b/conf/test.config
@@ -13,11 +13,11 @@ params {
   max_time = 12.h
 
   // Input data
-  design = 'https://github.com/nf-core/test-datasets/raw/atacseq/design.csv'
+  design = 'https://raw.githubusercontent.com/nf-core/test-datasets/atacseq/design.csv'
 
   // Genome references - use existing files from nf-core/chipseq
-  fasta = 'https://github.com/nf-core/test-datasets/raw/chipseq/reference/genome.fa'
-  gtf = 'https://github.com/nf-core/test-datasets/raw/chipseq/reference/genes.gtf'
+  fasta = 'https://raw.githubusercontent.com/nf-core/test-datasets/chipseq/reference/genome.fa'
+  gtf = 'https://raw.githubusercontent.com/nf-core/test-datasets/chipseq/reference/genes.gtf'
 
   // Not mandatory but permits the pipeline to run through peak-calling steps
   macs_gsize = 1.2e7
diff --git a/docs/configuration/adding_your_own.md b/docs/configuration/adding_your_own.md
index c0fce337..7551ea49 100755
--- a/docs/configuration/adding_your_own.md
+++ b/docs/configuration/adding_your_own.md
@@ -2,11 +2,11 @@
 
 It is entirely possible to run this pipeline on other clusters, though you will need to set up your own config file so that the pipeline knows how to work with your cluster.
 
-> If you think that there are other people using the pipeline who would benefit from your configuration (eg. other common cluster setups), please let us know. We can add a new configuration and profile which can used by specifying `-profile ` when running the pipeline.
+> If you think that there are other people using the pipeline who would benefit from your configuration (eg. other common cluster setups), please let us know. We can add a new configuration and profile which can be used by specifying `-profile ` when running the pipeline. The config file will then be hosted at `nf-core/configs` and will be pulled automatically before the pipeline is executed.
 
 If you are the only person to be running this pipeline, you can create your config file as `~/.nextflow/config` and it will be applied every time you run Nextflow. Alternatively, save the file anywhere and reference it when running the pipeline with `-c path/to/config` (see the [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) for more).
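For anyone writing such a file, a minimal per-cluster sketch is shown below. It is modelled on the removed `conf/crick.config` above; the executor, module command and resource caps are illustrative assumptions that should be replaced with values for your own cluster:

```nextflow
// Hypothetical ~/.nextflow/config (or a file passed with -c) for a SLURM cluster.
// All values are placeholders - adjust them to your environment.
singularity {
  enabled    = true
  autoMounts = true
}

process {
  executor     = 'slurm'
  beforeScript = 'module load Singularity'  // only needed if your cluster uses environment modules
}

params {
  max_memory = 128.GB
  max_cpus   = 16
  max_time   = 48.h
}
```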
-A basic configuration comes with the pipeline, which runs by default (the `standard` config profile - see [`conf/base.config`](../../conf/base.config)). This means that you only need to configure the specifics for your system and overwrite any defaults that you want to change.
+A basic configuration comes with the pipeline, which loads the [`conf/base.config`](../../conf/base.config) by default. This means that you only need to configure the specifics for your system and overwrite any defaults that you want to change.
 
 ## Cluster Environment
 By default, pipeline uses the `local` Nextflow executor - in other words, all jobs are run in the login session. If you're using a simple server, this may be fine. If you're using a compute cluster, this is bad as all jobs will run on the head node.
diff --git a/docs/configuration/crick.md b/docs/configuration/crick.md
deleted file mode 100644
index 3449e88b..00000000
--- a/docs/configuration/crick.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# nf-core/atacseq: Crick (CAMP HPC) Configuration
-
-This pipeline has been successfully configured for use on the CAMP HPC cluster at the [The Francis Crick Institute](https://www.crick.ac.uk/).
-
-To use, run the pipeline with `-profile crick`. This will launch the [`crick.config`](../../conf/crick.config) which has been pre-configured with a setup suitable for the CAMP HPC cluster. Using this profile, Nextflow will download a temporary singularity image with all of the required software before execution of the pipeline.
-
-Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on CAMP. You can do this by issuing the commands below:
-
-```
-module purge
-module load Nextflow/0.32.0
-module load Singularity/2.6.0-foss-2016b
-
-nextflow run nf-core/atacseq -profile crick --genome GRCh37 --design /path/to/design.csv --email test.user@crick.ac.uk
-```
-
-A local copy of the iGenomes resource has been made available on CAMP so you should be able to run the pipeline against any reference available in the [`igenomes.config`](../../conf/igenomes.config) by simply using the `--genome ` parameter. Some of the more exotic genomes may not have been downloaded onto CAMP so have a look in the `igenomes_base` path specified in [`crick.config`](../../conf/crick.config), and if your genome of interest isnt present please contact [BABS](mailto:bioinformatics@crick.ac.uk).
-
-Alternatively, if you are running the pipeline regularly for genomes that arent available in the iGenomes resource, we recommend creating a config file with paths to your reference genome indices (see [`reference genomes documentation`](reference_genomes.md) for instructions).
-
-If for some reason the pipeline fails to run you can resume it by adding `-resume` to the `nextflow run` command (see [`usage.md`](../usage.md)).
-
-All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway. If you wish to keep the `work/` directory for traceability then another solution would be to just delete the largest files. You can execute the commands below from **within** the `work/` directory:
-
-```
-find ./ -type f -name *.fq.gz -exec rm -f {} \;
-find ./ -type f -name *.sai -exec rm -f {} \;
-find ./ -type f -name *.sam -exec rm -f {} \;
-find ./ -type f -name *.bam -exec rm -f {} \;
-find ./ -type f -name *.bedGraph -exec rm -f {} \;
-find ./ -type f -name *.bigWig -exec rm -f {} \;
-```
-
->NB: You will not be able to `-resume` the pipeline if you delete the `work/` directory because it contains all of the necessary intermediate files. Please make sure you delete it after the pipeline has finished successfully.
-
->NB: You will need an account to use the HPC cluster on CAMP in order to run the pipeline. If in doubt contact IT.
-
->NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
diff --git a/docs/installation.md b/docs/installation.md
index de9cc40c..590cd324 100755
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -14,7 +14,6 @@ To start using the nf-core/atacseq pipeline, follow the steps below:
 4. [Reference genomes](#4-reference-genomes)
 5. [Appendices](#5-appendices)
     * [Running on UPPMAX](#running-on-uppmax)
-    * [Running at the Crick](#running-at-the-crick)
 
 ## 1) Install NextFlow
 Nextflow runs on most POSIX systems (Linux, Mac OSX etc). It can be installed by running the following commands:
@@ -119,7 +118,3 @@ Note that you will need to specify your UPPMAX project ID when running a pipelin
 ```nextflow
 params.project = 'project_ID' // eg. b2017123
 ```
-
-#### Running at the Crick
-
-See [`docs/configuration/crick.md`](configuration/crick.md)
diff --git a/docs/usage.md b/docs/usage.md
index abb3e415..17579f2e 100755
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -8,14 +8,11 @@
 * [Reproducibility](#reproducibility)
 * [Main arguments](#main-arguments)
   * [`-profile`](#-profile-single-dash)
-    * [`standard`](#standard)
+    * [`awsbatch`](#awsbatch)
+    * [`conda`](#conda)
     * [`docker`](#docker)
     * [`singularity`](#singularity)
-    * [`conda`](#conda)
-    * [`awsbatch`](#awsbatch)
-    * [`crick`](#crick)
     * [`test`](#test)
-    * [`none`](#none)
   * [`--design`](#--design)
 * [Generic arguments](#generic-arguments)
   * [`--singleEnd`](#--singleEnd)
@@ -31,6 +28,7 @@
   * [`--macs_gsize`](#--macs_gsize)
   * [`--blacklist`](#--blacklist)
   * [`--saveReference`](#--saveReference)
+  * [`--igenomesIgnore`](#--igenomesignore)
 * [Adapter trimming](#adapter-trimming)
   * [`--skipTrimming`](#--skipTrimming)
   * [`--saveTrimmed`](#--saveTrimmed)
@@ -53,10 +51,11 @@
   * [`-name`](#-name-single-dash)
   * [`-resume`](#-resume-single-dash)
   * [`-c`](#-c-single-dash)
+  * [`--custom_config_version`](#--custom_config_version)
   * [`--max_memory`](#--max_memory)
   * [`--max_time`](#--max_time)
   * [`--max_cpus`](#--max_cpus)
-  * [`--plaintext_emails`](#--plaintext_emails)
+  * [`--plaintext_email`](#--plaintext_email)
   * [`--multiqc_config`](#--multiqc_config)
 
 ## General Nextflow info
@@ -104,28 +103,22 @@ This version number will be logged in reports when you run the pipeline, so that
 
 ### `-profile`
 Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments. Note that multiple profiles can be loaded, for example: `-profile standard,docker` - the order of arguments is important!
-* `standard`
-  * The default profile, used if `-profile` is not specified at all.
-  * Runs locally and expects all software to be installed and available on the `PATH`.
+If `-profile` is not specified at all, the pipeline will run locally and expect all software to be installed and available on the `PATH`.
+
+* `awsbatch`
+  * A generic configuration profile to be used with AWS Batch.
+* `conda`
+  * A generic configuration profile to be used with [conda](https://conda.io/docs/)
+  * Pulls most software from [Bioconda](https://bioconda.github.io/)
 * `docker`
   * A generic configuration profile to be used with [Docker](http://docker.com/)
   * Pulls software from dockerhub: [`nfcore/atacseq`](http://hub.docker.com/r/nfcore/atacseq/)
 * `singularity`
   * A generic configuration profile to be used with [Singularity](http://singularity.lbl.gov/)
   * Pulls software from singularity-hub
-* `conda`
-  * A generic configuration profile to be used with [conda](https://conda.io/docs/)
-  * Pulls most software from [Bioconda](https://bioconda.github.io/)
-* `awsbatch`
-  * A generic configuration profile to be used with AWS Batch.
-* `crick`
-  * Designed to use Singularity on the CAMP HPC system at [The Francis Crick Institute](https://www.crick.ac.uk/)
-  * See [`docs/configuration/crick.md`](configuration/crick.md)
 * `test`
   * A profile with a complete configuration for automated testing
   * Includes links to test data so needs no other parameters
-* `none`
-  * No configuration at all. Useful if you want to build your own config from scratch and want to avoid loading in the default `base` config profile (not recommended).
 
 ### `--design`
 You will need to create a design file with information about the samples in your experiment before running the pipeline. Use this parameter to specify its location.
@@ -178,7 +171,7 @@ The pipeline config files come bundled with paths to the illumina iGenomes refer
 ### `--genome` (using iGenomes)
 There are 31 different species supported in the iGenomes references. To run the pipeline, you must specify which to use with the `--genome` flag.
 
-You can find the keys to specify the genomes in the [`igenomes.config`](../conf/igenomes.config). Common genomes that are supported are:
+You can find the keys to specify the genomes in the [`iGenomes config file`](../conf/igenomes.config). Common genomes that are supported are:
 
 * Human
   * `--genome GRCh37`
@@ -251,6 +244,9 @@ If provided, alignments that overlap with the regions in this file will be filte
 
 ### `--saveReference`
 Supply this parameter to save any generated reference genome files such as the BWA index to your results folder. These can then be used for future pipeline runs, reducing processing times.
 
+### `--igenomesIgnore`
+Do not load `igenomes.config` when running the pipeline. You may choose this option if you observe clashes between custom parameters and those supplied in `igenomes.config`.
+
 ## Adapter trimming
 The pipeline accepts a number of parameters to change how the trimming is done, according to your data type. You can specify custom trimming parameters as follows:
@@ -292,7 +288,11 @@ By default, intermediate BAM files will not be saved. The final BAM files create
 Each step in the pipeline has a default set of requirements for number of CPUs, memory and time. For most of the steps in the pipeline, if the job exits with an error code of `143` (exceeded requested resources) it will automatically resubmit with higher requests (2 x original, then 3 x original). If it still fails after three times then the pipeline is stopped.
 
 ### Custom resource requests
-Wherever process-specific requirements are set in the pipeline, the default value can be changed by creating a custom config file. See the files in [`conf`](../conf) for examples.
+Wherever process-specific requirements are set in the pipeline, the default value can be changed by creating a custom config file. See the files hosted at [`nf-core/configs`](https://github.com/nf-core/configs/tree/master/conf) for examples.
+
+If you are likely to be running `nf-core` pipelines regularly it may be a good idea to request that your custom config file is uploaded to the `nf-core/configs` git repository. Before you do this, please test that the config file works with your pipeline of choice using the `-c` parameter (see definition below). You can then create a pull request to the `nf-core/configs` repository with the addition of your config file, an associated documentation file (see examples in [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs)), and an amendment to [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) to include your custom profile.
+
+If you have any questions or issues please send us a message on [`Slack`](https://nf-core-invite.herokuapp.com/).
 
 ## AWS Batch specific parameters
 Running the pipeline on AWS Batch requires a couple of specific parameters to be set according to your AWS Batch configuration. Please use the `-awsbatch` profile and then specify all of the following parameters.
@@ -330,10 +330,14 @@ Specify the path to a specific config file (this is a core NextFlow command).
 
 **NB:** Single hyphen (core Nextflow option)
 
-Note - you can use this to override defaults. For example, you can specify a config file using `-c` that contains the following:
+Note - you can use this to override pipeline defaults.
 
-```nextflow
-process.$multiqc.module = []
+### `--custom_config_version`
+Provide a git commit id for the custom institutional configs hosted at `nf-core/configs`. This was implemented for reproducibility purposes. Default is set to `master`.
+
+```bash
+## Download and use config file with the following git commit id
+--custom_config_version d52db660777c4bf36546ddb188ec530c3ada1b96
 ```
 
 ### `--max_memory`
diff --git a/main.nf b/main.nf
index eed8d6cb..8ef9b196 100755
--- a/main.nf
+++ b/main.nf
@@ -33,7 +33,7 @@ def helpMessage() {
       --fasta                       Path to Fasta reference. Not mandatory when using reference in iGenomes config via --genome
       --gtf                         Path to GTF file in Ensembl format. Not mandatory when using reference in iGenomes config via --genome
       -profile                      Configuration profile to use. Can use multiple (comma separated)
-                                    Available: standard, conda, docker, singularity, awsbatch, test, crick
+                                    Available: conda, docker, singularity, awsbatch, test
 
     Generic
       --genome                      Name of iGenomes reference
@@ -123,23 +123,23 @@ if( !(workflow.runName ==~ /[a-z]+_[a-z]+/) ){
 /* -- CONFIG FILES -- */
 ////////////////////////////////////////////////////
 
-multiqc_config = file(params.multiqc_config)
-bamtools_filter_pe_config = file(params.bamtools_filter_pe_config)
-bamtools_filter_se_config = file(params.bamtools_filter_se_config)
-output_docs = file("$baseDir/docs/output.md")
+multiqc_config_ch = Channel.fromPath(params.multiqc_config)
+bamtools_filter_pe_config_ch = Channel.fromPath(params.bamtools_filter_pe_config)
+bamtools_filter_se_config_ch = Channel.fromPath(params.bamtools_filter_se_config)
+output_docs_ch = Channel.fromPath("$baseDir/docs/output.md")
 
 // Header files for MultiQC custom-content
-replicate_peak_count_header = file("$baseDir/assets/multiqc/replicate_peak_count_header.txt")
-replicate_frip_score_header = file("$baseDir/assets/multiqc/replicate_frip_score_header.txt")
-replicate_peak_annotation_header = file("$baseDir/assets/multiqc/replicate_peak_annotation_header.txt")
-replicate_deseq2_pca_header = file("$baseDir/assets/multiqc/replicate_deseq2_pca_header.txt")
-replicate_deseq2_clustering_header = file("$baseDir/assets/multiqc/replicate_deseq2_clustering_header.txt")
-
-sample_peak_count_header = file("$baseDir/assets/multiqc/sample_peak_count_header.txt")
-sample_frip_score_header = file("$baseDir/assets/multiqc/sample_frip_score_header.txt")
-sample_peak_annotation_header = file("$baseDir/assets/multiqc/sample_peak_annotation_header.txt")
-sample_deseq2_pca_header = file("$baseDir/assets/multiqc/sample_deseq2_pca_header.txt")
-sample_deseq2_clustering_header = file("$baseDir/assets/multiqc/sample_deseq2_clustering_header.txt")
+replicate_peak_count_header_ch = Channel.fromPath("$baseDir/assets/multiqc/replicate_peak_count_header.txt")
+replicate_frip_score_header_ch = Channel.fromPath("$baseDir/assets/multiqc/replicate_frip_score_header.txt")
+replicate_peak_annotation_header_ch = Channel.fromPath("$baseDir/assets/multiqc/replicate_peak_annotation_header.txt")
+replicate_deseq2_pca_header_ch = Channel.fromPath("$baseDir/assets/multiqc/replicate_deseq2_pca_header.txt")
+replicate_deseq2_clustering_header_ch = Channel.fromPath("$baseDir/assets/multiqc/replicate_deseq2_clustering_header.txt")
+
+sample_peak_count_header_ch = Channel.fromPath("$baseDir/assets/multiqc/sample_peak_count_header.txt")
+sample_frip_score_header_ch = Channel.fromPath("$baseDir/assets/multiqc/sample_frip_score_header.txt")
+sample_peak_annotation_header_ch = Channel.fromPath("$baseDir/assets/multiqc/sample_peak_annotation_header.txt")
+sample_deseq2_pca_header_ch = Channel.fromPath("$baseDir/assets/multiqc/sample_deseq2_pca_header.txt")
+sample_deseq2_clustering_header_ch = Channel.fromPath("$baseDir/assets/multiqc/sample_deseq2_clustering_header.txt")
 
 ////////////////////////////////////////////////////
 /* -- VALIDATE INPUTS -- */
@@ -404,6 +404,7 @@ if(!params.bed12){
     }
 }
 
+
 /*
  * PREPROCESSING - Prepare genome intervals for filtering
  */
@@ -740,8 +741,8 @@ process filter_bam {
     input:
     set val(name), file(bam) from markdup_bam_filter
    file bed from genome_filter_regions.collect()
-    file bamtools_filter_se_config
-    file bamtools_filter_pe_config
+    file bamtools_filter_se_config from bamtools_filter_se_config_ch.collect()
+    file bamtools_filter_pe_config from bamtools_filter_pe_config_ch.collect()
 
     output:
     set val(name), file("*.{bam,bam.bai}") into filter_bam
@@ -771,6 +772,7 @@ process filter_bam {
     """
 }
 
+
 /*
  * STEP 4.4 remove orphan reads from paired-end BAM
  */
@@ -996,8 +998,8 @@ process replicate_macs {
 
     input:
     set val(name), file(bam), file(flagstat) from merge_replicate_bam_macs.join(merge_replicate_flagstat_macs, by: [0])
-    file replicate_peak_count_header
-    file replicate_frip_score_header
+    file replicate_peak_count_header from replicate_peak_count_header_ch.collect()
+    file replicate_frip_score_header from replicate_frip_score_header_ch.collect()
 
     output:
     file "*.{bed,xls,gappedPeak}" into replicate_macs_output
@@ -1068,7 +1070,7 @@ process replicate_macs_qc {
     input:
     file peaks from replicate_macs_peaks_qc.collect{ it[1] }
     file annos from replicate_macs_annotate.collect()
-    file replicate_peak_annotation_header
+    file replicate_peak_annotation_header from replicate_peak_annotation_header_ch.collect()
 
     output:
     file "*.{txt,pdf}" into replicate_macs_qc
@@ -1082,11 +1084,13 @@ process replicate_macs_qc {
     """
     plot_macs_qc.r -i ${peaks.join(',')} \\
         -s ${peaks.join(',').replaceAll(".${suffix}_peaks${peakext}","")} \\
-        -o ./ -p macs_peak.${suffix}
+        -o ./ \\
+        -p macs_peak.${suffix}
 
     plot_homer_annotatepeaks.r -i ${annos.join(',')} \\
         -s ${annos.join(',').replaceAll(".${suffix}_peaks.annotatePeaks.txt","")} \\
-        -o ./ -p macs_annotatePeaks.${suffix}
+        -o ./ \\
+        -p macs_annotatePeaks.${suffix}
 
     cat $replicate_peak_annotation_header macs_annotatePeaks.${suffix}.summary.txt > macs_annotatePeaks.${suffix}.summary_mqc.tsv
     """
@@ -1146,8 +1150,8 @@ process replicate_macs_consensus_annotate {
     input:
     file bed from replicate_macs_consensus_bed
     file bool from replicate_macs_consensus_bool
-    file fasta from fasta_replicate_macs_consensus_annotate.collect()
-    file gtf from gtf_replicate_macs_consensus_annotate.collect()
+    file fasta from fasta_replicate_macs_consensus_annotate
+    file gtf from gtf_replicate_macs_consensus_annotate
 
     output:
     file "*.annotatePeaks.txt" into replicate_macs_consensus_annotate
@@ -1177,8 +1181,8 @@ process replicate_macs_consensus_deseq {
     input:
     file bams from replicate_name_bam_replicate_counts.collect{ it[1] }
     file saf from replicate_macs_consensus_saf.collect()
-    file replicate_deseq2_pca_header
-    file replicate_deseq2_clustering_header
+    file replicate_deseq2_pca_header from replicate_deseq2_pca_header_ch.collect()
+    file replicate_deseq2_clustering_header from replicate_deseq2_clustering_header_ch.collect()
 
     output:
     file "*featureCounts.txt" into replicate_macs_consensus_counts
@@ -1383,8 +1387,8 @@ process sample_macs {
 
     input:
     set val(name), file(bam), file(flagstat) from merge_sample_bam_macs.join(merge_sample_flagstat_macs, by: [0])
-    file sample_peak_count_header
-    file sample_frip_score_header
+    file sample_peak_count_header from sample_peak_count_header_ch.collect()
+    file sample_frip_score_header from sample_frip_score_header_ch.collect()
 
     output:
     file "*.{bed,xls,gappedPeak}" into sample_macs_output
@@ -1455,7 +1459,7 @@ process sample_macs_qc {
     input:
     file peaks from sample_macs_peaks_qc.collect{ it[1] }
     file annos from sample_macs_annotate.collect()
-    file sample_peak_annotation_header
+    file sample_peak_annotation_header from sample_peak_annotation_header_ch.collect()
 
     output:
     file "*.{txt,pdf}" into sample_macs_qc
@@ -1469,11 +1473,13 @@ process sample_macs_qc {
     """
     plot_macs_qc.r -i ${peaks.join(',')} \\
         -s ${peaks.join(',').replaceAll(".${suffix}_peaks${peakext}","")} \\
-        -o ./ -p macs_peak.${suffix}
+        -o ./ \\
+        -p macs_peak.${suffix}
 
     plot_homer_annotatepeaks.r -i ${annos.join(',')} \\
         -s ${annos.join(',').replaceAll(".${suffix}_peaks.annotatePeaks.txt","")} \\
-        -o ./ -p macs_annotatePeaks.${suffix}
+        -o ./ \\
+        -p macs_annotatePeaks.${suffix}
 
     cat $sample_peak_annotation_header macs_annotatePeaks.${suffix}.summary.txt > macs_annotatePeaks.${suffix}.summary_mqc.tsv
     """
@@ -1533,8 +1539,8 @@ process sample_macs_consensus_annotate {
     input:
     file bed from sample_macs_consensus_bed
     file bool from sample_macs_consensus_bool
-    file fasta from fasta_sample_macs_consensus_annotate.collect()
-    file gtf from gtf_sample_macs_consensus_annotate.collect()
+    file fasta from fasta_sample_macs_consensus_annotate
+    file gtf from gtf_sample_macs_consensus_annotate
 
     output:
     file "*.annotatePeaks.txt" into sample_macs_consensus_annotate
@@ -1564,8 +1570,8 @@ process sample_macs_consensus_deseq {
     input:
     file bams from replicate_name_bam_sample_counts.collect{ it[1] }
     file saf from sample_macs_consensus_saf.collect()
-    file sample_deseq2_pca_header
-    file sample_deseq2_clustering_header
+    file sample_deseq2_pca_header from sample_deseq2_pca_header_ch.collect()
+    file sample_deseq2_clustering_header from sample_deseq2_clustering_header_ch.collect()
 
     output:
     file "*featureCounts.txt" into sample_macs_consensus_counts
@@ -1663,7 +1669,7 @@ process multiqc {
     publishDir "${params.outdir}/multiqc", mode: 'copy'
 
     input:
-    file multiqc_config
+    file multiqc_config from multiqc_config_ch.collect()
     file ('fastqc/*') from fastqc_reports_mqc.collect()
     file ('trimgalore/*') from trimgalore_results_mqc.collect()
     file ('trimgalore/fastqc/*') from trimgalore_fastqc_reports_mqc.collect()
@@ -1750,7 +1756,7 @@ process output_documentation {
     publishDir "${params.outdir}/Documentation", mode: 'copy'
 
     input:
-    file output_docs
+    file output_docs from output_docs_ch
 
     output:
     file "results_description.html"
diff --git a/nextflow.config b/nextflow.config
index d8b72ffa..11be20b5 100755
--- a/nextflow.config
+++ b/nextflow.config
@@ -57,17 +57,20 @@ params {
   tracedir = "${params.outdir}/pipeline_info"
   clusterOptions = false
   igenomes_base = "./iGenomes"
-
+  igenomesIgnore = false
+  custom_config_version = 'master'
 }
 
-profiles {
+// Load base.config by default for all pipelines
+includeConfig 'conf/base.config'
 
-  standard {
-    includeConfig 'conf/base.config'
-  }
-  conda {
-    process.conda = "$baseDir/environment.yml"
-  }
+// Load nf-core custom profiles from different Institutions
+includeConfig "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}/nfcore_custom.config"
+
+profiles {
+  awsbatch { includeConfig 'conf/awsbatch.config' }
+  conda { process.conda = "$baseDir/environment.yml" }
+  debug { process.beforeScript = 'echo $HOSTNAME' }
   docker {
     docker.enabled = true
     process.container = params.container
@@ -76,26 +79,12 @@ profiles {
     singularity.enabled = true
     process.container = {"shub://${params.container.replace('nfcore', 'nf-core')}"}
   }
-  awsbatch {
-    includeConfig 'conf/base.config'
-    includeConfig 'conf/awsbatch.config'
-    includeConfig 'conf/igenomes.config'
-  }
-  test {
-    includeConfig 'conf/base.config'
-    includeConfig 'conf/test.config'
-    includeConfig 'conf/igenomes.config'
-  }
-  crick {
-    includeConfig 'conf/base.config'
-    includeConfig 'conf/crick.config'
-    includeConfig 'conf/igenomes.config'
-  }
-  debug { process.beforeScript = 'echo $HOSTNAME' }
-  none {
-    // Don't load any config (for use with custom home configs)
-  }
+  test { includeConfig 'conf/test.config' }
+}
+// Load igenomes.config if required
+if(!params.igenomesIgnore){
+  includeConfig 'conf/igenomes.config'
 }
 
 // Increase time available to build conda environment
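With this layout, `conf/base.config` is always loaded, institutional profiles are pulled from `nf-core/configs` (pinned via `--custom_config_version`), and `conf/igenomes.config` is skipped when `--igenomesIgnore` is set. A custom file passed with `-c` then only needs to override what differs, for example per-process resources. The sketch below is illustrative only: the `filter_bam` process name is taken from `main.nf` above, the `withName` selector assumes a recent Nextflow release, and the resource values are placeholders:

```nextflow
// Hypothetical my_resources.config, supplied with: nextflow run nf-core/atacseq -c my_resources.config ...
process {
  withName: 'filter_bam' {
    cpus   = 8      // illustrative values - tune to your data and cluster
    memory = 32.GB
    time   = 12.h
  }
}
```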