diff --git a/assets/schema_tools.json b/assets/schema_tools.json
index 1820b94e..70e258f1 100644
--- a/assets/schema_tools.json
+++ b/assets/schema_tools.json
@@ -12,48 +12,30 @@
         },
         "diff_method": {
             "type": "string",
-            "errorMessage": "choose propd, DESeq2 or none",
+            "errorMessage": "choose a differential analysis method (e.g. deseq2, propd, limma, etc.) or none",
             "meta": ["diff_method"]
         },
         "args_diff": {
             "type": "string",
             "meta": ["args_diff"]
         },
-        "enr_diff_method": {
-            "type": "string",
-            "meta": ["enr_diff_method"],
-            "errorMessage": "choose grea, gsea or none"
-        },
-        "args_enr_diff": {
-            "type": "string",
-            "meta": ["args_enr_diff"]
-        },
         "cor_method": {
             "type": "string",
             "meta": ["cor_method"],
-            "errorMessage": "choose correlation,proportionality, partial correlation or none"
+            "errorMessage": "choose a correlation method (e.g. propr) or none"
         },
         "args_cor": {
             "type": "string",
             "meta": ["args_cor"]
         },
-        "enr_cor_method": {
-            "type": "string",
-            "meta": ["enr_cor_method"],
-            "errorMessage": "choose grea or none"
-        },
-        "args_enr_cor": {
-            "type": "string",
-            "meta": ["args_enr_cor"]
-        },
-        "sel_method": {
+        "enr_method": {
             "type": "string",
-            "meta": ["sel_method"],
-            "errorMessage": "choose filtervar or none"
+            "meta": ["enr_method"],
+            "errorMessage": "choose a functional enrichment analysis method (e.g. gsea, grea, gprofiler, etc.) or none"
         },
-        "args_sel": {
+        "args_enr": {
             "type": "string",
-            "meta": ["args_sel"]
+            "meta": ["args_enr"]
         }
     },
     "required": []
diff --git a/assets/tools_samplesheet.csv b/assets/tools_samplesheet.csv
index afce8a3e..c5ef3831 100644
--- a/assets/tools_samplesheet.csv
+++ b/assets/tools_samplesheet.csv
@@ -1,6 +1,6 @@
-pathway_name,diff_method,args_diff,enr_diff_method,args_enr_diff,cor_method,args_cor,enr_cor_method,args_enr_cor,sel_method,args_sel
-diff_prop,propd,--adjacency true --cutoff_min 0.05 --cutoff_max 0.95 --cutoff_interval 0.05 --permutation 100 --fixseed true,,,,,,,,
-diff_prop_noperm,propd,--cutoff_min 0.05 --cutoff_max 0.95 --cutoff_interval 0.05 --permutation 0 --fixseed true,,,,,,,,
-filtered_pcor,propd,--adjacency true --cutoff_min 0.05 --cutoff_max 0.95 --cutoff_interval 0.05 --permutation 100 --fixseed true,,,propr,--permutation 10 --adjacency true --cutoff_min 0.005 --cutoff_max 0.5 --cutoff_interval 0.01 --metric pcor.bshrink,,,filtervar,
-prop,,,,,propr,--cutoff_min 0.05 --cutoff_max 0.95 --cutoff_interval 0.05 --fixseed true --metric rho --permutation 100 --adjacency true,,,,
-diff_grea,propd,--adjacency true --cutoff_min 0.05 --cutoff_max 0.95 --cutoff_interval 0.05 --permutation 10 --fixseed true,grea,--permutation 10,,,,,,
+pathway_name,diff_method,args_diff,cor_method,args_cor,enr_method,args_enr
+propd,propd,--adjacency true --cutoff_min 0.05 --cutoff_max 0.95 --cutoff_interval 0.05 --permutation 10,,,,
+propd_noperm,propd,--cutoff_min 0.05 --cutoff_max 0.95 --cutoff_interval 0.05 --permutation 0,,,,
+pcorbshrink,,,propr,--permutation 10 --adjacency true --cutoff_min 0.005 --cutoff_max 0.5 --cutoff_interval 0.01 --metric pcor.bshrink,,
+propr,,,propr,--cutoff_min 0.05 --cutoff_max 0.95 --cutoff_interval 0.05 --metric rho --permutation 10 --adjacency true,,
+propd_grea,propd,--adjacency true --cutoff_min 0.05 --cutoff_max 0.95 --cutoff_interval 0.05 --permutation 10,,,grea,--permutation 10
diff --git a/conf/modules.config b/conf/modules.config
index 5d987f51..a0ce2938 100644
--- a/conf/modules.config
+++ b/conf/modules.config
@@ -499,33 +499,15 @@ process {
         ]
     }

-    withName: "FILTERVAR"{
-        ext.args = { "${meta.args_cor}" == "null" ? '' : "${meta.args_cor}" }
'' : "${meta.args_cor}" } - publishDir = [ - path: { "${params.outdir}/variable_selection/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') ? null : filename } - ] - } - - withName: "GREA_DIFF"{ - ext.args = { "${meta.args_enr_diff}" == "null" ? '' : "${meta.args_enr_diff}" } + withName: "GREA"{ + ext.args = { "${meta.args_enr}" == "null" ? '' : "${meta.args_enr}" } publishDir = [ path: { "${params.outdir}/enrichment_differential/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }, mode: params.publish_dir_mode, saveAs: { filename -> filename.equals('versions.yml') ? null : filename } ] } - - withName: "GREA_COR"{ - ext.args = { "${meta.args_enr_cor}" == "null" ? '' : "${meta.args_enr_cor}" } - publishDir = [ - path: { "${params.outdir}/enrichment_correlation/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') ? null : filename } - ] - } - + withName: "MYGENE" { ext.args = { [ "--columname ${params.features_id_col}", diff --git a/modules/local/filtervar/main.nf b/modules/local/filtervar/main.nf deleted file mode 100644 index 5d9c39c8..00000000 --- a/modules/local/filtervar/main.nf +++ /dev/null @@ -1,23 +0,0 @@ -process FILTERVAR { - tag "$meta.id" - label 'process_single' - - conda "${moduleDir}/environment.yml" - container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? - 'https://depot.galaxyproject.org/singularity/r-propr:5.0.3': - 'quay.io/biocontainers/r-propr:5.0.3' }" - - input: - tuple val(meta), path(count), path(adj_matrix) - - output: - tuple val(meta), path("*.count_filtered.tsv"), emit: count - path "*.R_sessionInfo.log", emit: session_info - path "versions.yml", emit: versions - - when: - task.ext.when == null || task.ext.when - - script: - template 'filtervar.R' -} diff --git a/modules/local/filtervar/templates/filtervar.R b/modules/local/filtervar/templates/filtervar.R deleted file mode 100644 index 7b662626..00000000 --- a/modules/local/filtervar/templates/filtervar.R +++ /dev/null @@ -1,275 +0,0 @@ - - -#!/usr/bin/env Rscript - - -################################################ -################################################ -## Functions ## -################################################ -################################################ - -#' Parse out options from a string without recourse to optparse -#' -#' @param x Long-form argument list like --opt1 val1 --opt2 val2 -#' -#' @return named list of options and values similar to optparse - -parse_args <- function(x){ - args_list <- unlist(strsplit(x, ' ?--')[[1]])[-1] - args_vals <- lapply(args_list, function(x) scan(text=x, what='character', quiet = TRUE)) - - # Ensure the option vectors are length 2 (key/ value) to catch empty ones - args_vals <- lapply(args_vals, function(z){ length(z) <- 2; z}) - - parsed_args <- structure(lapply(args_vals, function(x) x[2]), names = lapply(args_vals, function(x) x[1])) - parsed_args[! is.na(parsed_args)] -} - -#' Flexibly read CSV or TSV files -#' -#' @param file Input file -#' @param header Boolean. TRUE if first row is header. False without header. -#' @param row.names The first column is used as row names by default. -#' Otherwise, give another number. Or use NULL when no row.names are present. 
-#'
-#' @return output Data frame
-read_delim_flexible <- function(file, header = TRUE, row.names = 1, check.names = TRUE){
-
-    ext <- tolower(tail(strsplit(basename(file), split = "\\\\.")[[1]], 1))
-
-    if (ext == "tsv" || ext == "txt") {
-        separator <- "\\t"
-    } else if (ext == "csv") {
-        separator <- ","
-    } else {
-        stop(paste("Unknown separator for", ext))
-    }
-
-    mat <- read.delim(
-        file,
-        sep = separator,
-        header = header,
-        row.names = row.names,
-        check.names = check.names
-    )
-
-    if ( (row.names == 'gene_id') & ('gene_name' %in% colnames(mat)) ){
-        mat <- mat[, -which(colnames(mat) == 'gene_name')]
-    } else if ( (row.names == 'gene_name') & ('gene_id' %in% colnames(mat)) ){
-        mat <- mat[, -which(colnames(mat) == 'gene_id')]
-    }
-
-    return(mat)
-}
-
-read_delim_flexible2 <- function(file, header = TRUE){
-
-    ext <- tolower(tail(strsplit(basename(file), split = "\\\\.")[[1]], 1))
-
-    if (ext == "tsv" || ext == "txt") {
-        separator <- "\\t"
-    } else if (ext == "csv") {
-        separator <- ","
-    } else {
-        stop(paste("Unknown separator for", ext))
-    }
-
-    mat <- read.delim(
-        file,
-        sep = separator,
-        header = header
-    )
-    return(mat)
-}
-
-
-################################################
-################################################
-## Parse arguments ##
-################################################
-################################################
-
-opt <- list(
-    count = '$count',
-    prefix = ifelse('$task.ext.prefix' == 'null', '$meta.pathway_name', '$task.ext.prefix'),
-    transformation = 'clr',
-    reference = NA,
-    alpha = NA,
-    metric = 'pcor.bshrink',
-    permutation = 0,
-    cutoff_min = NA,
-    cutoff_max = NA,
-    cutoff_interval = NA,
-    ncores = as.integer('$task.cpus'),
-    features_id_col = 'gene_id',
-    fixseed = FALSE,
-    adjacency = FALSE,
-    fdrVal = 0.05,
-    adj_matrix = '$adj_matrix',
-    filterVar = 'yes'
-)
-opt_types <- list(
-    count = 'character',
-    prefix = 'character',
-    transformation = 'character',
-    reference = 'character',
-    alpha = 'numeric',
-    metric = 'character',
-    permutation = 'numeric',
-    cutoff_min = 'numeric',
-    cutoff_max = 'numeric',
-    cutoff_interval = 'numeric',
-    ncores = 'numeric',
-    features_id_col = 'character',
-    fixseed = 'logical',
-    adjacency = 'logical',
-    fdrVal = 'numeric',
-    adj_matrix = 'character',
-    filterVar = 'character'
-)
-
-
-# Apply parameter overrides
-args_opt <- parse_args('$task.ext.args')
-
-for ( ao in names(args_opt)){
-    if (! ao %in% names(opt)){
-        stop(paste("Invalid option:", ao))
-    } else {
-
-        # Preserve classes from defaults where possible
-        if (! is.null(opt[[ao]])){
-            args_opt[[ao]] <- as(args_opt[[ao]], opt_types[[ao]])
-        }
-        # set NA
-        if (args_opt[[ao]] %in% c('NA', NA, 'null')){
-            args_opt[[ao]] <- NA
-        }
-        opt[[ao]] <- args_opt[[ao]]
-    }
-}
-
-# Check if required parameters have been provided
-required_opts <- c('count')
-missing <- required_opts[unlist(lapply(opt[required_opts], is.null)) | ! required_opts %in% names(opt)]
-if (length(missing) > 0){
-    stop(paste("Missing required options:", paste(missing, collapse=', ')))
-}
-
-################################################
-################################################
-## Perform variable selection ##
-################################################
-################################################
-
-# read matrix
-A <- read_delim_flexible(
-    opt\$adj_matrix,
-    header = TRUE,
-    row.names = 1,
-    check.names = TRUE
-)
-
-count <- read_delim_flexible2(
-    opt\$count,
-    header = TRUE
-)
-
-### Determine most differentially proportional genes
-
-# Set diagonal in A to 0
-diag(A) <- 0
-
-# Sum values in adjacency and add as an extra column
-per_gene_connection <- rowSums(A)
-
-A\$per_gene <- per_gene_connection
-
-A <- A[order(A\$per_gene, decreasing = TRUE),]
-
-# Define selection criteria
-
-max_gene_number <- ncol(count)*10 # 10x samples for technical reasons (pcor)
-
-#Calculate connection threshold
-total_connections <- sum(per_gene_connection)/2 # 2 because the matrix is symmetric
-possible_connections <- nrow(count)*(nrow(count)-1)/2
-
-percentage_expected <- total_connections/possible_connections
-connection_threshold <- percentage_expected * nrow(count)
-
-# Filter count matrix according to selected genes
-
-col_genes <- which(names(count) == opt\$features_id_col)
-
-if (opt\$filterVar == 'yes'){
-    # select only differentially proportional genes
-    top_genes <- rownames(A[which(A\$per_gene > connection_threshold),])
-    count_filtered <- count[count[,col_genes] %in% top_genes,]
-    warning("non differentially proportional genes were removed before correlation analysis")
-
-} else if (max_gene_number < nrow(count) & opt\$metric== 'pcor.bshrink'){
-    # select the maximum number of genes to perform partial correlation
-    top_genes <- rownames(A[1:gene_number,])
-    count_filtered <- count[count[,col_genes] %in% top_genes,]
-    warning("some genes were removed to perform partial correlation")
-
-}else{
-    # no genes were removed
-    count_filtered <- count
-    warning("No genes were removed")
-}
-
-
-################################################
-################################################
-## Generate outputs ##
-################################################
-################################################
-
-write.table(
-    count_filtered,
-    file = paste0(opt\$prefix, '.count_filtered.tsv'),
-    col.names = TRUE,
-    row.names = FALSE,
-    sep = '\t',
-    quote = FALSE
-)
-
-################################################
-################################################
-## WARNINGS ##
-################################################
-################################################
-
-sink(paste0(opt\$prefix, ".warnings.log"))
-print(warnings())
-sink()
-
-################################################
-################################################
-## R SESSION INFO ##
-################################################
-################################################
-
-sink(paste0(opt\$prefix, ".R_sessionInfo.log"))
-print(sessionInfo())
-sink()
-
-################################################
-################################################
-## VERSIONS FILE ##
-################################################
-################################################
-
-propr.version <- as.character(packageVersion('propr'))
-
-writeLines(
-    c(
-        '"${task.process}":',
-        paste('    r-propr:', propr.version)
-    ),
-'versions.yml')
-
diff --git a/subworkflows/local/correlation/main.nf b/subworkflows/local/correlation/main.nf
index b586f369..1f7e5e18 100644
--- a/subworkflows/local/correlation/main.nf
+++ b/subworkflows/local/correlation/main.nf
@@ -5,38 +5,41 @@ include {PROPR_PROPR as PROPR} from "../../../modules/nf-core/propr/propr/main.n
 workflow CORRELATION {
     take:
-    ch_counts
     ch_tools
-    ch_counts_filtered
+    ch_counts

     main:
-    ch_counts
-        .combine(ch_tools)
-        .map {
-            metacounts, counts, metatools ->
-                [ metacounts+metatools, counts ]
-        }
+
+    // initialize empty results channels
+    ch_results = Channel.empty()
+    ch_adjacency = Channel.empty()
+
+    // branch tools to select the correct correlation analysis method
+    ch_tools
         .branch {
             propr: it[0]["cor_method"] == "propr"
         }
-        .set { ch_counts_cor }
+        .set { ch_tools_single }

-    // Create a branch of the channel to retrieve the normal counts when there is no variable selection.
-    ch_counts_cor.propr
-        .branch{
-            no_sel: it[0]["sel_method"] == null
-            sel: it[0]["sel_method"] != null
-        }
-        .set { ch_counts_selection }
+    // ----------------------------------------------------
+    // Perform correlation analysis with propr
+    // ----------------------------------------------------

-    ch_propr = ch_counts_filtered.mix(ch_counts_selection.no_sel)
+    ch_counts
+        .combine(ch_tools_single.propr)
+        .map {
+            metacounts, counts, metatools ->
+                [ metacounts+metatools, counts ]
+        }
+        .set { ch_counts_propr }

-    PROPR(ch_propr)
+    PROPR(ch_counts_propr)
     ch_matrix = PROPR.out.matrix
     ch_adjacency = PROPR.out.adj

+    // TODO: divide propr module into cor, propr, pcor, pcorbshrink, etc.
+
     emit:
     matrix = ch_matrix
     adjacency = ch_adjacency
-
 }
diff --git a/subworkflows/local/differential/main.nf b/subworkflows/local/differential/main.nf
index 65988020..39476eac 100644
--- a/subworkflows/local/differential/main.nf
+++ b/subworkflows/local/differential/main.nf
@@ -4,15 +4,20 @@
 include { PROPR_PROPD as PROPD } from "../../../modules/nf-core/propr/propd/main.nf"
 include { DESEQ2_DIFFERENTIAL } from '../../../modules/nf-core/deseq2/differential/main'
-
 workflow DIFFERENTIAL {
     take:
-    ch_contrasts            // [meta, contrast_variable, reference, target]
-    ch_samplesheet
-    ch_counts
     ch_tools
+    ch_counts
+    ch_samplesheet
+    ch_contrasts            // [meta, contrast_variable, reference, target]

     main:
+
+    // initialize empty results channels
+    ch_results = Channel.empty()
+    ch_adjacency = Channel.empty()
+
+    // branch tools to select the correct differential analysis method
     ch_tools
         .branch {
             propd: it[0]["diff_method"] == "propd"
@@ -20,15 +25,17 @@ workflow DIFFERENTIAL {
         }
         .set { ch_tools_single }

+    // ----------------------------------------------------
+    // Perform differential analysis with propd
+    // ----------------------------------------------------
-    // Perform differential analysis with PROPD
     ch_counts
         .combine(ch_tools_single.propd)
        .combine(ch_contrasts)
        .map {
            meta_counts, counts, tools, meta_contrast, contrast_variable, reference, target ->
                def meta = meta_counts.clone() + tools.clone()
-                meta.args_diff = (meta.args_diff ?: "") + " --group_col $contrast_variable"
+                meta.args_diff = (meta.args_diff ?: "") + " --group_col $contrast_variable" // TODO parse the toolsheet with the ext.arg from modules.config at the beginning of the experimental workflow
                [ meta, counts ]
        }
        .unique()
@@ -38,10 +45,13 @@ workflow DIFFERENTIAL {
         ch_counts_propd,
         ch_samplesheet.first()
     )
-    ch_results = PROPD.out.results
-    ch_adjacency = PROPD.out.adj
+    ch_results = ch_results.mix(PROPD.out.results)
+    ch_adjacency = ch_adjacency.mix(PROPD.out.adj)

+    // ----------------------------------------------------
     // Perform differential analysis with DESeq2
+    // ----------------------------------------------------
+    // ToDo: In order to use deseq2 the downstream processes need to be updated to process the output correctly

     // if (params.transcript_length_matrix) { ch_transcript_lengths = Channel.of([ exp_meta, file(params.transcript_length_matrix, checkIfExists: true)]).first() } else { ch_transcript_lengths = [[],[]] }
     // if (params.control_features) { ch_control_features = Channel.of([ exp_meta, file(params.control_features, checkIfExists: true)]).first() } else { ch_control_features = [[],[]] }
@@ -64,5 +74,5 @@ workflow DIFFERENTIAL {
     emit:
     results = ch_results
     adjacency = ch_adjacency
-
+    // TODO add filtered results channel
 }
diff --git a/subworkflows/local/enrichment/main.nf b/subworkflows/local/enrichment/main.nf
index 4f3ae992..20256343 100644
--- a/subworkflows/local/enrichment/main.nf
+++ b/subworkflows/local/enrichment/main.nf
@@ -1,43 +1,46 @@
 //
 // Perform enrichment analysis
 //
-include { PROPR_GREA as GREA_DIFF } from "../../../modules/nf-core/propr/grea/main.nf"
-include { PROPR_GREA as GREA_COR } from "../../../modules/nf-core/propr/grea/main.nf"
 include { MYGENE } from "../../../modules/nf-core/mygene/main.nf"
+include { PROPR_GREA as GREA } from "../../../modules/nf-core/propr/grea/main.nf"

 workflow ENRICHMENT {
     take:
-    ch_diff_adjacency
-    ch_cor_adjacency
     ch_counts
+    ch_results
+    ch_adjacency
+    // TODO: add ch_gm when provided by user, etc.

     main:
-    MYGENE(ch_counts)
-    ch_gmt = MYGENE.out.gmt
+    // initialize empty results channels
+    ch_enriched = Channel.empty()
+
+    // ----------------------------------------------------
+    // Perform enrichment analysis with GREA
+    // ----------------------------------------------------

-    ch_diff_adjacency
-        .branch {
-            grea: it[0]["enr_diff_method"] == "grea"
-            gsea: it[0]["enr_diff_method"] == "gsea"
-        }
-        .set { ch_diff_grea }
+    // construct the gene set selection
+    // TODO this should be optional, only run when there is no gene set data provided by user
+    MYGENE(ch_counts)
+    ch_gmt = MYGENE.out.gmt

-    GREA_DIFF(ch_diff_grea.grea, ch_gmt.collect())
-    ch_enriched_diff = GREA_DIFF.out.enrichedGO
+    // GREA method needs adjacency matrix as input
+    ch_adjacency
+        .filter { it[0]["enr_method"] == "grea" }
+        .set { ch_adjacency_grea }

-    ch_cor_adjacency
-        .branch {
-            grea: it[0]["enr_cor_method"] == "grea" }
-        .set { ch_cor_grea }
+    // run GREA
+    GREA(ch_adjacency_grea, ch_gmt.collect())
+    ch_enriched = ch_enriched.mix(GREA.out.enrichedGO)

-    ch_cor_grea.grea.view()
-    ch_diff_grea.grea.view()
+    // ----------------------------------------------------
+    // Perform enrichment analysis with GSEA
+    // ----------------------------------------------------

-    GREA_COR(ch_cor_grea.grea, ch_gmt.collect())
-    ch_enriched_cor = GREA_COR.out.enrichedGO
+    // todo: add gsea here
+    // then we need to add the corresponding input channels to this subworkflow

     emit:
-    enriched_diff = ch_enriched_diff
-    enriched_cor = ch_enriched_cor
+    enriched = ch_enriched
 }
diff --git a/subworkflows/local/experimental/main.nf b/subworkflows/local/experimental/main.nf
index c0235a13..7cc968a3 100644
--- a/subworkflows/local/experimental/main.nf
+++ b/subworkflows/local/experimental/main.nf
@@ -1,12 +1,10 @@
 //
 // Run experimental analysis
 //
-include { CORRELATION } from '../correlation/main.nf'
 include { DIFFERENTIAL } from '../differential/main.nf'
-include { VARIABLE_SELECTION } from '../variable_selection/main.nf'
+include { CORRELATION } from '../correlation/main.nf'
 include { ENRICHMENT } from '../enrichment/main.nf'
-
 workflow EXPERIMENTAL {
     take:
     ch_contrasts
@@ -14,45 +12,54 @@ workflow EXPERIMENTAL {
     ch_samplesheet
     ch_counts
     ch_tools
-
     main:
-    // Perform differential analysis
+
+
+    ch_tools.view()
+
+    // initialize empty results channels
+    ch_results = Channel.empty()        // differential results - it should be a table
+    ch_adjacency = Channel.empty()      // adjacency matrix showing the connections between the genes, with values 1|0
+    ch_matrix = Channel.empty()         // correlation matrix
+    ch_enriched = Channel.empty()       // output table from enrichment analysis
+
+    // ----------------------------------------------------
+    // DIFFERENTIAL ANALYSIS BLOCK
+    // ----------------------------------------------------
+
     DIFFERENTIAL(
-        ch_contrasts,
-        ch_samplesheet,
+        ch_tools,
         ch_counts,
-        ch_tools
+        ch_samplesheet,
+        ch_contrasts
     )
-    ch_diff_results = DIFFERENTIAL.out.results
-    ch_diff_adjacency = DIFFERENTIAL.out.adjacency
+    ch_results = ch_results.mix(DIFFERENTIAL.out.results)
+    ch_adjacency = ch_adjacency.mix(DIFFERENTIAL.out.adjacency)

-    // Perform variable selection
-    ch_counts_filtered = VARIABLE_SELECTION(ch_diff_adjacency, ch_counts)
+    // ----------------------------------------------------
+    // CORRELATION ANALYSIS BLOCK
+    // ----------------------------------------------------

-    // Perform correlation analysis
     CORRELATION(
-        ch_counts,
         ch_tools,
-        ch_counts_filtered
+        ch_counts
     )
-    ch_matrix = CORRELATION.out.matrix
-    ch_cor_adjacency = CORRELATION.out.adjacency
+    ch_matrix = ch_matrix.mix(CORRELATION.out.matrix)
+    ch_adjacency = ch_adjacency.mix(CORRELATION.out.adjacency)
+
+    // ----------------------------------------------------
+    // FUNCTIONAL ENRICHMENT BLOCK
+    // ----------------------------------------------------

-    // Perform enrichment analysis
     ENRICHMENT(
-        ch_diff_adjacency,
-        ch_cor_adjacency,
-        ch_counts
+        ch_counts,
+        ch_results,
+        ch_adjacency
     )
-    ch_enriched_cor = ENRICHMENT.out.enriched_cor
-    ch_enriched_diff = ENRICHMENT.out.enriched_diff
-
-    emit:
-    diff_res = ch_diff_results
-    diff_adj = ch_diff_adjacency
-    var_count = ch_counts_filtered
-    corr_matrix = ch_matrix
-    corr_adj = ch_cor_adjacency
-    enriched_cor = ch_enriched_cor
-    enriched_cor = ch_enriched_diff
+    ch_enriched = ch_enriched.mix(ENRICHMENT.out.enriched)
+
+    // ----------------------------------------------------
+    // VISUALIZATION BLOCK
+    // ----------------------------------------------------
+
+    // TODO: call visualization stuff here
 }
diff --git a/subworkflows/local/variable_selection/main.nf b/subworkflows/local/variable_selection/main.nf
deleted file mode 100644
index 22b35527..00000000
--- a/subworkflows/local/variable_selection/main.nf
+++ /dev/null
@@ -1,34 +0,0 @@
-//
-// Perform variable selection
-//
-include { FILTERVAR } from "../../../modules/local/filtervar/main.nf"
-
-workflow VARIABLE_SELECTION {
-    take:
-    ch_adj          //meta_tools, adj
-    ch_counts       //meta_id, counts
-
-    main:
-    ch_counts
-        .map {
-            metacounts, counts ->
-                [counts]
-        }
-        .combine(ch_adj)
-        .map{
-            counts, meta, adj ->
-                [ meta, counts, adj]
-        }
-        .branch {
-            filtervar: it[0]["sel_method"] == "filtervar"
-            deseqfilter: it[0]["sel_method"] == "deseqfilter"
-        }
-        .set { ch_counts_adj_sel }
-
-    FILTERVAR(ch_counts_adj_sel.filtervar)
-
-    ch_counts_cor = FILTERVAR.out.count
-
-    emit:
-    count = ch_counts_cor
-}