diff --git a/CHANGELOG.md b/CHANGELOG.md index 645a7f436f..2caa4fc82e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,8 +13,12 @@ ### Modules +- Added stub test creation to `create_test_yml` ([#2476](https://github.com/nf-core/tools/pull/2476)) + ### Subworkflows +- Added stub test creation to `create_test_yml` ([#2476](https://github.com/nf-core/tools/pull/2476)) + ### General # [v2.10 - Nickel Ostrich](https://github.com/nf-core/tools/releases/tag/2.10) + [2023-09-25] diff --git a/nf_core/modules/test_yml_builder.py b/nf_core/modules/test_yml_builder.py index 3c23416fd4..092f260fcf 100644 --- a/nf_core/modules/test_yml_builder.py +++ b/nf_core/modules/test_yml_builder.py @@ -142,12 +142,19 @@ def build_all_tests(self): """ Go over each entry point and build structure """ + + # Build the other tests for entry_point in self.entry_points: ep_test = self.build_single_test(entry_point) if ep_test: self.tests.append(ep_test) - def build_single_test(self, entry_point): + # Build the stub test + stub_test = self.build_single_test(self.entry_points[0], stub=True) + if stub_test: + self.tests.append(stub_test) + + def build_single_test(self, entry_point, stub=False): """Given the supplied cli flags, prompt for any that are missing. 
Returns: Test command @@ -158,6 +165,8 @@ def build_single_test(self, entry_point): "tags": [], "files": [], } + stub_option = " -stub" if stub else "" + stub_name = " stub" if stub else "" # Print nice divider line console = rich.console.Console() @@ -166,7 +175,7 @@ def build_single_test(self, entry_point): log.info(f"Building test meta for entry point '{entry_point}'") while ep_test["name"] == "": - default_val = f"{self.module_name.replace('/', ' ')} {entry_point}" + default_val = f"{self.module_name.replace('/', ' ')} {entry_point}{stub_name}" if self.no_prompts: ep_test["name"] = default_val else: @@ -178,6 +187,7 @@ def build_single_test(self, entry_point): default_val = ( f"nextflow run ./tests/modules/{self.org}/{self.module_name} -entry {entry_point} " f"-c ./tests/config/nextflow.config" + f"{stub_option}" ) if self.no_prompts: ep_test["command"] = default_val @@ -200,7 +210,7 @@ def build_single_test(self, entry_point): ).strip() ep_test["tags"] = [t.strip() for t in prompt_tags.split(",")] - ep_test["files"] = self.get_md5_sums(ep_test["command"]) + ep_test["files"] = self.get_md5_sums(ep_test["command"], stub=stub) return ep_test @@ -227,7 +237,7 @@ def _md5(self, fname): md5sum = hash_md5.hexdigest() return md5sum - def create_test_file_dict(self, results_dir, is_repeat=False): + def create_test_file_dict(self, results_dir, is_repeat=False, stub=False): """Walk through directory and collect md5 sums""" test_files = [] for root, _, files in os.walk(results_dir, followlinks=True): @@ -236,13 +246,13 @@ def create_test_file_dict(self, results_dir, is_repeat=False): # add the key here so that it comes first in the dict test_file = {"path": file_path} # Check that this isn't an empty file - if self.check_if_empty_file(file_path): + if self.check_if_empty_file(file_path) and not stub: if not is_repeat: self.errors.append(f"Empty file found! 
'{os.path.basename(file_path)}'") # Add the md5 anyway, linting should fail later and can be manually removed if needed. # Originally we skipped this if empty, but then it's too easy to miss the warning. # Equally, if a file is legitimately empty we don't want to prevent this from working. - if filename != "versions.yml": + if filename != "versions.yml" and not stub: # Only add md5sum if the file is not versions.yml and this is not a stub test file_md5 = self._md5(file_path) test_file["md5sum"] = file_md5 @@ -254,7 +264,7 @@ def create_test_file_dict(self, results_dir, is_repeat=False): return test_files - def get_md5_sums(self, command, results_dir=None, results_dir_repeat=None): + def get_md5_sums(self, command, results_dir=None, results_dir_repeat=None, stub=False): """ Recursively go through directories and subdirectories and generate tuples of (, ) @@ -276,11 +286,11 @@ def get_md5_sums(self, command, results_dir=None, results_dir_repeat=None): log.error(f"Directory '{results_dir}' does not exist") results_dir = None - test_files = self.create_test_file_dict(results_dir=results_dir) + test_files = self.create_test_file_dict(results_dir=results_dir, stub=stub) # If test was repeated, compare the md5 sums if results_dir_repeat: - test_files_repeat = self.create_test_file_dict(results_dir=results_dir_repeat, is_repeat=True) + test_files_repeat = self.create_test_file_dict(results_dir=results_dir_repeat, is_repeat=True, stub=stub) # Compare both test.yml files for i in range(len(test_files)): diff --git a/nf_core/subworkflows/test_yml_builder.py b/nf_core/subworkflows/test_yml_builder.py index 468465c9df..db239093da 100644 --- a/nf_core/subworkflows/test_yml_builder.py +++ b/nf_core/subworkflows/test_yml_builder.py @@ -145,12 +145,19 @@ def build_all_tests(self): """ Go over each entry point and build structure """ + + # Build the other tests for entry_point in self.entry_points: ep_test = self.build_single_test(entry_point) if ep_test: self.tests.append(ep_test) - def 
build_single_test(self, entry_point): + # Build the stub test + stub_test = self.build_single_test(self.entry_points[0], stub=True) + if stub_test: + self.tests.append(stub_test) + + def build_single_test(self, entry_point, stub=False): """Given the supplied cli flags, prompt for any that are missing. Returns: Test command @@ -161,6 +168,8 @@ def build_single_test(self, entry_point): "tags": [], "files": [], } + stub_option = " -stub" if stub else "" + stub_name = " stub" if stub else "" # Print nice divider line console = rich.console.Console() @@ -169,14 +178,14 @@ def build_single_test(self, entry_point): log.info(f"Building test meta for entry point '{entry_point}'") while ep_test["name"] == "": - default_val = f"{self.subworkflow} {entry_point}" + default_val = f"{self.subworkflow} {entry_point}{stub_name}" if self.no_prompts: ep_test["name"] = default_val else: ep_test["name"] = rich.prompt.Prompt.ask("[violet]Test name", default=default_val).strip() while ep_test["command"] == "": - default_val = f"nextflow run ./tests/subworkflows/{self.modules_repo.repo_path}/{self.subworkflow} -entry {entry_point} -c ./tests/config/nextflow.config" + default_val = f"nextflow run ./tests/subworkflows/{self.modules_repo.repo_path}/{self.subworkflow} -entry {entry_point} -c ./tests/config/nextflow.config{stub_option}" if self.no_prompts: ep_test["command"] = default_val else: @@ -195,7 +204,7 @@ def build_single_test(self, entry_point): ).strip() ep_test["tags"] = [t.strip() for t in prompt_tags.split(",")] - ep_test["files"] = self.get_md5_sums(ep_test["command"]) + ep_test["files"] = self.get_md5_sums(ep_test["command"], stub=stub) return ep_test @@ -244,7 +253,7 @@ def _md5(self, fname): md5sum = hash_md5.hexdigest() return md5sum - def create_test_file_dict(self, results_dir, is_repeat=False): + def create_test_file_dict(self, results_dir, is_repeat=False, stub=False): """Walk through directory and collect md5 sums""" test_files = [] for root, _, files in 
os.walk(results_dir, followlinks=True): @@ -256,14 +265,15 @@ def create_test_file_dict(self, results_dir, is_repeat=False): # add the key here so that it comes first in the dict test_file = {"path": file_path} # Check that this isn't an empty file - if self.check_if_empty_file(file_path): + if self.check_if_empty_file(file_path) and not stub: if not is_repeat: self.errors.append(f"Empty file found! '{os.path.basename(file_path)}'") # Add the md5 anyway, linting should fail later and can be manually removed if needed. # Originally we skipped this if empty, but then it's too easy to miss the warning. # Equally, if a file is legitimately empty we don't want to prevent this from working. - file_md5 = self._md5(file_path) - test_file["md5sum"] = file_md5 + if not stub: + file_md5 = self._md5(file_path) + test_file["md5sum"] = file_md5 # Switch out the results directory path with the expected 'output' directory test_file["path"] = file_path.replace(results_dir, "output") test_files.append(test_file) @@ -272,7 +282,7 @@ def create_test_file_dict(self, results_dir, is_repeat=False): return test_files - def get_md5_sums(self, command, results_dir=None, results_dir_repeat=None): + def get_md5_sums(self, command, results_dir=None, results_dir_repeat=None, stub=False): """ Recursively go through directories and subdirectories and generate tuples of (, ) @@ -294,11 +304,11 @@ def get_md5_sums(self, command, results_dir=None, results_dir_repeat=None): log.error(f"Directory '{results_dir}' does not exist") results_dir = None - test_files = self.create_test_file_dict(results_dir=results_dir) + test_files = self.create_test_file_dict(results_dir=results_dir, stub=stub) # If test was repeated, compare the md5 sums if results_dir_repeat: - test_files_repeat = self.create_test_file_dict(results_dir=results_dir_repeat, is_repeat=True) + test_files_repeat = self.create_test_file_dict(results_dir=results_dir_repeat, is_repeat=True, stub=stub) # Compare both test.yml files for i in 
range(len(test_files)):