diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index 5faeef49da..d5345bdb33 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -108,6 +108,12 @@ jobs: sudo add-apt-repository --remove ppa:git-core/ppa sudo apt install -y git + - name: Set up Singularity + if: ${{ matrix.test == 'test_download.py'}} + uses: eWaterCycle/setup-singularity@931d4e31109e875b13309ae1d07c70ca8fbc8537 # v7 + with: + singularity-version: 3.8.3 + - name: Get current date id: date run: echo "date=$(date +'%Y-%m')" >> $GITHUB_ENV diff --git a/CHANGELOG.md b/CHANGELOG.md index e42b5e9d3c..d2e5d2b534 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ - Remove obsolete editor settings in `devcontainer.json` and `gitpod.yml` ([#2795](https://github.com/nf-core/tools/pull/2795)) - Add nf-test test instructions to contributing and PR template ([#2807](https://github.com/nf-core/tools/pull/2807)) +### Download + +- Improved offline container image resolution by introducing symlinks, fixes issues [#2751](https://github.com/nf-core/tools/issues/2751), [#2644](https://github.com/nf-core/tools/issues/2644) and [demultiplex#164](https://github.com/nf-core/demultiplex/issues/164): ([#2768](https://github.com/nf-core/tools/pull/2768)) + ### Linting ### Components diff --git a/nf_core/download.py b/nf_core/download.py index bb7b2ae473..d08e0ba40e 100644 --- a/nf_core/download.py +++ b/nf_core/download.py @@ -1,6 +1,5 @@ """Downloads a nf-core pipeline to the local file system.""" - import concurrent.futures import io import logging @@ -11,6 +10,7 @@ import tarfile import textwrap from datetime import datetime +from typing import List, Optional, Tuple from zipfile import ZipFile import git @@ -34,13 +34,17 @@ log = logging.getLogger(__name__) stderr = rich.console.Console( - stderr=True, style="dim", highlight=False, force_terminal=nf_core.utils.rich_force_colors() + stderr=True, + style="dim", + highlight=False, + 
force_terminal=nf_core.utils.rich_force_colors(), ) class DownloadError(RuntimeError): """A custom exception that is raised when nf-core download encounters a problem that we already took into consideration. - In this case, we do not want to print the traceback, but give the user some concise, helpful feedback instead.""" + In this case, we do not want to print the traceback, but give the user some concise, helpful feedback instead. + """ class DownloadProgress(rich.progress.Progress): @@ -130,6 +134,8 @@ def __init__( self.container_library = [*container_library] else: self.container_library = ["quay.io"] + # Create a new set and add all values from self.container_library (CLI arguments to --container-library) + self.registry_set = set(self.container_library) if hasattr(self, "container_library") else set() # if a container_cache_index is given, use the file and overrule choice. self.container_cache_utilisation = "remote" if container_cache_index else container_cache_utilisation self.container_cache_index = container_cache_index @@ -255,6 +261,7 @@ def download_workflow_static(self): # Collect all required singularity images if self.container_system == "singularity": self.find_container_images(os.path.join(self.outdir, revision_dirname)) + self.gather_registries(os.path.join(self.outdir, revision_dirname)) try: self.get_singularity_images(current_revision=item[0]) @@ -275,7 +282,7 @@ def download_workflow_tower(self, location=None): remote_url=f"https://github.com/{self.pipeline}.git", revision=self.revision if self.revision else None, commit=self.wf_sha.values() if bool(self.wf_sha) else None, - location=location if location else None, # manual location is required for the tests to work + location=(location if location else None), # manual location is required for the tests to work in_cache=False, ) @@ -292,6 +299,7 @@ def download_workflow_tower(self, location=None): self.workflow_repo.checkout(commit) # Collect all required singularity images 
self.find_container_images(self.workflow_repo.access()) + self.gather_registries(self.workflow_repo.access()) try: self.get_singularity_images(current_revision=revision) @@ -335,7 +343,7 @@ def prompt_revision(self): if bool(choice): # have to make sure that self.revision is a list of strings, regardless if choice is str or list of strings. - self.revision.append(choice) if isinstance(choice, str) else self.revision.extend(choice) + (self.revision.append(choice) if isinstance(choice, str) else self.revision.extend(choice)) else: if bool(tag_set): self.revision = tag_set @@ -362,7 +370,8 @@ def get_revision_hash(self): else: log.info( "Available {} revisions: '{}'".format( - self.pipeline, "', '".join([r["tag_name"] for r in self.wf_revisions]) + self.pipeline, + "', '".join([r["tag_name"] for r in self.wf_revisions]), ) ) log.info("Available {} branches: '{}'".format(self.pipeline, "', '".join(self.wf_branches.keys()))) @@ -425,7 +434,9 @@ def prompt_singularity_cachedir_creation(self): cachedir_path = None while cachedir_path is None: prompt_cachedir_path = questionary.path( - "Specify the path:", only_directories=True, style=nf_core.utils.nfcore_question_style + "Specify the path:", + only_directories=True, + style=nf_core.utils.nfcore_question_style, ).unsafe_ask() cachedir_path = os.path.abspath(os.path.expanduser(prompt_cachedir_path)) if prompt_cachedir_path == "": @@ -603,7 +614,10 @@ def download_wf_files(self, revision, wf_sha, download_url): # Rename the internal directory name to be more friendly gh_name = f"{self.pipeline}-{wf_sha if bool(wf_sha) else ''}".split("/")[-1] - os.rename(os.path.join(self.outdir, gh_name), os.path.join(self.outdir, revision_dirname)) + os.rename( + os.path.join(self.outdir, gh_name), + os.path.join(self.outdir, revision_dirname), + ) # Make downloaded files executable for dirpath, _, filelist in os.walk(os.path.join(self.outdir, revision_dirname)): @@ -624,7 +638,10 @@ def download_configs(self): 
zipfile.extractall(self.outdir) # Rename the internal directory name to be more friendly - os.rename(os.path.join(self.outdir, configs_local_dir), os.path.join(self.outdir, "configs")) + os.rename( + os.path.join(self.outdir, configs_local_dir), + os.path.join(self.outdir, "configs"), + ) # Make downloaded files executable for dirpath, _, filelist in os.walk(os.path.join(self.outdir, "configs")): @@ -697,7 +714,7 @@ def find_container_images(self, workflow_directory): config_findings_dsl2 = re.findall(config_regex, v) if bool(config_findings_dsl2): - # finding fill always be a tuple of length 2, first the quote used and second the enquoted value. + # finding will always be a tuple of length 2, first the quote used and second the enquoted value. for finding in config_findings_dsl2: config_findings.append(finding + (self.nf_config, "Nextflow configs")) else: # no regex match, likely just plain string @@ -734,7 +751,8 @@ def find_container_images(self, workflow_directory): re.DOTALL is used to account for the string to be spread out across multiple lines. """ container_regex = re.compile( - r"container\s+[\\s{}=$]*(?P[\'\"])(?P(?:.(?!\1))*.?)\1[\\s}]*", re.DOTALL + r"container\s+[\\s{}=$]*(?P[\'\"])(?P(?:.(?!\1))*.?)\1[\\s}]*", + re.DOTALL, ) local_module_findings = re.findall(container_regex, search_space) @@ -856,7 +874,8 @@ def rectify_raw_container_matches(self, raw_findings): """ container_value_defs = re.findall( - r"[^\"\'](?P(?(?:.(?!(?(?(?:.(?!(? None: + """Fetch the registries from the pipeline config and CLI arguments and store them in a set. + This is needed to symlink downloaded container images so Nextflow will find them. 
+ """ + + # should exist, because find_container_images() is always called before + if not self.nf_config: + self.nf_config = nf_core.utils.fetch_wf_config(workflow_directory) + + # Select registries defined in pipeline config + configured_registries = [ + "apptainer.registry", + "docker.registry", + "podman.registry", + "singularity.registry", + ] + + for registry in configured_registries: + if registry in self.nf_config: + self.registry_set.add(self.nf_config[registry]) + + # add depot.galaxyproject.org to the set, because it is the default registry for singularity hardcoded in modules + self.registry_set.add("depot.galaxyproject.org") + + def symlink_singularity_images(self, image_out_path: str) -> None: + """Create a symlink for each registry in the registry set that points to the image. + We have dropped the explicit registries from the modules in favor of the configurable registries. + Unfortunately, Nextflow still expects the registry to be part of the file name, so a symlink is needed. + + The base image, e.g. ./nf-core-gatk-4.4.0.0.img will thus be symlinked as for example ./quay.io-nf-core-gatk-4.4.0.0.img + by prepending all registries in self.registry_set to the image name. + + Unfortunately, our output image name may contain a registry definition (Singularity image pulled from depot.galaxyproject.org + or older pipeline version, where the docker registry was part of the image name in the modules). Hence, it must be stripped + before to ensure that it is really the base name. + """ + + if self.registry_set: + # Create a regex pattern from the set, in case trimming is needed. + trim_pattern = "|".join(f"^{re.escape(registry)}-?" 
for registry in self.registry_set) + + for registry in self.registry_set: + if not os.path.basename(image_out_path).startswith(registry): + symlink_name = os.path.join("./", f"{registry}-{os.path.basename(image_out_path)}") + else: + trimmed_name = re.sub(f"{trim_pattern}", "", os.path.basename(image_out_path)) + symlink_name = os.path.join("./", f"{registry}-{trimmed_name}") + + symlink_full = os.path.join(os.path.dirname(image_out_path), symlink_name) + target_name = os.path.join("./", os.path.basename(image_out_path)) + + if not os.path.exists(symlink_full) and target_name != symlink_name: + os.makedirs(os.path.dirname(symlink_full), exist_ok=True) + image_dir = os.open(os.path.dirname(image_out_path), os.O_RDONLY) + try: + os.symlink( + target_name, + symlink_name, + dir_fd=image_dir, + ) + log.debug(f"Symlinked {target_name} as {symlink_name}.") + finally: + os.close(image_dir) + + def get_singularity_images(self, current_revision: str = "") -> None: """Loop through container names and download Singularity images""" if len(self.containers) == 0: @@ -972,14 +1055,16 @@ def get_singularity_images(self, current_revision=""): with DownloadProgress() as progress: task = progress.add_task( - "Collecting container images", total=len(self.containers), progress_type="summary" + "Collecting container images", + total=len(self.containers), + progress_type="summary", ) # Organise containers based on what we need to do with them - containers_exist = [] - containers_cache = [] - containers_download = [] - containers_pull = [] + containers_exist: List[str] = [] + containers_cache: List[Tuple[str, str, Optional[str]]] = [] + containers_download: List[Tuple[str, str, Optional[str]]] = [] + containers_pull: List[Tuple[str, str, Optional[str]]] = [] for container in self.containers: # Fetch the output and cached filenames for this container out_path, cache_path = self.singularity_image_filenames(container) @@ -1002,16 +1087,16 @@ def get_singularity_images(self, 
current_revision=""): # We have a copy of this in the NXF_SINGULARITY_CACHE dir if cache_path and os.path.exists(cache_path): - containers_cache.append([container, out_path, cache_path]) + containers_cache.append((container, out_path, cache_path)) continue # Direct download within Python if container.startswith("http"): - containers_download.append([container, out_path, cache_path]) + containers_download.append((container, out_path, cache_path)) continue # Pull using singularity - containers_pull.append([container, out_path, cache_path]) + containers_pull.append((container, out_path, cache_path)) # Exit if we need to pull images and Singularity is not installed if len(containers_pull) > 0: @@ -1043,8 +1128,8 @@ def get_singularity_images(self, current_revision=""): # Kick off concurrent downloads future_downloads = [ - pool.submit(self.singularity_download_image, *container, progress) - for container in containers_download + pool.submit(self.singularity_download_image, *containers, progress) + for containers in containers_download ] # Make ctrl-c work with multi-threading @@ -1069,13 +1154,13 @@ def get_singularity_images(self, current_revision=""): # Re-raise exception on the main thread raise - for container in containers_pull: + for containers in containers_pull: progress.update(task, description="Pulling singularity images") # it is possible to try multiple registries / mirrors if multiple were specified. # Iteration happens over a copy of self.container_library[:], as I want to be able to remove failing registries for subsequent images. 
for library in self.container_library[:]: try: - self.singularity_pull_image(*container, library, progress) + self.singularity_pull_image(*containers, library, progress) # Pulling the image was successful, no ContainerError was raised, break the library loop break except ContainerError.ImageExistsError: @@ -1112,12 +1197,12 @@ def get_singularity_images(self, current_revision=""): # The else clause executes after the loop completes normally. # This means the library loop completed without breaking, indicating failure for all libraries (registries) log.error( - f"Not able to pull image of {container}. Service might be down or internet connection is dead." + f"Not able to pull image of {containers}. Service might be down or internet connection is dead." ) # Task should advance in any case. Failure to pull will not kill the download process. progress.update(task, advance=1) - def singularity_image_filenames(self, container): + def singularity_image_filenames(self, container: str) -> Tuple[str, Optional[str]]: """Check Singularity cache for image, copy to destination folder if found. Args: @@ -1125,8 +1210,11 @@ def singularity_image_filenames(self, container): or a Docker Hub repository ID. Returns: - results (bool, str): Returns True if we have the image in the target location. - Returns a download path if not. + tuple (str, str): Returns a tuple of (out_path, cache_path). + out_path is the final target output path. it may point to the NXF_SINGULARITY_CACHEDIR, if cache utilisation was set to 'amend'. + If cache utilisation was set to 'copy', it will point to the target folder, a subdirectory of the output directory. In the latter case, + cache_path may either be None (image is not yet cached locally) or point to the image in the NXF_SINGULARITY_CACHEDIR, so it will not be + downloaded from the web again, but directly copied from there. See get_singularity_images() for implementation. 
""" # Generate file paths @@ -1148,6 +1236,15 @@ def singularity_image_filenames(self, container): # Add file extension out_name = out_name + extension + # Trim potential registries from the name for consistency. + # This will allow pipelines to work offline without symlinked images, + # if docker.registry / singularity.registry are set to empty strings at runtime, which can be included in the HPC config profiles easily. + if self.registry_set: + # Create a regex pattern from the set of registries + trim_pattern = "|".join(f"^{re.escape(registry)}-?" for registry in self.registry_set) + # Use the pattern to trim the string + out_name = re.sub(f"{trim_pattern}", "", out_name) + # Full destination and cache paths out_path = os.path.abspath(os.path.join(self.outdir, "singularity-images", out_name)) cache_path = None @@ -1162,14 +1259,18 @@ def singularity_image_filenames(self, container): return (out_path, cache_path) - def singularity_copy_cache_image(self, container, out_path, cache_path): + def singularity_copy_cache_image(self, container: str, out_path: str, cache_path: Optional[str]) -> None: """Copy Singularity image from NXF_SINGULARITY_CACHEDIR to target folder.""" # Copy to destination folder if we have a cached version if cache_path and os.path.exists(cache_path): log.debug(f"Copying {container} from cache: '{os.path.basename(out_path)}'") shutil.copyfile(cache_path, out_path) + # Create symlinks to ensure that the images are found even with different registries being used. + self.symlink_singularity_images(out_path) - def singularity_download_image(self, container, out_path, cache_path, progress): + def singularity_download_image( + self, container: str, out_path: str, cache_path: Optional[str], progress: DownloadProgress + ) -> None: """Download a singularity image from the web. Use native Python to download the file. 
@@ -1216,7 +1317,6 @@ def singularity_download_image(self, container, out_path, cache_path, progress): # Rename partial filename to final filename os.rename(output_path_tmp, output_path) - output_path_tmp = None # Copy cached download if we are using the cache if cache_path: @@ -1224,6 +1324,9 @@ def singularity_download_image(self, container, out_path, cache_path, progress): progress.update(task, description="Copying from cache to target directory") shutil.copyfile(cache_path, out_path) + # Create symlinks to ensure that the images are found even with different registries being used. + self.symlink_singularity_images(output_path) + progress.remove_task(task) except: @@ -1238,8 +1341,12 @@ def singularity_download_image(self, container, out_path, cache_path, progress): os.remove(output_path) # Re-raise the caught exception raise + finally: + del output_path_tmp - def singularity_pull_image(self, container, out_path, cache_path, library, progress): + def singularity_pull_image( + self, container: str, out_path: str, cache_path: Optional[str], library: List[str], progress: DownloadProgress + ) -> None: """Pull a singularity image using ``singularity pull`` Attempt to use a local installation of singularity to pull the image. @@ -1254,6 +1361,11 @@ def singularity_pull_image(self, container, out_path, cache_path, library, progr """ output_path = cache_path or out_path + # where the output of 'singularity pull' is first generated before being copied to the NXF_SINGULARITY_CACHEDIR. + # if not defined by the Singularity administrators, then use the temporary directory to avoid storing the images in the work directory. + if os.environ.get("SINGULARITY_CACHEDIR") is None: + os.environ["SINGULARITY_CACHEDIR"] = NFCORE_CACHE_DIR + + # Sometimes, container still contain an explicit library specification, which # resulted in attempted pulls e.g. from docker://quay.io/quay.io/qiime2/core:2022.11 # Thus, if an explicit registry is specified, the provided -l value is ignored. 
@@ -1266,7 +1378,13 @@ def singularity_pull_image(self, container, out_path, cache_path, library, progr absolute_URI = False if shutil.which("singularity"): - singularity_command = ["singularity", "pull", "--name", output_path, address] + singularity_command = [ + "singularity", + "pull", + "--name", + output_path, + address, + ] elif shutil.which("apptainer"): singularity_command = ["apptainer", "pull", "--name", output_path, address] else: @@ -1275,7 +1393,13 @@ def singularity_pull_image(self, container, out_path, cache_path, library, progr log.debug(f"Singularity command: {' '.join(singularity_command)}") # Progress bar to show that something is happening - task = progress.add_task(container, start=False, total=False, progress_type="singularity_pull", current_log="") + task = progress.add_task( + container, + start=False, + total=False, + progress_type="singularity_pull", + current_log="", + ) # Run the singularity pull command with subprocess.Popen( @@ -1286,9 +1410,10 @@ def singularity_pull_image(self, container, out_path, cache_path, library, progr bufsize=1, ) as proc: lines = [] - for line in proc.stdout: - lines.append(line) - progress.update(task, current_log=line.strip()) + if proc.stdout is not None: + for line in proc.stdout: + lines.append(line) + progress.update(task, current_log=line.strip()) if lines: # something went wrong with the container retrieval @@ -1310,9 +1435,12 @@ def singularity_pull_image(self, container, out_path, cache_path, library, progr progress.update(task, current_log="Copying from cache to target directory") shutil.copyfile(cache_path, out_path) + # Create symlinks to ensure that the images are found even with different registries being used. 
+ self.symlink_singularity_images(output_path) + progress.remove_task(task) - def compress_download(self): + def compress_download(self) -> None: """Take the downloaded files and make a compressed .tar.gz archive.""" log.debug(f"Creating archive: {self.output_filename}") @@ -1584,7 +1712,16 @@ def bare_clone(self, destination): class ContainerError(Exception): """A class of errors related to pulling containers with Singularity/Apptainer""" - def __init__(self, container, registry, address, absolute_URI, out_path, singularity_command, error_msg): + def __init__( + self, + container, + registry, + address, + absolute_URI, + out_path, + singularity_command, + error_msg, + ): self.container = container self.registry = registry self.address = address @@ -1600,6 +1737,7 @@ def __init__(self, container, registry, address, absolute_URI, out_path, singula elif ( re.search(r"requested\saccess\sto\sthe\sresource\sis\sdenied", line) or re.search(r"StatusCode:\s404", line) + or re.search(r"400|Bad\s?Request", line) or re.search(r"invalid\sstatus\scode\sfrom\sregistry\s400", line) ): # Unfortunately, every registry seems to return an individual error here: diff --git a/tests/test_download.py b/tests/test_download.py index 7f34f7fbc6..d823040247 100644 --- a/tests/test_download.py +++ b/tests/test_download.py @@ -352,7 +352,95 @@ def test_get_singularity_images(self, tmp_path, mock_fetch_wf_config): # Test that they are all caught inside get_singularity_images(). 
download_obj.get_singularity_images() + @with_temporary_folder + @mock.patch("os.makedirs") + @mock.patch("os.symlink") + @mock.patch("os.open") + @mock.patch("os.close") + @mock.patch("re.sub") + @mock.patch("os.path.basename") + @mock.patch("os.path.dirname") + def test_symlink_singularity_images( + self, + tmp_path, + mock_dirname, + mock_basename, + mock_resub, + mock_close, + mock_open, + mock_symlink, + mock_makedirs, + ): + # Setup + mock_resub.return_value = "singularity-image.img" + mock_dirname.return_value = f"{tmp_path}/path/to" + mock_basename.return_value = "quay.io-singularity-image.img" + mock_open.return_value = 12 # file descriptor + mock_close.return_value = 12 # file descriptor + + download_obj = DownloadWorkflow( + pipeline="dummy", + outdir=tmp_path, + container_library=("mirage-the-imaginative-registry.io", "quay.io"), + ) + + # Call the method + download_obj.symlink_singularity_images(f"{tmp_path}/path/to/quay.io-singularity-image.img") + print(mock_resub.call_args) + + # Check that os.makedirs was called with the correct arguments + mock_makedirs.assert_any_call(f"{tmp_path}/path/to", exist_ok=True) + + # Check that os.open was called with the correct arguments + mock_open.assert_called_once_with(f"{tmp_path}/path/to", os.O_RDONLY) + + # Check that os.symlink was called with the correct arguments + mock_symlink.assert_any_call( + "./quay.io-singularity-image.img", + "./mirage-the-imaginative-registry.io-quay.io-singularity-image.img", + dir_fd=12, + ) + # Check that there is no attempt to symlink to itself (test parameters would result in that behavior if not checked in the function) + assert ( + unittest.mock.call("./quay.io-singularity-image.img", "./quay.io-singularity-image.img", dir_fd=12) + not in mock_symlink.call_args_list + ) + + # + # Test for gather_registries' + # + @with_temporary_folder + @mock.patch("nf_core.utils.fetch_wf_config") + def test_gather_registries(self, tmp_path, mock_fetch_wf_config): + download_obj = 
DownloadWorkflow( + pipeline="dummy", + outdir=tmp_path, + container_library=None, + ) + mock_fetch_wf_config.return_value = { + "apptainer.registry": "apptainer-registry.io", + "docker.registry": "docker.io", + "podman.registry": "podman-registry.io", + "singularity.registry": "singularity-registry.io", + "someother.registry": "fake-registry.io", + } + download_obj.gather_registries(tmp_path) + assert download_obj.registry_set + assert isinstance(download_obj.registry_set, set) + assert len(download_obj.registry_set) == 6 + + assert "quay.io" in download_obj.registry_set # default registry, if no container library is provided. + assert "depot.galaxyproject.org" in download_obj.registry_set # default registry, often hardcoded in modules + assert "apptainer-registry.io" in download_obj.registry_set + assert "docker.io" in download_obj.registry_set + assert "podman-registry.io" in download_obj.registry_set + assert "singularity-registry.io" in download_obj.registry_set + # it should only pull the apptainer, docker, podman and singularity registry from the config, but not any registry. + assert "fake-registry.io" not in download_obj.registry_set + + # # If Singularity is not installed, it raises a OSError because the singularity command can't be found. 
+ # @pytest.mark.skipif( shutil.which("singularity") is not None, reason="Can't test how the code behaves when singularity is not installed if it is.", @@ -366,6 +454,68 @@ def test_singularity_pull_image_singularity_not_installed(self, tmp_dir, mock_ri "a-container", f"{tmp_dir}/anothercontainer.sif", None, "quay.io", mock_rich_progress ) + # + # Test for 'singularity_image_filenames' function + # + @with_temporary_folder + def test_singularity_image_filenames(self, tmp_path): + os.environ["NXF_SINGULARITY_CACHEDIR"] = f"{tmp_path}/cachedir" + + download_obj = DownloadWorkflow(pipeline="dummy", outdir=tmp_path) + download_obj.outdir = tmp_path + download_obj.container_cache_utilisation = "amend" + download_obj.registry_set = {"docker.io", "quay.io", "depot.galaxyproject.org"} + + ## Test phase I: Container not yet cached, should be amended to cache + # out_path: str, Path to cache + # cache_path: None + + result = download_obj.singularity_image_filenames( + "https://depot.galaxyproject.org/singularity/bbmap:38.93--he522d1c_0" + ) + + # Assert that the result is a tuple of length 2 + self.assertIsInstance(result, tuple) + self.assertEqual(len(result), 2) + + # Assert that the types of the elements are (str, None) + self.assertTrue(isinstance(result[0], str) and result[1] is None) + + # assert that the correct out_path is returned that points to the cache + assert result[0].endswith("/cachedir/singularity-bbmap-38.93--he522d1c_0.img") + + ## Test phase II: Test various container names + # out_path: str, Path to cache + # cache_path: None + result = download_obj.singularity_image_filenames( + "quay.io/biocontainers/mulled-v2-1fa26d1ce03c295fe2fdcf85831a92fbcbd7e8c2:59cdd445419f14abac76b31dd0d71217994cbcc9-0" + ) + assert result[0].endswith( + "/cachedir/biocontainers-mulled-v2-1fa26d1ce03c295fe2fdcf85831a92fbcbd7e8c2-59cdd445419f14abac76b31dd0d71217994cbcc9-0.img" + ) + + result = download_obj.singularity_image_filenames("nf-core/ubuntu:20.04") 
+ assert result[0].endswith("/cachedir/nf-core-ubuntu-20.04.img") + + ## Test phase III: Container will be cached but also copied to out_path + # out_path: str, Path to target folder + # cache_path: str, Path to cache + download_obj.container_cache_utilisation = "copy" + result = download_obj.singularity_image_filenames( + "https://depot.galaxyproject.org/singularity/bbmap:38.93--he522d1c_0" + ) + + self.assertTrue(all(isinstance(element, str) for element in result)) + assert result[0].endswith("/singularity-images/singularity-bbmap-38.93--he522d1c_0.img") + assert result[1].endswith("/cachedir/singularity-bbmap-38.93--he522d1c_0.img") + + ## Test phase IV: Expect an error if no NXF_SINGULARITY_CACHEDIR is defined + os.environ["NXF_SINGULARITY_CACHEDIR"] = "" + with self.assertRaises(FileNotFoundError): + download_obj.singularity_image_filenames( + "https://depot.galaxyproject.org/singularity/bbmap:38.93--he522d1c_0" + ) + # # Test for '--singularity-cache remote --singularity-cache-index'. Provide a list of containers already available in a remote location. #