diff --git a/src/deadline/job_attachments/README.md b/src/deadline/job_attachments/README.md
index 825bcb1d..454c2ea8 100644
--- a/src/deadline/job_attachments/README.md
+++ b/src/deadline/job_attachments/README.md
@@ -41,7 +41,7 @@ These snapshots are encapsulated in one or more [`asset_manifests`](asset_manife
 
 When starting work, the worker downloads the manifest associated with your job, and recreates the file structure of your submission locally, either downloading all files at once, or as needed if using the [virtual][vfs] job attachments filesystem type. When a task completes, the worker creates a new manifest for any outputs that were specified in the job submission, and uploads the manifest and the outputs back to your S3 bucket.
 
-Manifest files are written to a `manifests` directory within each job bundle that is added to the job history if submitted through the GUI (default: `~/.deadline/job_history`). The file path inside the `manifests` directory corresponds to the S3 manifest path in the submitted job's job attachments metadata.
+Manifest files are written to a `manifests` directory within each job bundle that is added to the job history if submitted through the GUI (default: `~/.deadline/job_history`). A corresponding `manifest_s3_mapping` file is created alongside the manifests, mapping each local manifest file to its S3 manifest path in the submitted job's job attachments metadata.
 
 [vfs]: https://docs.aws.amazon.com/deadline-cloud/latest/userguide/storage-virtual.html
 
diff --git a/src/deadline/job_attachments/upload.py b/src/deadline/job_attachments/upload.py
index 87d13b56..cdd853b3 100644
--- a/src/deadline/job_attachments/upload.py
+++ b/src/deadline/job_attachments/upload.py
@@ -170,15 +170,6 @@ def upload_assets(
         )
         manifest_name = f"{manifest_name_prefix}_input"
 
-        if manifest_write_dir:
-            local_manifest_file = os.path.join(
-                manifest_write_dir, "manifests", partial_manifest_prefix, manifest_name
-            )
-            logger.info(f"Creating local manifest file: {local_manifest_file}\n")
-            os.makedirs(os.path.dirname(local_manifest_file), exist_ok=True)
-            with open(local_manifest_file, "w") as file:
-                file.write(manifest.encode())
-
         if partial_manifest_prefix:
             partial_manifest_key = _join_s3_paths(partial_manifest_prefix, manifest_name)
         else:
@@ -188,6 +179,11 @@
             partial_manifest_key
         )
 
+        if manifest_write_dir:
+            self._write_local_manifest(
+                manifest_write_dir, manifest_name, full_manifest_key, manifest
+            )
+
         self.upload_bytes_to_s3(
             bytes=BytesIO(manifest_bytes),
             bucket=job_attachment_settings.s3BucketName,
@@ -206,6 +202,30 @@
 
         return (partial_manifest_key, hash_data(manifest_bytes, hash_alg))
 
+    def _write_local_manifest(
+        self,
+        manifest_write_dir: str,
+        manifest_name: str,
+        full_manifest_key: str,
+        manifest: BaseAssetManifest,
+    ) -> None:
+        """
+        Writes a manifest file locally in a 'manifests' sub-directory.
+        Also creates or appends to a mapping file in the same directory, pairing each local manifest name with its full S3 key.
+        """
+        local_manifest_file = Path(manifest_write_dir, "manifests", manifest_name)
+        logger.info(f"Creating local manifest file: {local_manifest_file}\n")
+        local_manifest_file.parent.mkdir(parents=True, exist_ok=True)
+        with open(local_manifest_file, "w") as file:
+            file.write(manifest.encode())
+
+        # Create or append to an existing mapping file. A mapping file is used because recreating
+        # the full S3 key path locally could exceed the file name length limit on Windows.
+        manifest_map_file = Path(manifest_write_dir, "manifests", "manifest_s3_mapping")
+        mapping = {"local_file": manifest_name, "s3_key": full_manifest_key}
+        with open(manifest_map_file, "a") as mapping_file:
+            mapping_file.write(f"{mapping}\n")
+
     def upload_input_files(
         self,
         manifest: BaseAssetManifest,
diff --git a/test/unit/deadline_job_attachments/test_upload.py b/test/unit/deadline_job_attachments/test_upload.py
index 4353b897..566f11c7 100644
--- a/test/unit/deadline_job_attachments/test_upload.py
+++ b/test/unit/deadline_job_attachments/test_upload.py
@@ -94,6 +94,7 @@ def test_asset_management(
         tmpdir: py.path.local,
         farm_id,
         queue_id,
+        default_job_attachment_s3_settings,
         assert_canonical_manifest,
         assert_expected_files_on_s3,
         caplog,
@@ -128,16 +129,12 @@ def test_asset_management(
         output_dir2 = tmpdir.join("outputs").join("textures")
         history_dir = tmpdir.join("history")
 
-        expected_manifest_file = (
-            history_dir.join("manifests")
-            .join(farm_id)
-            .join(queue_id)
-            .join("Inputs")
-            .join("0000")
-            .join("e_input")
-        )
+        expected_manifest_file = history_dir.join("manifests").join("e_input")
+        expected_mapping_file = history_dir.join("manifests").join("manifest_s3_mapping")
+        expected_mapping_contents = f"{{'local_file': 'e_input', 's3_key': '{default_job_attachment_s3_settings.rootPrefix}/Manifests/{farm_id}/{queue_id}/Inputs/0000/e_input'}}\n"
         assert not os.path.exists(history_dir)
         assert not os.path.exists(expected_manifest_file)
+        assert not os.path.exists(expected_mapping_file)
 
         expected_total_input_bytes = (
             scene_file.size() + texture_file.size() + normal_file.size() + meta_file.size()
@@ -252,6 +249,11 @@
         # Ensure we wrote our manifest file locally
         assert os.path.exists(expected_manifest_file)
         assert os.path.isfile(expected_manifest_file)
+        assert os.path.exists(expected_mapping_file)
+        assert os.path.isfile(expected_mapping_file)
+        with open(expected_mapping_file, "r") as mapping_file:
+            actual_contents = mapping_file.read()
+        assert actual_contents == expected_mapping_contents
 
         assert_progress_report_last_callback(
             num_input_files=4,
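A note on the mapping-file format introduced by this change: each `mapping_file.write(f"{mapping}\n")` call appends the `repr` of a Python dict, one entry per line, using single quotes rather than JSON (which is exactly what the test's `expected_mapping_contents` string asserts). Below is a minimal sketch of how a consumer could read the file back; the helper name `read_manifest_s3_mapping` is hypothetical and not part of this change:

```python
import ast
from pathlib import Path
from typing import Dict, List


def read_manifest_s3_mapping(manifest_write_dir: str) -> List[Dict[str, str]]:
    """Parse the manifest_s3_mapping file written next to the local manifests.

    Each line looks like:
        {'local_file': 'e_input', 's3_key': 'Prefix/Manifests/.../Inputs/0000/e_input'}
    The entries are Python dict reprs rather than JSON (single quotes), so
    ast.literal_eval, which safely evaluates Python literals, is used here
    instead of json.loads.
    """
    mapping_path = Path(manifest_write_dir, "manifests", "manifest_s3_mapping")
    entries: List[Dict[str, str]] = []
    with open(mapping_path) as mapping_file:
        for line in mapping_file:
            line = line.strip()
            if line:  # tolerate blank lines
                entries.append(ast.literal_eval(line))
    return entries
```

One design consequence worth flagging: `json.loads` would reject these lines because `repr` emits single quotes, so if external tooling ever needs to consume `manifest_s3_mapping`, writing `json.dumps(mapping)` instead would keep the same one-entry-per-line layout while staying language-agnostic.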