diff --git a/Cargo.lock b/Cargo.lock
index e672579084e..4413c54deb8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1217,6 +1217,7 @@ dependencies = [
  "nydus-utils",
  "serde",
  "serde_json",
+ "thiserror",
  "vm-memory",
  "vmm-sys-util",
 ]
diff --git a/builder/src/merge.rs b/builder/src/merge.rs
index 56b0c8d3a94..31ad2937a13 100644
--- a/builder/src/merge.rs
+++ b/builder/src/merge.rs
@@ -8,10 +8,10 @@ use std::convert::TryFrom;
 use std::path::PathBuf;
 use std::sync::Arc;
 
-use anyhow::{anyhow, bail, ensure, Context, Result};
+use anyhow::{anyhow, bail, Context, Ok, Result};
 use hex::FromHex;
 use nydus_api::ConfigV2;
-use nydus_rafs::metadata::{RafsSuper, RafsVersion};
+use nydus_rafs::metadata::{MergeError, RafsSuper, RafsVersion};
 use nydus_storage::device::{BlobFeatures, BlobInfo};
 use nydus_utils::crypt;
 
@@ -83,49 +83,56 @@ impl Merger {
         target: ArtifactStorage,
         chunk_dict: Option<PathBuf>,
         config_v2: Arc<ConfigV2>,
-    ) -> Result<BuildOutput> {
+    ) -> Result<BuildOutput, MergeError> {
         if sources.is_empty() {
-            bail!("source bootstrap list is empty , at least one bootstrap is required");
+            return Err(MergeError::Other(anyhow!(
+                "source bootstrap list is empty , at least one bootstrap is required"
+            )));
         }
         if let Some(digests) = blob_digests.as_ref() {
-            ensure!(
-                digests.len() == sources.len(),
-                "number of blob digest entries {} doesn't match number of sources {}",
-                digests.len(),
-                sources.len(),
-            );
+            if digests.len() != sources.len() {
+                return Err(MergeError::Other(anyhow!(
+                    "number of blob digest entries {} doesn't match number of sources {}",
+                    digests.len(),
+                    sources.len()
+                )));
+            }
         }
         if let Some(original_ids) = original_blob_ids.as_ref() {
-            ensure!(
-                original_ids.len() == sources.len(),
-                "number of original blob id entries {} doesn't match number of sources {}",
-                original_ids.len(),
-                sources.len(),
-            );
+            if original_ids.len() != sources.len() {
+                return Err(MergeError::Other(anyhow!(
+                    "number of original blob id entries {} doesn't match number of sources {}",
+                    original_ids.len(),
+                    sources.len()
+                )));
+            }
         }
         if let Some(sizes) = blob_sizes.as_ref() {
-            ensure!(
-                sizes.len() == sources.len(),
-                "number of blob size entries {} doesn't match number of sources {}",
-                sizes.len(),
-                sources.len(),
-            );
+            if sizes.len() != sources.len() {
+                return Err(MergeError::Other(anyhow!(
+                    "number of blob size entries {} doesn't match number of sources {}",
+                    sizes.len(),
+                    sources.len()
+                )));
+            }
         }
         if let Some(toc_digests) = blob_toc_digests.as_ref() {
-            ensure!(
-                toc_digests.len() == sources.len(),
-                "number of toc digest entries {} doesn't match number of sources {}",
-                toc_digests.len(),
-                sources.len(),
-            );
+            if toc_digests.len() != sources.len() {
+                return Err(MergeError::Other(anyhow!(
+                    "number of toc digest entries {} doesn't match number of sources {}",
+                    toc_digests.len(),
+                    sources.len()
+                )));
+            }
         }
         if let Some(sizes) = blob_toc_sizes.as_ref() {
-            ensure!(
-                sizes.len() == sources.len(),
-                "number of toc size entries {} doesn't match number of sources {}",
-                sizes.len(),
-                sources.len(),
-            );
+            if sizes.len() != sources.len() {
+                return Err(MergeError::Other(anyhow!(
+                    "number of toc size entries {0} doesn't match number of sources {1}",
+                    sizes.len(),
+                    sources.len()
+                )));
+            }
         }
 
         let mut tree: Option<Tree> = None;
@@ -177,7 +184,11 @@ impl Merger {
             match rs.meta.get_cipher() {
                 crypt::Algorithm::None => (),
                 crypt::Algorithm::Aes128Xts => ctx.cipher = crypt::Algorithm::Aes128Xts,
-                _ => bail!("invalid per layer bootstrap, only supports aes-128-xts"),
+                _ => {
+                    return Err(MergeError::Other(anyhow!(
+                        "invalid per layer bootstrap, only supports aes-128-xts"
+                    )))
+                }
             }
             ctx.explicit_uidgid = rs.meta.explicit_uidgid();
             if config.as_ref().unwrap().is_tarfs_mode {
@@ -190,13 +201,12 @@ impl Merger {
             for blob in blobs {
                 let mut blob_ctx = BlobContext::from(ctx, &blob, ChunkSource::Parent)?;
                 if let Some(chunk_size) = chunk_size {
-                    ensure!(
-                        chunk_size == blob_ctx.chunk_size,
-                        "can not merge bootstraps with inconsistent chunk size, current bootstrap {:?} with chunk size {:x}, expected {:x}",
+                    if chunk_size != blob_ctx.chunk_size {
+                        return Err(MergeError::Other(anyhow!("can not merge bootstraps with inconsistent chunk size, current bootstrap {:?} with chunk size {:x}, expected {:x}",
                         bootstrap_path,
                         blob_ctx.chunk_size,
-                        chunk_size,
-                    );
+                        chunk_size)));
+                    }
                 } else {
                     chunk_size = Some(blob_ctx.chunk_size);
                 }
@@ -205,7 +215,9 @@ impl Merger {
                     // use the same chunk dict bootstrap. So the parent bootstrap includes multiple blobs, but
                    // only at most one new blob, the other blobs should be from the chunk dict image.
                    if parent_blob_added {
-                        bail!("invalid per layer bootstrap, having multiple associated data blobs");
+                        return Err(MergeError::Other(anyhow!(
+                            "invalid per layer bootstrap, having multiple associated data blobs"
+                        )));
                    }
                    parent_blob_added = true;
 
@@ -222,8 +234,11 @@ impl Merger {
                     {
                         blob_ctx.blob_id = original_id;
                     } else {
-                        blob_ctx.blob_id =
-                            BlobInfo::get_blob_id_from_meta_path(bootstrap_path)?;
+                        let blob_id = BlobInfo::get_blob_id_from_meta_path(bootstrap_path);
+                        if let Err(err) = blob_id {
+                            return Err(MergeError::Other(anyhow!(err)));
+                        }
+                        blob_ctx.blob_id = blob_id.unwrap();
                     }
                 }
                 if let Some(digest) = Self::get_digest_from_list(&blob_digests, layer_idx)? {
@@ -290,10 +305,14 @@ impl Merger {
 
         if ctx.conversion_type == ConversionType::TarToTarfs {
             if parent_layers > 0 {
-                bail!("merging RAFS in TARFS mode conflicts with `--parent-bootstrap`");
+                return Err(MergeError::Other(anyhow!(
+                    "merging RAFS in TARFS mode conflicts with `--parent-bootstrap`"
+                )));
             }
             if !chunk_dict_blobs.is_empty() {
-                bail!("merging RAFS in TARFS mode conflicts with `--chunk-dict`");
+                return Err(MergeError::Other(anyhow!(
+                    "merging RAFS in TARFS mode conflicts with `--chunk-dict`"
+                )));
             }
         }
 
@@ -312,6 +331,6 @@ impl Merger {
         bootstrap
             .dump(ctx, &mut bootstrap_storage, &mut bootstrap_ctx, &blob_table)
             .context(format!("dump bootstrap to {:?}", target.display()))?;
-        BuildOutput::new(&blob_mgr, &bootstrap_storage)
+        BuildOutput::new(&blob_mgr, &bootstrap_storage).map_err(MergeError::Other)
     }
 }
diff --git a/rafs/Cargo.toml b/rafs/Cargo.toml
index 4581d5e0524..7424a811172 100644
--- a/rafs/Cargo.toml
+++ b/rafs/Cargo.toml
@@ -20,6 +20,7 @@ serde = { version = "1.0.110", features = ["serde_derive", "rc"] }
 serde_json = "1.0.53"
 vm-memory = "0.10"
 fuse-backend-rs = "^0.10.3"
+thiserror = "1"
 
 nydus-api = { version = "0.3", path = "../api" }
 nydus-storage = { version = "0.6", path = "../storage", features = ["backend-localfs"] }
diff --git a/rafs/src/metadata/mod.rs b/rafs/src/metadata/mod.rs
index c04c3b4da1d..0f580d4753c 100644
--- a/rafs/src/metadata/mod.rs
+++ b/rafs/src/metadata/mod.rs
@@ -17,6 +17,7 @@ use std::path::{Component, Path, PathBuf};
 use std::str::FromStr;
 use std::sync::Arc;
 use std::time::Duration;
+use thiserror::Error;
 
 use anyhow::bail;
 use fuse_backend_rs::abi::fuse_abi::Attr;
@@ -397,18 +398,26 @@ pub struct RafsSuperConfig {
     pub is_tarfs_mode: bool,
 }
 
+#[derive(Error, Debug)]
+pub enum MergeError {
+    #[error("Inconsistent RAFS Filesystem: {0}")]
+    InconsistentFilesystem(String),
+    #[error(transparent)]
+    Other(#[from] anyhow::Error),
+}
+
 impl RafsSuperConfig {
     /// Check compatibility for two RAFS filesystems.
-    pub fn check_compatibility(&self, meta: &RafsSuperMeta) -> Result<()> {
+    pub fn check_compatibility(&self, meta: &RafsSuperMeta) -> anyhow::Result<(), MergeError> {
         if self.chunk_size != meta.chunk_size {
-            return Err(einval!(format!(
+            return Err(MergeError::InconsistentFilesystem(format!(
                 "Inconsistent configuration of chunk_size: {} vs {}",
                 self.chunk_size, meta.chunk_size
             )));
         }
 
         if self.explicit_uidgid != meta.explicit_uidgid() {
-            return Err(einval!(format!(
+            return Err(MergeError::InconsistentFilesystem(format!(
                 "Using inconsistent explicit_uidgid setting {:?}, target explicit_uidgid setting {:?}",
                 self.explicit_uidgid,
                 meta.explicit_uidgid()
@@ -416,15 +425,15 @@ impl RafsSuperConfig {
         }
 
         if u32::from(self.version) != meta.version {
-            return Err(einval!(format!(
+            let meta_version = RafsVersion::try_from(meta.version);
+            return Err(MergeError::InconsistentFilesystem(format!(
                 "Using inconsistent RAFS version {:?}, target RAFS version {:?}",
-                self.version,
-                RafsVersion::try_from(meta.version)?
+                self.version, meta_version
             )));
         }
 
         if self.version == RafsVersion::V5 && self.digester != meta.get_digester() {
-            return Err(einval!(format!(
+            return Err(MergeError::InconsistentFilesystem(format!(
                 "RAFS v5 can not support different digest algorithm due to inode digest, {} vs {}",
                 self.digester,
                 meta.get_digester()
@@ -433,7 +442,9 @@ impl RafsSuperConfig {
 
         let is_tarfs_mode = meta.flags.contains(RafsSuperFlags::TARTFS_MODE);
         if is_tarfs_mode != self.is_tarfs_mode {
-            return Err(einval!(format!("Using inconsistent RAFS TARFS mode")));
+            return Err(MergeError::InconsistentFilesystem(
+                "Using inconsistent RAFS TARFS mode".to_string(),
+            ));
         }
 
         Ok(())
diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs
index b42c110b3b6..7251e5f889c 100644
--- a/src/bin/nydus-image/main.rs
+++ b/src/bin/nydus-image/main.rs
@@ -31,7 +31,7 @@ use nydus_builder::{
     BuildContext, BuildOutput, Builder, ConversionType, DirectoryBuilder, Feature, Features,
     HashChunkDict, Merger, Prefetch, PrefetchPolicy, StargzBuilder, TarballBuilder, WhiteoutSpec,
 };
-use nydus_rafs::metadata::{RafsSuper, RafsSuperConfig, RafsVersion};
+use nydus_rafs::metadata::{MergeError, RafsSuper, RafsSuperConfig, RafsVersion};
 use nydus_storage::backend::localfs::LocalFs;
 use nydus_storage::backend::BlobBackend;
 use nydus_storage::device::BlobFeatures;
@@ -758,7 +758,11 @@ fn main() -> Result<()> {
             }
         }
     } else if let Some(matches) = cmd.subcommand_matches("merge") {
-        Command::merge(matches, &build_info)
+        let result = Command::merge(matches, &build_info);
+        if let Err(MergeError::InconsistentFilesystem(_)) = result {
+            std::process::exit(2);
+        }
+        result.map_err(|err| anyhow!(err))
     } else if let Some(matches) = cmd.subcommand_matches("check") {
         Command::check(matches, &build_info)
     } else if let Some(matches) = cmd.subcommand_matches("export") {
@@ -1164,7 +1168,7 @@ impl Command {
         Ok(())
     }
-    fn merge(matches: &ArgMatches, build_info: &BuildTimeInfo) -> Result<()> {
+    fn merge(matches: &ArgMatches, build_info: &BuildTimeInfo) -> Result<(), MergeError> {
         let source_bootstrap_paths: Vec<PathBuf> = matches
             .get_many::<String>("SOURCE")
             .map(|paths| paths.map(PathBuf::from).collect())
             .unwrap_or_default();
@@ -1238,7 +1242,7 @@ impl Command {
             chunk_dict_path,
             config,
         )?;
-        OutputSerializer::dump(matches, output, build_info)
+        OutputSerializer::dump(matches, output, build_info).map_err(MergeError::Other)
     }
 
     fn compact(matches: &ArgMatches, build_info: &BuildTimeInfo) -> Result<()> {
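
For reference, a minimal, self-contained sketch of how the new `MergeError` contract is meant to be consumed by a caller: a compatibility mismatch (`MergeError::InconsistentFilesystem`) maps to exit code 2, while every other failure is surfaced as an ordinary `anyhow` error. The `merge()` helper below is hypothetical and only stands in for `Command::merge`.

```rust
use anyhow::anyhow;
use thiserror::Error;

// Mirror of the error type added to rafs/src/metadata/mod.rs.
#[derive(Error, Debug)]
pub enum MergeError {
    #[error("Inconsistent RAFS Filesystem: {0}")]
    InconsistentFilesystem(String),
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

// Hypothetical stand-in for `Command::merge`, used only to exercise the match below.
fn merge() -> Result<(), MergeError> {
    Err(MergeError::InconsistentFilesystem(
        "Inconsistent configuration of chunk_size: 1048576 vs 4194304".to_string(),
    ))
}

fn main() -> anyhow::Result<()> {
    let result = merge();
    // A compatibility mismatch gets a dedicated exit code so callers can tell
    // "these bootstraps cannot be merged" apart from ordinary failures.
    if let Err(MergeError::InconsistentFilesystem(_)) = result {
        std::process::exit(2);
    }
    // Any other error is converted back into an anyhow::Error and propagated.
    result.map_err(|err| anyhow!(err))
}
```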