feat: notify inconsistent fs version problem with exit code
If acceld converts layers with a cache that uses a different fs version, merging them into the bootstrap layer leads to an inconsistent fs version problem. So we need to notify acceld that an inconsistent version occurred so it can handle this error.

Signed-off-by: YuQiang <[email protected]>
PerseidMeteor committed Sep 25, 2023
1 parent 278915b commit 8d09de2
Showing 5 changed files with 94 additions and 58 deletions.
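
With this change, a merge that fails because the source bootstraps were built with inconsistent RAFS settings makes `nydus-image merge` exit with code 2, so a caller such as acceld can tell that case apart from ordinary failures. A minimal caller-side sketch (only the `merge` subcommand and the exit-code-2 convention come from this commit; the binary location, flags, and bootstrap paths below are placeholders):

```rust
use std::process::Command;

fn main() -> std::io::Result<()> {
    // The exact flags and paths are placeholders; only the `merge` subcommand
    // and the exit-code-2 convention come from this commit.
    let status = Command::new("nydus-image")
        .args(["merge", "--bootstrap", "merged.bootstrap"])
        .args(["layer1.bootstrap", "layer2.bootstrap"])
        .status()?;

    match status.code() {
        Some(0) => println!("merge succeeded"),
        // Exit code 2 maps to MergeError::InconsistentFilesystem, e.g. source
        // bootstraps built with different RAFS versions or chunk sizes.
        Some(2) => eprintln!("inconsistent RAFS filesystem between source bootstraps"),
        other => eprintln!("merge failed, status: {:?}", other),
    }
    Ok(())
}
```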
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

111 changes: 65 additions & 46 deletions builder/src/merge.rs
@@ -8,10 +8,10 @@ use std::convert::TryFrom;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{anyhow, bail, ensure, Context, Result};
use anyhow::{anyhow, bail, Context, Ok, Result};
use hex::FromHex;
use nydus_api::ConfigV2;
use nydus_rafs::metadata::{RafsSuper, RafsVersion};
use nydus_rafs::metadata::{MergeError, RafsSuper, RafsVersion};
use nydus_storage::device::{BlobFeatures, BlobInfo};
use nydus_utils::crypt;

@@ -83,49 +83,56 @@ impl Merger {
target: ArtifactStorage,
chunk_dict: Option<PathBuf>,
config_v2: Arc<ConfigV2>,
) -> Result<BuildOutput> {
) -> Result<BuildOutput, MergeError> {
if sources.is_empty() {
bail!("source bootstrap list is empty , at least one bootstrap is required");
return Err(MergeError::Other(anyhow!(
"source bootstrap list is empty , at least one bootstrap is required"
)));
}
if let Some(digests) = blob_digests.as_ref() {
ensure!(
digests.len() == sources.len(),
"number of blob digest entries {} doesn't match number of sources {}",
digests.len(),
sources.len(),
);
if digests.len() != sources.len() {
return Err(MergeError::Other(anyhow!(
"number of blob digest entries {} doesn't match number of sources {}",
digests.len(),
sources.len()
)));
}
}
if let Some(original_ids) = original_blob_ids.as_ref() {
ensure!(
original_ids.len() == sources.len(),
"number of original blob id entries {} doesn't match number of sources {}",
original_ids.len(),
sources.len(),
);
if original_ids.len() != sources.len() {
return Err(MergeError::Other(anyhow!(
"number of original blob id entries {} doesn't match number of sources {}",
original_ids.len(),
sources.len()
)));
}
}
if let Some(sizes) = blob_sizes.as_ref() {
ensure!(
sizes.len() == sources.len(),
"number of blob size entries {} doesn't match number of sources {}",
sizes.len(),
sources.len(),
);
if sizes.len() != sources.len() {
return Err(MergeError::Other(anyhow!(
"number of blob size entries {} doesn't match number of sources {}",
sizes.len(),
sources.len()
)));
}
}
if let Some(toc_digests) = blob_toc_digests.as_ref() {
ensure!(
toc_digests.len() == sources.len(),
"number of toc digest entries {} doesn't match number of sources {}",
toc_digests.len(),
sources.len(),
);
if toc_digests.len() != sources.len() {
return Err(MergeError::Other(anyhow!(
"number of toc digest entries {} doesn't match number of sources {}",
toc_digests.len(),
sources.len()
)));
}
}
if let Some(sizes) = blob_toc_sizes.as_ref() {
ensure!(
sizes.len() == sources.len(),
"number of toc size entries {} doesn't match number of sources {}",
sizes.len(),
sources.len(),
);
if sizes.len() != sources.len() {
return Err(MergeError::Other(anyhow!(
"number of toc size entries {0} doesn't match number of sources {1}",
sizes.len(),
sources.len()
)));
}
}

let mut tree: Option<Tree> = None;
@@ -177,7 +184,11 @@ impl Merger {
match rs.meta.get_cipher() {
crypt::Algorithm::None => (),
crypt::Algorithm::Aes128Xts => ctx.cipher = crypt::Algorithm::Aes128Xts,
_ => bail!("invalid per layer bootstrap, only supports aes-128-xts"),
_ => {
return Err(MergeError::Other(anyhow!(
"invalid per layer bootstrap, only supports aes-128-xts"
)))
}
}
ctx.explicit_uidgid = rs.meta.explicit_uidgid();
if config.as_ref().unwrap().is_tarfs_mode {
@@ -190,13 +201,12 @@
for blob in blobs {
let mut blob_ctx = BlobContext::from(ctx, &blob, ChunkSource::Parent)?;
if let Some(chunk_size) = chunk_size {
ensure!(
chunk_size == blob_ctx.chunk_size,
"can not merge bootstraps with inconsistent chunk size, current bootstrap {:?} with chunk size {:x}, expected {:x}",
if chunk_size != blob_ctx.chunk_size {
return Err(MergeError::Other(anyhow!("can not merge bootstraps with inconsistent chunk size, current bootstrap {:?} with chunk size {:x}, expected {:x}",
bootstrap_path,
blob_ctx.chunk_size,
chunk_size,
);
chunk_size)));
}
} else {
chunk_size = Some(blob_ctx.chunk_size);
}
Expand All @@ -205,7 +215,9 @@ impl Merger {
// use the same chunk dict bootstrap. So the parent bootstrap includes multiple blobs, but
// only at most one new blob, the other blobs should be from the chunk dict image.
if parent_blob_added {
bail!("invalid per layer bootstrap, having multiple associated data blobs");
return Err(MergeError::Other(anyhow!(
"invalid per layer bootstrap, having multiple associated data blobs"
)));
}
parent_blob_added = true;

@@ -222,8 +234,11 @@
{
blob_ctx.blob_id = original_id;
} else {
blob_ctx.blob_id =
BlobInfo::get_blob_id_from_meta_path(bootstrap_path)?;
let blob_id = BlobInfo::get_blob_id_from_meta_path(bootstrap_path);
if let Err(err) = blob_id {
return Err(MergeError::Other(anyhow!(err)));
}
blob_ctx.blob_id = blob_id.unwrap();
}
}
if let Some(digest) = Self::get_digest_from_list(&blob_digests, layer_idx)? {
@@ -290,10 +305,14 @@

if ctx.conversion_type == ConversionType::TarToTarfs {
if parent_layers > 0 {
bail!("merging RAFS in TARFS mode conflicts with `--parent-bootstrap`");
return Err(MergeError::Other(anyhow!(
"merging RAFS in TARFS mode conflicts with `--parent-bootstrap`"
)));
}
if !chunk_dict_blobs.is_empty() {
bail!("merging RAFS in TARFS mode conflicts with `--chunk-dict`");
return Err(MergeError::Other(anyhow!(
"merging RAFS in TARFS mode conflicts with `--chunk-dict`"
)));
}
}

@@ -312,6 +331,6 @@
bootstrap
.dump(ctx, &mut bootstrap_storage, &mut bootstrap_ctx, &blob_table)
.context(format!("dump bootstrap to {:?}", target.display()))?;
BuildOutput::new(&blob_mgr, &bootstrap_storage)
BuildOutput::new(&blob_mgr, &bootstrap_storage).map_err(MergeError::Other)
}
}
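
A note on the error plumbing above: because `MergeError::Other` is declared with `#[from] anyhow::Error` (see the `rafs/src/metadata/mod.rs` change below), the `?` operator converts `anyhow` errors into `MergeError` automatically — which is why the final `.context(...)?` call still compiles — while brand-new messages are wrapped explicitly with `MergeError::Other(anyhow!(...))`. A self-contained sketch of that pattern, using a hypothetical helper function:

```rust
use anyhow::{anyhow, Context};
use thiserror::Error;

#[derive(Error, Debug)]
pub enum MergeError {
    #[error("Inconsistent RAFS Filesystem: {0}")]
    InconsistentFilesystem(String),
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

// Hypothetical helper: `?` lifts the anyhow::Error produced by `.with_context`
// into `MergeError` through the generated `From` impl, so explicit wrapping is
// only needed when constructing a fresh error value.
fn read_blob_id(path: &str) -> Result<String, MergeError> {
    let id = std::fs::read_to_string(path)
        .with_context(|| format!("read blob id from {}", path))?;
    if id.trim().is_empty() {
        return Err(MergeError::Other(anyhow!("empty blob id in {}", path)));
    }
    Ok(id.trim().to_string())
}
```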
1 change: 1 addition & 0 deletions rafs/Cargo.toml
Expand Up @@ -20,6 +20,7 @@ serde = { version = "1.0.110", features = ["serde_derive", "rc"] }
serde_json = "1.0.53"
vm-memory = "0.10"
fuse-backend-rs = "^0.10.3"
thiserror = "1"

nydus-api = { version = "0.3", path = "../api" }
nydus-storage = { version = "0.6", path = "../storage", features = ["backend-localfs"] }
27 changes: 19 additions & 8 deletions rafs/src/metadata/mod.rs
@@ -17,6 +17,7 @@ use std::path::{Component, Path, PathBuf};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;

use anyhow::bail;
use fuse_backend_rs::abi::fuse_abi::Attr;
@@ -397,34 +398,42 @@ pub struct RafsSuperConfig {
pub is_tarfs_mode: bool,
}

#[derive(Error, Debug)]
pub enum MergeError {
#[error("Inconsistent RAFS Filesystem: {0}")]
InconsistentFilesystem(String),
#[error(transparent)]
Other(#[from] anyhow::Error),
}

impl RafsSuperConfig {
/// Check compatibility for two RAFS filesystems.
pub fn check_compatibility(&self, meta: &RafsSuperMeta) -> Result<()> {
pub fn check_compatibility(&self, meta: &RafsSuperMeta) -> anyhow::Result<(), MergeError> {
if self.chunk_size != meta.chunk_size {
return Err(einval!(format!(
return Err(MergeError::InconsistentFilesystem(format!(
"Inconsistent configuration of chunk_size: {} vs {}",
self.chunk_size, meta.chunk_size
)));
}

if self.explicit_uidgid != meta.explicit_uidgid() {
return Err(einval!(format!(
return Err(MergeError::InconsistentFilesystem(format!(
"Using inconsistent explicit_uidgid setting {:?}, target explicit_uidgid setting {:?}",
self.explicit_uidgid,
meta.explicit_uidgid()
)));
}

if u32::from(self.version) != meta.version {
return Err(einval!(format!(
let meta_version = RafsVersion::try_from(meta.version);
return Err(MergeError::InconsistentFilesystem(format!(
"Using inconsistent RAFS version {:?}, target RAFS version {:?}",
self.version,
RafsVersion::try_from(meta.version)?
self.version, meta_version
)));
}

if self.version == RafsVersion::V5 && self.digester != meta.get_digester() {
return Err(einval!(format!(
return Err(MergeError::InconsistentFilesystem(format!(
"RAFS v5 can not support different digest algorithm due to inode digest, {} vs {}",
self.digester,
meta.get_digester()
@@ -433,7 +442,9 @@ impl RafsSuperConfig {

let is_tarfs_mode = meta.flags.contains(RafsSuperFlags::TARTFS_MODE);
if is_tarfs_mode != self.is_tarfs_mode {
return Err(einval!(format!("Using inconsistent RAFS TARFS mode")));
return Err(MergeError::InconsistentFilesystem(
"Using inconsistent RAFS TARFS mode".to_string(),
));
}

Ok(())
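
For reference, the `thiserror` attributes above fully determine how these errors print: `{0}` interpolates the wrapped string, and `transparent` delegates `Display` to the inner `anyhow::Error`. A hypothetical unit test sketching the expected output:

```rust
#[test]
fn merge_error_display() {
    // Assumes `MergeError` from this module is in scope.
    let e = MergeError::InconsistentFilesystem(
        "Inconsistent configuration of chunk_size: 1048576 vs 4194304".to_string(),
    );
    assert_eq!(
        e.to_string(),
        "Inconsistent RAFS Filesystem: Inconsistent configuration of chunk_size: 1048576 vs 4194304"
    );

    // `#[error(transparent)]` forwards the wrapped anyhow error's message unchanged.
    let e = MergeError::Other(anyhow::anyhow!("dump bootstrap failed"));
    assert_eq!(e.to_string(), "dump bootstrap failed");
}
```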
12 changes: 8 additions & 4 deletions src/bin/nydus-image/main.rs
@@ -31,7 +31,7 @@ use nydus_builder::{
BuildContext, BuildOutput, Builder, ConversionType, DirectoryBuilder, Feature, Features,
HashChunkDict, Merger, Prefetch, PrefetchPolicy, StargzBuilder, TarballBuilder, WhiteoutSpec,
};
use nydus_rafs::metadata::{RafsSuper, RafsSuperConfig, RafsVersion};
use nydus_rafs::metadata::{MergeError, RafsSuper, RafsSuperConfig, RafsVersion};
use nydus_storage::backend::localfs::LocalFs;
use nydus_storage::backend::BlobBackend;
use nydus_storage::device::BlobFeatures;
@@ -758,7 +758,11 @@ fn main() -> Result<()> {
}
}
} else if let Some(matches) = cmd.subcommand_matches("merge") {
Command::merge(matches, &build_info)
let result = Command::merge(matches, &build_info);
if let Err(MergeError::InconsistentFilesystem(_)) = result {
std::process::exit(2);
}
result.map_err(|err| anyhow!(err))
} else if let Some(matches) = cmd.subcommand_matches("check") {
Command::check(matches, &build_info)
} else if let Some(matches) = cmd.subcommand_matches("export") {
Expand Down Expand Up @@ -1164,7 +1168,7 @@ impl Command {
Ok(())
}

fn merge(matches: &ArgMatches, build_info: &BuildTimeInfo) -> Result<()> {
fn merge(matches: &ArgMatches, build_info: &BuildTimeInfo) -> Result<(), MergeError> {
let source_bootstrap_paths: Vec<PathBuf> = matches
.get_many::<String>("SOURCE")
.map(|paths| paths.map(PathBuf::from).collect())
@@ -1238,7 +1242,7 @@
chunk_dict_path,
config,
)?;
OutputSerializer::dump(matches, output, build_info)
OutputSerializer::dump(matches, output, build_info).map_err(MergeError::Other)
}

fn compact(matches: &ArgMatches, build_info: &BuildTimeInfo) -> Result<()> {
