Skip to content

Commit

Permalink
nydus-image: modify compact interface
Browse files Browse the repository at this point in the history
This commit uses compact parameters directly in the CLI interface
instead of a compact config file. It also fixes a bug where the chunk
key for ChunkWrapper::Ref was not generated correctly.

Signed-off-by: Yifan Zhao <[email protected]>
  • Loading branch information
SToPire committed Sep 29, 2024
1 parent a4683ba commit cc7f21e
Show file tree
Hide file tree
Showing 3 changed files with 93 additions and 54 deletions.
29 changes: 18 additions & 11 deletions builder/src/compact.rs
Original file line number Diff line number Diff line change
Expand Up @@ -48,22 +48,30 @@ pub struct Config {
/// available value: 0-99, 0 means disable
/// hint: it's better to disable this option when there are some shared blobs
/// for example: build-cache
#[serde(default)]
min_used_ratio: u8,
pub min_used_ratio: u8,
/// we compact blobs whose size are less than compact_blob_size
#[serde(default = "default_compact_blob_size")]
compact_blob_size: usize,
pub compact_blob_size: usize,
/// size of compacted blobs should not be larger than max_compact_size
#[serde(default = "default_max_compact_size")]
max_compact_size: usize,
pub max_compact_size: usize,
/// if number of blobs >= layers_to_compact, do compact
/// 0 means always try compact
#[serde(default)]
layers_to_compact: usize,
pub layers_to_compact: usize,
/// local blobs dir, may haven't upload to backend yet
/// what's more, new blobs will output to this dir
/// name of blob file should be equal to blob_id
blobs_dir: String,
pub blobs_dir: String,
}

impl Default for Config {
fn default() -> Self {
Self {
min_used_ratio: 0,
compact_blob_size: default_compact_blob_size(),
max_compact_size: default_max_compact_size(),
layers_to_compact: 0,
blobs_dir: String::new(),
}
}
}

#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
Expand All @@ -79,7 +87,7 @@ impl ChunkKey {
match c {
ChunkWrapper::V5(_) => Self::Digest(*c.id()),
ChunkWrapper::V6(_) => Self::Offset(c.blob_index(), c.compressed_offset()),
ChunkWrapper::Ref(_) => unimplemented!("unsupport ChunkWrapper::Ref(c)"),
ChunkWrapper::Ref(_) => Self::Digest(*c.id()),
}
}
}
Expand Down Expand Up @@ -790,7 +798,6 @@ mod tests {
}

#[test]
#[should_panic = "not implemented: unsupport ChunkWrapper::Ref(c)"]
fn test_chunk_key_from() {
let cw = ChunkWrapper::new(RafsVersion::V5);
matches!(ChunkKey::from(&cw), ChunkKey::Digest(_));
Expand Down
1 change: 1 addition & 0 deletions builder/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ pub use self::chunkdict_generator::ChunkdictBlobInfo;
pub use self::chunkdict_generator::ChunkdictChunkInfo;
pub use self::chunkdict_generator::Generator;
pub use self::compact::BlobCompactor;
pub use self::compact::Config as CompactConfig;
pub use self::core::bootstrap::Bootstrap;
pub use self::core::chunk_dict::{parse_chunk_dict_arg, ChunkDict, HashChunkDict};
pub use self::core::context::{
Expand Down
117 changes: 74 additions & 43 deletions src/bin/nydus-image/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ use crate::deduplicate::{
SqliteDatabase,
};
use std::convert::TryFrom;
use std::fs::{self, metadata, DirEntry, File, OpenOptions};
use std::fs::{self, metadata, DirEntry, OpenOptions};
use std::os::unix::fs::FileTypeExt;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
Expand Down Expand Up @@ -682,7 +682,6 @@ fn prepare_cmd_args(bti_string: &'static str) -> App {
.join(", ")
))
.required(false)
.group("backend"),
)
.arg(
Arg::new("backend-config")
Expand All @@ -698,21 +697,33 @@ fn prepare_cmd_args(bti_string: &'static str) -> App {
.required(false),
)
.arg(
Arg::new("blob")
.long("blob")
.short('b')
.help("Path to RAFS data blob file")
.required(false)
.group("backend"),
Arg::new("min-used-ratio")
.long("min-used-ratio")
.help("Lower bound of used ratio for blobs to be kept")
)
.arg(
Arg::new("compact-blob-size")
.long("compact-blob-size")
.help("Upper bound of blob size for blobs to be compacted")
)
.arg(
Arg::new("max-compact-size")
.long("max-compact-size")
.help("Upper bound of compacted blob size")
)
.arg(
Arg::new("layers-to-compact")
.long("layers-to-compact")
.help("If number of blobs >= layers_to_compact, do compact. 0 means always compact")
)
.arg(
Arg::new("blob-dir")
.long("blob-dir")
.short('D')
.help(
"Directory for localfs storage backend, hosting data blobs and cache files",
"Local blobs dir for blobs not in backend, also for output blobs",
)
.group("backend"),
.required(true),
)
.arg( arg_chunk_dict )
.arg(
Expand All @@ -724,11 +735,6 @@ fn prepare_cmd_args(bti_string: &'static str) -> App {
.arg(
arg_output_json,
)
.group(
clap::ArgGroup::new("backend")
.args(&["backend-type", "blob", "blob-dir"])
.required(false),
),
);

app.subcommand(
Expand Down Expand Up @@ -1504,7 +1510,12 @@ impl Command {
Some(s) => PathBuf::from(s),
};

let (config, backend) = Self::get_backend(matches, "compactor")?;
let (config, backend) = match Self::get_backend(matches, "compactor") {
Ok((c, b)) => (c, b),
Err(e) => {
bail!("{}, --blob-dir or --backend-type must be specified", e);
}
};

let (rs, _) = RafsSuper::load_from_file(&bootstrap_path, config.clone(), false)?;
info!("load bootstrap {:?} successfully", bootstrap_path);
Expand All @@ -1517,16 +1528,26 @@ impl Command {
)?),
};

let config_file_path = matches.get_one::<String>("config").unwrap();
let file = File::open(config_file_path)
.with_context(|| format!("failed to open config file {}", config_file_path))?;
let config = serde_json::from_reader(file)
.with_context(|| format!("invalid config file {}", config_file_path))?;
let mut compact_config: nydus_builder::CompactConfig = Default::default();
if let Some(min_used_ratio) = matches.get_one::<String>("min-used-ratio") {
compact_config.min_used_ratio = min_used_ratio.parse()?;
}
if let Some(compact_blob_size) = matches.get_one::<String>("compact-blob-size") {
compact_config.compact_blob_size = compact_blob_size.parse()?;
}
if let Some(max_compact_size) = matches.get_one::<String>("max-compact-size") {
compact_config.max_compact_size = max_compact_size.parse()?;
}
if let Some(layers_to_compact) = matches.get_one::<String>("layers-to-compact") {
compact_config.layers_to_compact = layers_to_compact.parse()?;
}

compact_config.blobs_dir = matches.get_one::<String>("blob-dir").unwrap().clone();

let version = rs.meta.version.try_into().unwrap();
let compressor = rs.meta.get_compressor();
if let Some(build_output) =
BlobCompactor::compact(rs, dst_bootstrap, chunk_dict, backend, &config)?
BlobCompactor::compact(rs, dst_bootstrap, chunk_dict, backend, &compact_config)?
{
OutputSerializer::dump(matches, build_output, build_info, compressor, version)?;
}
Expand All @@ -1539,7 +1560,31 @@ impl Command {
if output.is_empty() {
return Err(anyhow!("invalid empty --output option"));
}
let (config, backend) = Self::get_backend(matches, "unpacker")?;

let (config, backend): (Arc<ConfigV2>, Arc<dyn BlobBackend + Send + Sync>) =
// if --blob is specified, use localfs backend and default config
if let Some(p) = matches.get_one::<String>("blob") {
let blob_path = PathBuf::from(p);
let local_fs_conf = LocalFsConfig {
blob_file: blob_path.to_str().unwrap().to_owned(),
dir: Default::default(),
alt_dirs: Default::default(),
};
let local_fs = LocalFs::new(&local_fs_conf, Some("unpacker"))
.with_context(|| format!("fail to create local backend for {:?}", blob_path))?;

(Arc::new(ConfigV2::default()), Arc::new(local_fs))
} else {
match Self::get_backend(matches, "unpacker") {
Ok((c, b)) => (c, b),
Err(e) => {
bail!(
"{}, --blob, --blob-dir or --backend-type must be specified",
e
);
}
}
};

OCIUnpacker::new(bootstrap, Some(backend), output)
.with_context(|| "fail to create unpacker")?
Expand Down Expand Up @@ -1809,24 +1854,7 @@ impl Command {
) -> Result<(Arc<ConfigV2>, Arc<dyn BlobBackend + Send + Sync>)> {
let config: Arc<ConfigV2>;
let backend: Arc<dyn BlobBackend + Send + Sync>;
if let Some(p) = matches.get_one::<String>("blob") {
config = Arc::new(ConfigV2::default());
backend = {
let blob_path = PathBuf::from(p);
let local_fs_conf = LocalFsConfig {
blob_file: blob_path.to_str().unwrap().to_owned(),
dir: Default::default(),
alt_dirs: Default::default(),
};
let local_fs = LocalFs::new(&local_fs_conf, Some(blob_id))
.with_context(|| format!("fail to create local backend for {:?}", blob_path))?;

Arc::new(local_fs)
};
} else if let Some(dir) = matches.get_one::<String>("blob-dir") {
config = Arc::new(ConfigV2::new_localfs("", dir)?);
backend = BlobFactory::new_backend(&config.backend.as_ref().unwrap(), blob_id)?;
} else if let Some(backend_type) = matches.get_one::<String>("backend-type") {
if let Some(backend_type) = matches.get_one::<String>("backend-type") {
let content =
if let Some(backend_file) = matches.get_one::<String>("backend-config-file") {
fs::read_to_string(backend_file).with_context(|| {
Expand All @@ -1839,13 +1867,16 @@ impl Command {
};

if backend_type == "localfs" {
bail!("Use --blob-dir or --blob to specify localfs backend");
bail!("Use --blob-dir to specify localfs backend");
} else {
backend = BlobFactory::new_backend_from_json(backend_type, &content, blob_id)?;
config = Arc::new(ConfigV2::default());
}
} else if let Some(dir) = matches.get_one::<String>("blob-dir") {
config = Arc::new(ConfigV2::new_localfs("", dir)?);
backend = BlobFactory::new_backend(&config.backend.as_ref().unwrap(), blob_id)?;
} else {
bail!("--blob, --blob-dir or --backend-type must be specified");
return Err(anyhow!("invalid backend configuration"));
}

Ok((config, backend))
Expand Down

0 comments on commit cc7f21e

Please sign in to comment.