Skip to content

Commit

Permalink
nydusd: add config support for amplify_io
Browse files Browse the repository at this point in the history
Add support for the `amplify_io` option in the nydusd config file
to configure read amplification.

Signed-off-by: Wenhao Ren <[email protected]>
  • Loading branch information
hangvane committed Oct 24, 2023
1 parent 3bb124b commit 760fbcb
Show file tree
Hide file tree
Showing 5 changed files with 33 additions and 10 deletions.
10 changes: 7 additions & 3 deletions api/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1194,11 +1194,15 @@ fn default_work_dir() -> String {
".".to_string()
}

pub fn default_batch_size() -> usize {
fn default_merging_size() -> usize {
128 * 1024
}

fn default_prefetch_batch_size() -> usize {
fn default_batch_size() -> usize {
1024 * 1024
}

pub fn default_prefetch_batch_size() -> usize {
1024 * 1024
}

Expand Down Expand Up @@ -1411,7 +1415,7 @@ struct FsPrefetchControl {
pub threads_count: usize,

/// Window size in unit of bytes to merge request to backend.
#[serde(default = "default_batch_size")]
#[serde(default = "default_merging_size")]
pub merging_size: usize,

/// Network bandwidth limitation for prefetching.
Expand Down
4 changes: 2 additions & 2 deletions service/src/fs_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -518,8 +518,8 @@ impl FsCacheHandler {
.map_err(|e| eother!(format!("failed to start prefetch worker, {}", e)))?;

let size = match cache_cfg.prefetch.batch_size.checked_next_power_of_two() {
None => nydus_api::default_batch_size() as u64,
Some(1) => nydus_api::default_batch_size() as u64,
None => nydus_api::default_prefetch_batch_size() as u64,
Some(1) => nydus_api::default_prefetch_batch_size() as u64,
Some(s) => s as u64,
};
let size = std::cmp::max(0x4_0000u64, size);
Expand Down
8 changes: 6 additions & 2 deletions storage/src/cache/filecache/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ use crate::cache::state::{
use crate::cache::worker::{AsyncPrefetchConfig, AsyncWorkerMgr};
use crate::cache::{BlobCache, BlobCacheMgr};
use crate::device::{BlobFeatures, BlobInfo};
use crate::RAFS_DEFAULT_CHUNK_SIZE;

pub const BLOB_RAW_FILE_SUFFIX: &str = ".blob.raw";
pub const BLOB_DATA_FILE_SUFFIX: &str = ".blob.data";
Expand All @@ -46,6 +45,7 @@ pub struct FileCacheMgr {
cache_convergent_encryption: bool,
cache_encryption_key: String,
closed: Arc<AtomicBool>,
ondemand_batch_size: Option<u64>,
}

impl FileCacheMgr {
Expand All @@ -55,6 +55,7 @@ impl FileCacheMgr {
backend: Arc<dyn BlobBackend>,
runtime: Arc<Runtime>,
id: &str,
ondemand_batch_size: Option<u64>,
) -> Result<FileCacheMgr> {
let blob_cfg = config.get_filecache_config()?;
let work_dir = blob_cfg.get_work_dir()?;
Expand All @@ -77,6 +78,7 @@ impl FileCacheMgr {
cache_convergent_encryption: blob_cfg.enable_convergent_encryption,
cache_encryption_key: blob_cfg.encryption_key.clone(),
closed: Arc::new(AtomicBool::new(false)),
ondemand_batch_size,
})
}

Expand Down Expand Up @@ -339,7 +341,9 @@ impl FileCacheEntry {
is_zran,
dio_enabled: false,
need_validation,
batch_size: RAFS_DEFAULT_CHUNK_SIZE,
// If none, use default 0 since it's not used.
// e.g., at build time.
batch_size: mgr.ondemand_batch_size.unwrap_or(0),
prefetch_config,
})
}
Expand Down
8 changes: 6 additions & 2 deletions storage/src/cache/fscache/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ use crate::cache::worker::{AsyncPrefetchConfig, AsyncWorkerMgr};
use crate::cache::{BlobCache, BlobCacheMgr};
use crate::device::{BlobFeatures, BlobInfo, BlobObject};
use crate::factory::BLOB_FACTORY;
use crate::RAFS_DEFAULT_CHUNK_SIZE;

use crate::cache::filecache::BLOB_DATA_FILE_SUFFIX;

Expand All @@ -40,6 +39,7 @@ pub struct FsCacheMgr {
need_validation: bool,
blobs_check_count: Arc<AtomicU8>,
closed: Arc<AtomicBool>,
ondemand_batch_size: Option<u64>,
}

impl FsCacheMgr {
Expand All @@ -49,6 +49,7 @@ impl FsCacheMgr {
backend: Arc<dyn BlobBackend>,
runtime: Arc<Runtime>,
id: &str,
ondemand_batch_size: Option<u64>,
) -> Result<FsCacheMgr> {
if config.cache_compressed {
return Err(enosys!("fscache doesn't support compressed cache mode"));
Expand All @@ -73,6 +74,7 @@ impl FsCacheMgr {
need_validation: config.cache_validate,
blobs_check_count: Arc::new(AtomicU8::new(0)),
closed: Arc::new(AtomicBool::new(false)),
ondemand_batch_size,
})
}

Expand Down Expand Up @@ -290,7 +292,9 @@ impl FileCacheEntry {
is_zran,
dio_enabled: true,
need_validation,
batch_size: RAFS_DEFAULT_CHUNK_SIZE,
// If none, use default 0 since it's not used.
// e.g., at build time.
batch_size: mgr.ondemand_batch_size.unwrap_or(0),
prefetch_config,
})
}
Expand Down
13 changes: 12 additions & 1 deletion storage/src/factory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,10 @@ impl BlobFactory {
) -> IOResult<Arc<dyn BlobCache>> {
let backend_cfg = config.get_backend_config()?;
let cache_cfg = config.get_cache_config()?;
let ondemand_batch_size = match config.get_rafs_config() {
Ok(v) => Some(v.batch_size as u64),
Err(_) => None,
};
let key = BlobCacheMgrKey {
config: config.clone(),
};
Expand All @@ -128,7 +132,13 @@ impl BlobFactory {
let backend = Self::new_backend(backend_cfg, &blob_info.blob_id())?;
let mgr = match cache_cfg.cache_type.as_str() {
"blobcache" | "filecache" => {
let mgr = FileCacheMgr::new(cache_cfg, backend, ASYNC_RUNTIME.clone(), &config.id)?;
let mgr = FileCacheMgr::new(
cache_cfg,
backend,
ASYNC_RUNTIME.clone(),
&config.id,
ondemand_batch_size,
)?;
mgr.init()?;
Arc::new(mgr) as Arc<dyn BlobCacheMgr>
}
Expand All @@ -139,6 +149,7 @@ impl BlobFactory {
backend,
ASYNC_RUNTIME.clone(),
&config.id,
ondemand_batch_size,
)?;
mgr.init()?;
Arc::new(mgr) as Arc<dyn BlobCacheMgr>
Expand Down

0 comments on commit 760fbcb

Please sign in to comment.