diff --git a/api/src/config.rs b/api/src/config.rs
index e606770ddc9..431ea735126 100644
--- a/api/src/config.rs
+++ b/api/src/config.rs
@@ -1194,11 +1194,15 @@ fn default_work_dir() -> String {
     ".".to_string()
 }
 
-pub fn default_batch_size() -> usize {
+fn default_merging_size() -> usize {
     128 * 1024
 }
 
-fn default_prefetch_batch_size() -> usize {
+fn default_batch_size() -> usize {
+    1024 * 1024
+}
+
+pub fn default_prefetch_batch_size() -> usize {
     1024 * 1024
 }
 
@@ -1411,7 +1415,7 @@ struct FsPrefetchControl {
     pub threads_count: usize,
 
     /// Window size in unit of bytes to merge request to backend.
-    #[serde(default = "default_batch_size")]
+    #[serde(default = "default_merging_size")]
     pub merging_size: usize,
 
     /// Network bandwidth limitation for prefetching.
diff --git a/service/src/fs_cache.rs b/service/src/fs_cache.rs
index a98fb1c5cf3..e45ad84e74d 100644
--- a/service/src/fs_cache.rs
+++ b/service/src/fs_cache.rs
@@ -518,8 +518,8 @@ impl FsCacheHandler {
             .map_err(|e| eother!(format!("failed to start prefetch worker, {}", e)))?;
 
         let size = match cache_cfg.prefetch.batch_size.checked_next_power_of_two() {
-            None => nydus_api::default_batch_size() as u64,
-            Some(1) => nydus_api::default_batch_size() as u64,
+            None => nydus_api::default_prefetch_batch_size() as u64,
+            Some(1) => nydus_api::default_prefetch_batch_size() as u64,
             Some(s) => s as u64,
         };
         let size = std::cmp::max(0x4_0000u64, size);
diff --git a/storage/src/cache/filecache/mod.rs b/storage/src/cache/filecache/mod.rs
index 2b158ca09b1..6e7b4bfd7a8 100644
--- a/storage/src/cache/filecache/mod.rs
+++ b/storage/src/cache/filecache/mod.rs
@@ -23,7 +23,6 @@ use crate::cache::state::{
 use crate::cache::worker::{AsyncPrefetchConfig, AsyncWorkerMgr};
 use crate::cache::{BlobCache, BlobCacheMgr};
 use crate::device::{BlobFeatures, BlobInfo};
-use crate::RAFS_DEFAULT_CHUNK_SIZE;
 
 pub const BLOB_RAW_FILE_SUFFIX: &str = ".blob.raw";
 pub const BLOB_DATA_FILE_SUFFIX: &str = ".blob.data";
@@ -46,6 +45,7 @@ pub struct FileCacheMgr {
     cache_convergent_encryption: bool,
     cache_encryption_key: String,
     closed: Arc<AtomicBool>,
+    ondemand_batch_size: Option<u64>,
 }
 
 impl FileCacheMgr {
@@ -55,6 +55,7 @@ impl FileCacheMgr {
         backend: Arc<dyn BlobBackend>,
         runtime: Arc<Runtime>,
         id: &str,
+        ondemand_batch_size: Option<u64>,
     ) -> Result<FileCacheMgr> {
         let blob_cfg = config.get_filecache_config()?;
         let work_dir = blob_cfg.get_work_dir()?;
@@ -77,6 +78,7 @@ impl FileCacheMgr {
             cache_convergent_encryption: blob_cfg.enable_convergent_encryption,
             cache_encryption_key: blob_cfg.encryption_key.clone(),
             closed: Arc::new(AtomicBool::new(false)),
+            ondemand_batch_size,
         })
     }
 
@@ -339,7 +341,9 @@ impl FileCacheEntry {
             is_zran,
             dio_enabled: false,
             need_validation,
-            batch_size: RAFS_DEFAULT_CHUNK_SIZE,
+            // If `None`, default to 0 since the value is not used,
+            // e.g., at build time.
+            batch_size: mgr.ondemand_batch_size.unwrap_or(0),
             prefetch_config,
         })
     }
diff --git a/storage/src/cache/fscache/mod.rs b/storage/src/cache/fscache/mod.rs
index cf624f4f427..278054a5850 100644
--- a/storage/src/cache/fscache/mod.rs
+++ b/storage/src/cache/fscache/mod.rs
@@ -20,7 +20,6 @@
 use crate::cache::worker::{AsyncPrefetchConfig, AsyncWorkerMgr};
 use crate::cache::{BlobCache, BlobCacheMgr};
 use crate::device::{BlobFeatures, BlobInfo, BlobObject};
 use crate::factory::BLOB_FACTORY;
-use crate::RAFS_DEFAULT_CHUNK_SIZE;
 
 use crate::cache::filecache::BLOB_DATA_FILE_SUFFIX;
@@ -40,6 +39,7 @@ pub struct FsCacheMgr {
     need_validation: bool,
     blobs_check_count: Arc<AtomicU8>,
     closed: Arc<AtomicBool>,
+    ondemand_batch_size: Option<u64>,
 }
 
 impl FsCacheMgr {
@@ -49,6 +49,7 @@ impl FsCacheMgr {
         backend: Arc<dyn BlobBackend>,
         runtime: Arc<Runtime>,
         id: &str,
+        ondemand_batch_size: Option<u64>,
     ) -> Result<FsCacheMgr> {
         if config.cache_compressed {
             return Err(enosys!("fscache doesn't support compressed cache mode"));
         }
@@ -73,6 +74,7 @@ impl FsCacheMgr {
             need_validation: config.cache_validate,
             blobs_check_count: Arc::new(AtomicU8::new(0)),
             closed: Arc::new(AtomicBool::new(false)),
+            ondemand_batch_size,
         })
     }
 
@@ -290,7 +292,9 @@ impl FileCacheEntry {
             is_zran,
             dio_enabled: true,
             need_validation,
-            batch_size: RAFS_DEFAULT_CHUNK_SIZE,
+            // If `None`, default to 0 since the value is not used,
+            // e.g., at build time.
+            batch_size: mgr.ondemand_batch_size.unwrap_or(0),
             prefetch_config,
         })
     }
diff --git a/storage/src/factory.rs b/storage/src/factory.rs
index cc37a4e913c..eeebfefb7e8 100644
--- a/storage/src/factory.rs
+++ b/storage/src/factory.rs
@@ -117,6 +117,10 @@ impl BlobFactory {
     ) -> IOResult<Arc<dyn BlobCache>> {
         let backend_cfg = config.get_backend_config()?;
         let cache_cfg = config.get_cache_config()?;
+        let ondemand_batch_size = match config.get_rafs_config() {
+            Ok(v) => Some(v.batch_size as u64),
+            Err(_) => None,
+        };
         let key = BlobCacheMgrKey {
             config: config.clone(),
         };
@@ -128,7 +132,13 @@ impl BlobFactory {
         let backend = Self::new_backend(backend_cfg, &blob_info.blob_id())?;
         let mgr = match cache_cfg.cache_type.as_str() {
             "blobcache" | "filecache" => {
-                let mgr = FileCacheMgr::new(cache_cfg, backend, ASYNC_RUNTIME.clone(), &config.id)?;
+                let mgr = FileCacheMgr::new(
+                    cache_cfg,
+                    backend,
+                    ASYNC_RUNTIME.clone(),
+                    &config.id,
+                    ondemand_batch_size,
+                )?;
                 mgr.init()?;
                 Arc::new(mgr) as Arc<dyn BlobCacheMgr>
             }
@@ -139,6 +149,7 @@ impl BlobFactory {
                     backend,
                     ASYNC_RUNTIME.clone(),
                     &config.id,
+                    ondemand_batch_size,
                 )?;
                 mgr.init()?;
                 Arc::new(mgr) as Arc<dyn BlobCacheMgr>
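For context, the prefetch path in `service/src/fs_cache.rs` normalizes the configured `batch_size` before use: it rounds the value up to the next power of two, falls back to the prefetch default (1 MiB) when the value is effectively unset (0 or 1) or the rounding overflows, and then enforces a 256 KiB floor. Below is a minimal standalone sketch of that logic; the helper name `normalize_prefetch_batch_size` is illustrative, not part of the patch, which inlines this code in `FsCacheHandler`:

```rust
/// Illustrative helper (not from the patch): normalize a configured
/// prefetch batch size the way the fs_cache.rs hunk does.
fn normalize_prefetch_batch_size(batch_size: usize) -> u64 {
    // Mirrors `nydus_api::default_prefetch_batch_size()` (1 MiB).
    const DEFAULT: u64 = 1024 * 1024;
    let size = match batch_size.checked_next_power_of_two() {
        // Rounding up overflowed `usize`: fall back to the default.
        None => DEFAULT,
        // `checked_next_power_of_two()` returns `Some(1)` for both 0
        // and 1, i.e. the value is effectively unset: use the default.
        Some(1) => DEFAULT,
        Some(s) => s as u64,
    };
    // Never prefetch in batches smaller than 256 KiB (0x4_0000).
    std::cmp::max(0x4_0000u64, size)
}

fn main() {
    assert_eq!(normalize_prefetch_batch_size(0), 1024 * 1024);
    // 100_000 rounds up to 128 KiB, then the 256 KiB floor wins.
    assert_eq!(normalize_prefetch_batch_size(100_000), 0x4_0000);
    // 3 MiB rounds up to 4 MiB.
    assert_eq!(normalize_prefetch_batch_size(3 << 20), 4 << 20);
}
```

This is why the rename matters: before the patch the fallback was `default_batch_size()` (then 128 KiB, below the 256 KiB floor anyway), while afterwards prefetch gets its own 1 MiB `default_prefetch_batch_size()` and `default_batch_size()` is freed up for the on-demand read path.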
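The new `ondemand_batch_size` is threaded through as an `Option<u64>` because the cache managers can also be constructed without any RAFS configuration (e.g. at image build time), where the on-demand `batch_size` is never consulted and 0 is a safe placeholder, as the added comments note. A condensed sketch of that intent, using simplified stand-ins for `ConfigV2`, `FileCacheMgr`, and `FileCacheEntry` rather than the real types:

```rust
/// Simplified stand-ins (not the real types) for `ConfigV2`,
/// `FileCacheMgr`, and `FileCacheEntry`.
struct RafsConfig {
    batch_size: usize,
}

struct Config {
    rafs: Option<RafsConfig>,
}

impl Config {
    /// Mirrors `ConfigV2::get_rafs_config()`: fails when the
    /// configuration carries no RAFS section.
    fn get_rafs_config(&self) -> Result<&RafsConfig, ()> {
        self.rafs.as_ref().ok_or(())
    }
}

struct FileCacheMgr {
    ondemand_batch_size: Option<u64>,
}

struct FileCacheEntry {
    batch_size: u64,
}

fn new_entry(mgr: &FileCacheMgr) -> FileCacheEntry {
    FileCacheEntry {
        // No RAFS config (e.g. at build time): the batch size is
        // never read, so 0 is a safe placeholder.
        batch_size: mgr.ondemand_batch_size.unwrap_or(0),
    }
}

fn main() {
    // Runtime mount: a RAFS section is present, so its batch size
    // flows through the factory into the manager and each entry.
    let cfg = Config {
        rafs: Some(RafsConfig { batch_size: 128 * 1024 }),
    };
    let ondemand_batch_size = match cfg.get_rafs_config() {
        Ok(v) => Some(v.batch_size as u64),
        Err(_) => None,
    };
    let mgr = FileCacheMgr { ondemand_batch_size };
    assert_eq!(new_entry(&mgr).batch_size, 128 * 1024);

    // Build time: no RAFS section, so the placeholder is used.
    let mgr = FileCacheMgr { ondemand_batch_size: None };
    assert_eq!(new_entry(&mgr).batch_size, 0);
}
```

Compared with the old hard-coded `RAFS_DEFAULT_CHUNK_SIZE`, this lets the user-configured RAFS `batch_size` control on-demand read amplification, and the `Option` keeps the build-time path from inventing a value it would never use.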