rafs: optimize the way to build RAFS filesystem
The current way to build a RAFS filesystem is:
- build the lower tree from parent bootstrap
- convert the lower tree into an array
- build the upper tree from source
- merge the upper tree into the lower tree
- convert the merged tree into another array
- dump nodes from the array

Now we optimize it as follows (see the sketch after this list):
- build the lower tree from parent bootstrap
- build the upper tree from source
- merge the upper tree into the lower tree
- dump the merged tree
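
To make the new flow concrete, the toy sketch below walks the merge-then-dump idea end to end. The `Tree` and `Node` types and the `merge`/`dump` methods here are simplified stand-ins invented for illustration, not the builder's real APIs; the point is that the merged tree is dumped by walking it directly, with no intermediate node array to build and keep in sync.

```rust
use std::collections::BTreeMap;

// Toy stand-ins for the builder's Tree/Node types, just to illustrate the
// control flow; the real types live in rafs/src/builder and are richer.
#[derive(Debug, Clone)]
struct Node {
    name: String,
    data: Vec<u8>,
}

#[derive(Debug, Default)]
struct Tree {
    children: BTreeMap<String, Node>,
}

impl Tree {
    // Merge the upper tree into self: upper entries overlay lower ones.
    fn merge(&mut self, upper: Tree) {
        for (name, node) in upper.children {
            self.children.insert(name, node);
        }
    }

    // Dump nodes by walking the merged tree directly, instead of first
    // flattening it into an array as the old flow did.
    fn dump(&self) {
        for node in self.children.values() {
            println!("dump {} ({} bytes)", node.name, node.data.len());
        }
    }
}

fn main() {
    // Lower tree built from the parent bootstrap (toy data).
    let mut lower = Tree::default();
    lower.children.insert("a".into(), Node { name: "a".into(), data: vec![1, 2] });

    // Upper tree built from the source (toy data).
    let mut upper = Tree::default();
    upper.children.insert("b".into(), Node { name: "b".into(), data: vec![3] });

    // Merge the upper tree into the lower tree, then dump the merged tree.
    lower.merge(upper);
    lower.dump();
}
```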

Signed-off-by: Jiang Liu <[email protected]>
jiangliu authored and imeoer committed May 23, 2023
1 parent 80bd7dc commit 809f8d9
Showing 21 changed files with 1,457 additions and 1,349 deletions.
236 changes: 130 additions & 106 deletions rafs/src/builder/compact.rs

Large diffs are not rendered by default.

22 changes: 12 additions & 10 deletions rafs/src/builder/core/blob.rs
@@ -3,7 +3,6 @@
 // SPDX-License-Identifier: Apache-2.0
 
 use std::borrow::Cow;
-use std::collections::VecDeque;
 use std::io::Write;
 use std::slice;
 
@@ -17,7 +16,7 @@ use sha2::digest::Digest;
 use super::layout::BlobLayout;
 use super::node::Node;
 use crate::builder::{
-    ArtifactWriter, BlobContext, BlobManager, BuildContext, ConversionType, Feature,
+    ArtifactWriter, BlobContext, BlobManager, BuildContext, ConversionType, Feature, Tree,
 };
 use crate::metadata::RAFS_MAX_CHUNK_SIZE;
 
@@ -28,17 +27,17 @@ impl Blob {
     /// Dump blob file and generate chunks
     pub(crate) fn dump(
         ctx: &BuildContext,
-        nodes: &mut VecDeque<Node>,
+        tree: &Tree,
         blob_mgr: &mut BlobManager,
         blob_writer: &mut ArtifactWriter,
     ) -> Result<()> {
         match ctx.conversion_type {
             ConversionType::DirectoryToRafs => {
-                let (inodes, prefetch_entries) =
-                    BlobLayout::layout_blob_simple(&ctx.prefetch, nodes)?;
                 let mut chunk_data_buf = vec![0u8; RAFS_MAX_CHUNK_SIZE as usize];
-                for (idx, inode) in inodes.iter().enumerate() {
-                    let node = &mut nodes[*inode];
+                let (inodes, prefetch_entries) =
+                    BlobLayout::layout_blob_simple(&ctx.prefetch, tree)?;
+                for (idx, node) in inodes.iter().enumerate() {
+                    let mut node = node.lock().unwrap();
                     let size = node
                         .dump_node_data(ctx, blob_mgr, blob_writer, &mut chunk_data_buf)
                         .context("failed to dump blob chunks")?;
@@ -55,8 +54,8 @@
             | ConversionType::EStargzToRafs => {
                 Self::finalize_blob_data(ctx, blob_mgr, blob_writer)?;
             }
-            ConversionType::TarToRef
-            | ConversionType::TarToTarfs
+            ConversionType::TarToTarfs
+            | ConversionType::TarToRef
             | ConversionType::TargzToRef
             | ConversionType::EStargzToRef => {
                 // Use `sha256(tarball)` as `blob_id` for ref-type conversions.
@@ -81,10 +80,12 @@
                 }
                 Self::finalize_blob_data(ctx, blob_mgr, blob_writer)?;
             }
+            ConversionType::EStargzIndexToRef => {
+                Self::finalize_blob_data(ctx, blob_mgr, blob_writer)?;
+            }
             ConversionType::TarToStargz
             | ConversionType::DirectoryToTargz
             | ConversionType::DirectoryToStargz
-            | ConversionType::EStargzIndexToRef
             | ConversionType::TargzToStargz => {
                 unimplemented!()
             }
@@ -118,6 +119,7 @@ impl Blob {
                 }
             }
         }
+
         if !ctx.blob_features.contains(BlobFeatures::SEPARATE)
             && (ctx.blob_inline_meta || ctx.features.is_enabled(Feature::BlobToc))
         {
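
In the hunks above, `dump()` now takes a `&Tree` instead of a `&mut VecDeque<Node>`, and the dump loop locks each entry (`node.lock().unwrap()`) before writing its chunks, which implies the tree hands out shared node handles. The sketch below shows that pattern with hypothetical toy types, assuming the handles are roughly `Arc<Mutex<Node>>`; the real `Tree`/`Node` definitions live in `rafs/src/builder` and differ in detail.

```rust
use std::sync::{Arc, Mutex};

// Toy stand-ins: the real builder types are richer, but the shape of the
// dump loop is the same -- lock each shared node, then emit its data.
struct Node {
    path: String,
    chunks: Vec<Vec<u8>>,
}

type NodeRef = Arc<Mutex<Node>>;

struct Tree {
    node: NodeRef,
    children: Vec<Tree>,
}

// Collect node handles in depth-first order, roughly what a layout pass
// would hand back to the dump loop.
fn collect(tree: &Tree, out: &mut Vec<NodeRef>) {
    out.push(tree.node.clone());
    for child in &tree.children {
        collect(child, out);
    }
}

fn main() {
    let root = Tree {
        node: Arc::new(Mutex::new(Node { path: "/".into(), chunks: vec![] })),
        children: vec![Tree {
            node: Arc::new(Mutex::new(Node {
                path: "/etc/hosts".into(),
                chunks: vec![vec![0u8; 4], vec![0u8; 2]],
            })),
            children: vec![],
        }],
    };

    let mut inodes = Vec::new();
    collect(&root, &mut inodes);

    // Mirrors the new dump loop: lock each shared node, then dump its chunks.
    for (idx, node) in inodes.iter().enumerate() {
        let node = node.lock().unwrap();
        let size: usize = node.chunks.iter().map(|c| c.len()).sum();
        println!("node {}: {} ({} bytes)", idx, node.path, size);
    }
}
```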
