Skip to content

Commit

Permalink
feat: nydus support encrypted images
Browse files Browse the repository at this point in the history
Extend native nydus v6 to support handling encrypted
container images:
* An encrypted nydus image is composed of encrypted
bootstrap and chunk-level encrypted data blobs. The
bootstrap is encrypted by the Ocicrypt and the data
blobs are encrypted by aes-128-xts with randomly
generated key and iv at chunk-level.
* For every data blob, all the chunk data, compression
context table and compression context table header
are encrypted.
* The chunk encryption key and iv are stored in the blob
info reusing some items of the structure to save reserved
space.
* Encrypted chunk data will be decrypted and then
decompressed when fetched through the storage backend.
* Encrypted or unencrypted blobs can be merged together.

Signed-off-by: taohong <[email protected]>
  • Loading branch information
taoohong committed Jun 23, 2023
1 parent ee433ab commit ad64e56
Show file tree
Hide file tree
Showing 25 changed files with 678 additions and 175 deletions.
4 changes: 4 additions & 0 deletions builder/src/compact.rs
Original file line number Diff line number Diff line change
Expand Up @@ -552,6 +552,9 @@ impl BlobCompactor {
build_ctx.blob_features,
build_ctx.compressor,
build_ctx.digester,
build_ctx.cipher,
Default::default(),
None,
);
blob_ctx.set_meta_info_enabled(self.is_v6());
let blob_idx = self.new_blob_mgr.alloc_index()?;
Expand Down Expand Up @@ -606,6 +609,7 @@ impl BlobCompactor {
None,
false,
Features::new(),
false,
);
let mut bootstrap_mgr =
BootstrapManager::new(Some(ArtifactStorage::SingleFile(d_bootstrap)), None);
Expand Down
18 changes: 13 additions & 5 deletions builder/src/core/blob.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@ use anyhow::{Context, Result};
use nydus_rafs::metadata::RAFS_MAX_CHUNK_SIZE;
use nydus_storage::device::BlobFeatures;
use nydus_storage::meta::{toc, BlobMetaChunkArray};
use nydus_utils::compress;
use nydus_utils::digest::{self, DigestHasher, RafsDigest};
use nydus_utils::{compress, crypt};
use sha2::digest::Digest;

use super::layout::BlobLayout;
Expand Down Expand Up @@ -159,6 +159,9 @@ impl Blob {
}

// Prepare blob meta information data.
let encrypt = ctx.cipher != crypt::Algorithm::None;
let cipher_obj = &blob_ctx.cipher_object;
let cipher_ctx = &blob_ctx.cipher_ctx;
let blob_meta_info = &blob_ctx.blob_meta_info;
let mut ci_data = blob_meta_info.as_byte_slice();
let mut inflate_buf = Vec::new();
Expand Down Expand Up @@ -194,8 +197,11 @@ impl Blob {
if !compressed {
compressor = compress::Algorithm::None;
}

let encrypted_ci_data =
crypt::encrypt_with_context(&compressed_data, cipher_obj, cipher_ctx, encrypt)?;
let compressed_offset = blob_writer.pos()?;
let compressed_size = compressed_data.len() as u64;
let compressed_size = encrypted_ci_data.len() as u64;
let uncompressed_size = ci_data.len() as u64;

header.set_ci_compressor(compressor);
Expand All @@ -212,18 +218,20 @@ impl Blob {
header.set_inlined_chunk_digest(true);
}

let header_size = header.as_bytes().len();
blob_ctx.blob_meta_header = header;
let encrypted_header =
crypt::encrypt_with_context(header.as_bytes(), cipher_obj, cipher_ctx, encrypt)?;
let header_size = encrypted_header.len();

// Write blob meta data and header
match compressed_data {
match encrypted_ci_data {
Cow::Owned(v) => blob_ctx.write_data(blob_writer, &v)?,
Cow::Borrowed(v) => {
let buf = v.to_vec();
blob_ctx.write_data(blob_writer, &buf)?;
}
}
blob_ctx.write_data(blob_writer, header.as_bytes())?;
blob_ctx.write_data(blob_writer, &encrypted_header)?;

// Write tar header for `blob.meta`.
if ctx.blob_inline_meta || ctx.features.is_enabled(Feature::BlobToc) {
Expand Down
70 changes: 69 additions & 1 deletion builder/src/core/context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ use std::sync::{Arc, Mutex};
use std::{fmt, fs};

use anyhow::{anyhow, Context, Error, Result};
use nydus_utils::crypt::{self, Cipher, CipherContext};
use sha2::{Digest, Sha256};
use tar::{EntryType, Header};
use vmm_sys_util::tempfile::TempFile;
Expand Down Expand Up @@ -373,6 +374,7 @@ pub struct BlobContext {
pub blob_hash: Sha256,
pub blob_compressor: compress::Algorithm,
pub blob_digester: digest::Algorithm,
pub blob_cipher: crypt::Algorithm,
pub blob_prefetch_size: u64,
/// Whether to generate blob metadata information.
pub blob_meta_info_enabled: bool,
Expand Down Expand Up @@ -412,16 +414,23 @@ pub struct BlobContext {
pub blob_toc_size: u32,

pub entry_list: toc::TocEntryList,
/// Cipher to encrypt the RAFS blobs.
pub cipher_object: Arc<Cipher>,
pub cipher_ctx: Option<CipherContext>,
}

impl BlobContext {
/// Create a new instance of [BlobContext].
#[allow(clippy::too_many_arguments)]
pub fn new(
blob_id: String,
blob_offset: u64,
features: BlobFeatures,
compressor: compress::Algorithm,
digester: digest::Algorithm,
cipher: crypt::Algorithm,
cipher_object: Arc<Cipher>,
cipher_ctx: Option<CipherContext>,
) -> Self {
let blob_meta_info = if features.contains(BlobFeatures::CHUNK_INFO_V2) {
BlobMetaChunkArray::new_v2()
Expand All @@ -433,6 +442,7 @@ impl BlobContext {
blob_hash: Sha256::new(),
blob_compressor: compressor,
blob_digester: digester,
blob_cipher: cipher,
blob_prefetch_size: 0,
blob_meta_info_enabled: false,
blob_meta_info,
Expand All @@ -455,6 +465,8 @@ impl BlobContext {
blob_toc_size: 0,

entry_list: toc::TocEntryList::new(),
cipher_object,
cipher_ctx,
};

blob_ctx
Expand Down Expand Up @@ -578,7 +590,18 @@ impl BlobContext {
}
}

let mut blob_ctx = Self::new(blob_id, 0, features, blob.compressor(), blob.digester());
let (cipher, cipher_object, cipher_ctx) = blob.get_cipher_info();

let mut blob_ctx = Self::new(
blob_id,
0,
features,
blob.compressor(),
blob.digester(),
cipher,
cipher_object,
cipher_ctx,
);
blob_ctx.blob_prefetch_size = blob.prefetch_size();
blob_ctx.chunk_count = blob.chunk_count();
blob_ctx.uncompressed_blob_size = blob.uncompressed_size();
Expand Down Expand Up @@ -630,6 +653,15 @@ impl BlobContext {
self.blob_meta_info_enabled = enable;
}

/// Replace this blob's cipher state with the given cipher object and context.
///
/// `cipher_object` performs the actual encryption/decryption; `cipher_ctx`
/// carries the per-blob key/IV material (`None` when the blob is unencrypted).
pub fn set_cipher_info(
&mut self,
cipher_object: Arc<Cipher>,
cipher_ctx: Option<CipherContext>,
) {
self.cipher_object = cipher_object;
self.cipher_ctx = cipher_ctx;
}

pub fn add_chunk_meta_info(
&mut self,
chunk: &ChunkWrapper,
Expand Down Expand Up @@ -658,6 +690,7 @@ impl BlobContext {
chunk.uncompressed_offset(),
chunk.uncompressed_size(),
chunk.is_compressed(),
chunk.is_encrypted(),
chunk.is_batch(),
0,
);
Expand Down Expand Up @@ -751,12 +784,33 @@ impl BlobManager {
}

fn new_blob_ctx(ctx: &BuildContext) -> Result<BlobContext> {
let (cipher_object, cipher_ctx) = match ctx.cipher {
crypt::Algorithm::None => (Default::default(), None),
crypt::Algorithm::Aes128Xts => {
let key = crypt::Cipher::generate_key_for_aes_xts(crypt::AES_128_XTS_KEY_LENGTH)?;
let iv = crypt::Cipher::generate_random_iv(crypt::AES_XTS_IV_LENGTH)?;
let cipher_ctx = CipherContext::new(key, iv, false)?;
(
ctx.cipher.new_cipher().ok().unwrap_or(Default::default()),
Some(cipher_ctx),
)
}
_ => {
return Err(anyhow!(format!(
"cipher algorithm {:?} does not support",
ctx.cipher
)))
}
};
let mut blob_ctx = BlobContext::new(
ctx.blob_id.clone(),
ctx.blob_offset,
ctx.blob_features,
ctx.compressor,
ctx.digester,
ctx.cipher,
Arc::new(cipher_object),
cipher_ctx,
);
blob_ctx.set_chunk_size(ctx.chunk_size);
blob_ctx.set_meta_info_enabled(
Expand Down Expand Up @@ -936,6 +990,7 @@ impl BlobManager {
RafsBlobTable::V6(table) => {
flags |= RafsSuperFlags::from(ctx.blob_compressor);
flags |= RafsSuperFlags::from(ctx.blob_digester);
flags |= RafsSuperFlags::from(ctx.blob_cipher);
table.add(
blob_id,
0,
Expand All @@ -950,6 +1005,8 @@ impl BlobManager {
ctx.blob_meta_size,
ctx.blob_toc_size,
ctx.blob_meta_header,
ctx.cipher_object.clone(),
ctx.cipher_ctx.clone(),
);
}
}
Expand Down Expand Up @@ -1087,6 +1144,8 @@ pub struct BuildContext {
pub compressor: compress::Algorithm,
/// Inode and chunk digest algorithm flag.
pub digester: digest::Algorithm,
/// Blob encryption algorithm flag.
pub cipher: crypt::Algorithm,
/// Save host uid gid in each inode.
pub explicit_uidgid: bool,
/// whiteout spec: overlayfs or oci
Expand Down Expand Up @@ -1138,6 +1197,7 @@ impl BuildContext {
blob_storage: Option<ArtifactStorage>,
blob_inline_meta: bool,
features: Features,
encrypt: bool,
) -> Self {
// It's a flag for images built with new nydus-image 2.2 and newer.
let mut blob_features = BlobFeatures::CAP_TAR_TOC;
Expand All @@ -1153,12 +1213,19 @@ impl BuildContext {
blob_features |= BlobFeatures::TARFS;
}

let cipher = if encrypt {
crypt::Algorithm::Aes128Xts
} else {
crypt::Algorithm::None
};

BuildContext {
blob_id,
aligned_chunk,
blob_offset,
compressor,
digester,
cipher,
explicit_uidgid,
whiteout_spec,

Expand Down Expand Up @@ -1208,6 +1275,7 @@ impl Default for BuildContext {
blob_offset: 0,
compressor: compress::Algorithm::default(),
digester: digest::Algorithm::default(),
cipher: crypt::Algorithm::None,
explicit_uidgid: true,
whiteout_spec: WhiteoutSpec::default(),

Expand Down
23 changes: 17 additions & 6 deletions builder/src/core/node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,8 @@ use nydus_rafs::metadata::layout::RafsXAttrs;
use nydus_rafs::metadata::{Inode, RafsVersion};
use nydus_storage::device::BlobFeatures;
use nydus_storage::meta::{BlobChunkInfoV2Ondisk, BlobMetaChunkInfo};
use nydus_utils::compress;
use nydus_utils::digest::{DigestHasher, RafsDigest};
use nydus_utils::{compress, crypt};
use nydus_utils::{div_round_up, event_tracer, root_tracer, try_round_up_4k, ByteSize};
use sha2::digest::Digest;

Expand Down Expand Up @@ -380,6 +380,10 @@ impl Node {
chunk.set_id(RafsDigest::from_buf(buf, ctx.digester));
}

if ctx.cipher != crypt::Algorithm::None {
chunk.set_encrypted(true);
}

Ok((chunk, chunk_info))
}

Expand Down Expand Up @@ -407,6 +411,7 @@ impl Node {
chunk.set_uncompressed_size(d_size);

let mut chunk_info = None;
let encrypted = blob_ctx.blob_cipher != crypt::Algorithm::None;

if self.inode.child_count() == 1
&& d_size < ctx.batch_size / 2
Expand All @@ -417,7 +422,7 @@ impl Node {

if batch.chunk_data_buf_len() as u32 + d_size < ctx.batch_size {
// Add into current batch chunk directly.
chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size)?);
chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size, encrypted)?);
batch.append_chunk_data_buf(chunk_data);
} else {
// Dump current batch chunk if exists, and then add into a new batch chunk.
Expand All @@ -430,7 +435,7 @@ impl Node {
}

// Add into a new batch chunk.
chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size)?);
chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size, encrypted)?);
batch.append_chunk_data_buf(chunk_data);
}
} else if !ctx.blob_features.contains(BlobFeatures::SEPARATE) {
Expand Down Expand Up @@ -470,12 +475,18 @@ impl Node {
) -> Result<(u64, u32, bool)> {
let (compressed, is_compressed) = compress::compress(chunk_data, ctx.compressor)
.with_context(|| "failed to compress node file".to_string())?;
let compressed_size = compressed.len() as u32;
let encrypted = crypt::encrypt_with_context(
&compressed,
&blob_ctx.cipher_object,
&blob_ctx.cipher_ctx,
blob_ctx.blob_cipher != crypt::Algorithm::None,
)?;
let compressed_size = encrypted.len() as u32;
let pre_compressed_offset = blob_ctx.current_compressed_offset;
blob_writer
.write_all(&compressed)
.write_all(&encrypted)
.context("failed to write blob")?;
blob_ctx.blob_hash.update(&compressed);
blob_ctx.blob_hash.update(&encrypted);
blob_ctx.current_compressed_offset += compressed_size as u64;
blob_ctx.compressed_blob_size += compressed_size as u64;

Expand Down
1 change: 1 addition & 0 deletions builder/src/core/v6.rs
Original file line number Diff line number Diff line change
Expand Up @@ -692,6 +692,7 @@ impl Bootstrap {
let mut ext_sb = RafsV6SuperBlockExt::new();
ext_sb.set_compressor(ctx.compressor);
ext_sb.set_digester(ctx.digester);
ext_sb.set_cipher(ctx.cipher);
ext_sb.set_chunk_size(ctx.chunk_size);
ext_sb.set_blob_table_offset(blob_table_offset);
ext_sb.set_blob_table_size(blob_table_size as u32);
Expand Down
7 changes: 7 additions & 0 deletions builder/src/merge.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ use hex::FromHex;
use nydus_api::ConfigV2;
use nydus_rafs::metadata::{RafsSuper, RafsVersion};
use nydus_storage::device::{BlobFeatures, BlobInfo};
use nydus_utils::crypt;

use super::{
ArtifactStorage, BlobContext, BlobManager, Bootstrap, BootstrapContext, BuildContext,
Expand Down Expand Up @@ -149,6 +150,12 @@ impl Merger {
.context("failed to get RAFS version number")?;
ctx.compressor = rs.meta.get_compressor();
ctx.digester = rs.meta.get_digester();
// If any RAFS filesystems are encrypted, the merged bootstrap will be marked as encrypted.
match rs.meta.get_cipher() {
crypt::Algorithm::None => (),
crypt::Algorithm::Aes128Xts => ctx.cipher = crypt::Algorithm::Aes128Xts,
_ => bail!("invalid per layer bootstrap, only supports aes-128-xts"),
}
ctx.explicit_uidgid = rs.meta.explicit_uidgid();
if config.as_ref().unwrap().is_tarfs_mode {
ctx.conversion_type = ConversionType::TarToTarfs;
Expand Down
1 change: 1 addition & 0 deletions builder/src/stargz.rs
Original file line number Diff line number Diff line change
Expand Up @@ -934,6 +934,7 @@ mod tests {
Some(ArtifactStorage::FileDir(tmp_dir.clone())),
false,
Features::new(),
false,
);
ctx.fs_version = RafsVersion::V6;
let mut bootstrap_mgr =
Expand Down
Loading

0 comments on commit ad64e56

Please sign in to comment.