From 7103c7c56c5e7eb415afe8634fd3f3dcf776382d Mon Sep 17 00:00:00 2001
From: taohong
Date: Tue, 4 Jul 2023 14:21:32 +0800
Subject: [PATCH 1/2] feat: nydus support for encrypted images

Extend native nydus v6 to support handling encrypted container images:

* An encrypted nydus image is composed of an encrypted bootstrap and
  chunk-level encrypted data blobs. The bootstrap is encrypted by
  Ocicrypt, and the data blobs are encrypted by aes-128-xts with a
  randomly generated key and iv at chunk level.
* For every data blob, all of the chunk data, the compression context
  table and the compression context table header are encrypted.
* The chunk encryption key and iv are stored in the blob info, reusing
  existing fields of the structure to save reserved space.
* Encrypted chunk data is decrypted and then decompressed when it is
  fetched from the storage backend.
* Encrypted and unencrypted blobs can be merged together.

Signed-off-by: taohong
---
 builder/src/compact.rs             |   4 +
 builder/src/core/blob.rs           |  18 ++-
 builder/src/core/context.rs        |  73 ++++++++-
 builder/src/core/node.rs           |  23 ++-
 builder/src/core/v6.rs             |   1 +
 builder/src/merge.rs               |   7 +
 builder/src/stargz.rs              |   1 +
 builder/src/tarball.rs             |  33 ++++
 rafs/src/metadata/chunk.rs         |  25 +++
 rafs/src/metadata/layout/v6.rs     | 111 +++++++++++--
 rafs/src/metadata/mod.rs           |  37 ++++-
 src/bin/nydus-image/inspect.rs     |   2 +
 src/bin/nydus-image/main.rs        |  45 +++++-
 storage/src/cache/cachedfile.rs    |  18 +--
 storage/src/cache/dummycache.rs    |   6 +-
 storage/src/cache/filecache/mod.rs |   8 +-
 storage/src/cache/mod.rs           |  46 ++++--
 storage/src/context.rs             |  60 -------
 storage/src/device.rs              |  38 ++++-
 storage/src/lib.rs                 |   1 -
 storage/src/meta/batch.rs          |   2 +
 storage/src/meta/chunk_info_v2.rs  |  12 +-
 storage/src/meta/mod.rs            |  70 +++++++--
 storage/src/meta/zran.rs           |   1 +
 utils/src/crypt.rs                 | 241 ++++++++++++++++++++++++-----
 25 files changed, 704 insertions(+), 179 deletions(-)
 delete mode 100644 storage/src/context.rs

diff --git a/builder/src/compact.rs b/builder/src/compact.rs
index 3721c4694ac..19fce1d0137 100644
--- a/builder/src/compact.rs
+++ b/builder/src/compact.rs
@@ -552,6 +552,9 @@ impl BlobCompactor {
             build_ctx.blob_features,
             build_ctx.compressor,
             build_ctx.digester,
+            build_ctx.cipher,
+            Default::default(),
+            None,
         );
         blob_ctx.set_meta_info_enabled(self.is_v6());
         let blob_idx = self.new_blob_mgr.alloc_index()?;
@@ -606,6 +609,7 @@ impl BlobCompactor {
             None,
             false,
             Features::new(),
+            false,
         );
         let mut bootstrap_mgr =
             BootstrapManager::new(Some(ArtifactStorage::SingleFile(d_bootstrap)), None);
diff --git a/builder/src/core/blob.rs b/builder/src/core/blob.rs
index 8f18f2faad9..b2282933854 100644
--- a/builder/src/core/blob.rs
+++ b/builder/src/core/blob.rs
@@ -10,8 +10,8 @@ use anyhow::{Context, Result};
 use nydus_rafs::metadata::RAFS_MAX_CHUNK_SIZE;
 use nydus_storage::device::BlobFeatures;
 use nydus_storage::meta::{toc, BlobMetaChunkArray};
-use nydus_utils::compress;
 use nydus_utils::digest::{self, DigestHasher, RafsDigest};
+use nydus_utils::{compress, crypt};
 use sha2::digest::Digest;

 use super::layout::BlobLayout;
@@ -159,6 +159,9 @@ impl Blob {
         }

         // Prepare blob meta information data.
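The hunk that continues below feeds the compression context data through encryption before it is written. As background, a minimal sketch of the chunk-level AES-128-XTS primitive the commit message describes; this is an illustration only, not part of the patch, using the `openssl` crate that `utils/src/crypt.rs` wraps, with placeholder key/IV values:

fn xts_chunk_roundtrip() -> Result<(), openssl::error::ErrorStack> {
    use openssl::symm::{decrypt, encrypt, Cipher};

    // AES-128-XTS takes a 32-byte key; OpenSSL rejects XTS keys whose two
    // 16-byte halves are identical, so the halves differ here.
    let mut key = [0u8; 32];
    key[..16].copy_from_slice(&[0x11; 16]);
    key[16..].copy_from_slice(&[0x22; 16]);
    let iv = [0x01u8; 16]; // the 16-byte tweak; the patch stores it per blob

    let chunk = b"compressed chunk payload";
    let sealed = encrypt(Cipher::aes_128_xts(), &key, Some(&iv), chunk)?;
    let opened = decrypt(Cipher::aes_128_xts(), &key, Some(&iv), &sealed)?;
    assert_eq!(&opened[..], &chunk[..]);
    Ok(())
}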
+ let encrypt = ctx.cipher != crypt::Algorithm::None; + let cipher_obj = &blob_ctx.cipher_object; + let cipher_ctx = &blob_ctx.cipher_ctx; let blob_meta_info = &blob_ctx.blob_meta_info; let mut ci_data = blob_meta_info.as_byte_slice(); let mut inflate_buf = Vec::new(); @@ -194,8 +197,11 @@ impl Blob { if !compressed { compressor = compress::Algorithm::None; } + + let encrypted_ci_data = + crypt::encrypt_with_context(&compressed_data, cipher_obj, cipher_ctx, encrypt)?; let compressed_offset = blob_writer.pos()?; - let compressed_size = compressed_data.len() as u64; + let compressed_size = encrypted_ci_data.len() as u64; let uncompressed_size = ci_data.len() as u64; header.set_ci_compressor(compressor); @@ -212,18 +218,20 @@ impl Blob { header.set_inlined_chunk_digest(true); } - let header_size = header.as_bytes().len(); blob_ctx.blob_meta_header = header; + let encrypted_header = + crypt::encrypt_with_context(header.as_bytes(), cipher_obj, cipher_ctx, encrypt)?; + let header_size = encrypted_header.len(); // Write blob meta data and header - match compressed_data { + match encrypted_ci_data { Cow::Owned(v) => blob_ctx.write_data(blob_writer, &v)?, Cow::Borrowed(v) => { let buf = v.to_vec(); blob_ctx.write_data(blob_writer, &buf)?; } } - blob_ctx.write_data(blob_writer, header.as_bytes())?; + blob_ctx.write_data(blob_writer, &encrypted_header)?; // Write tar header for `blob.meta`. if ctx.blob_inline_meta || ctx.features.is_enabled(Feature::BlobToc) { diff --git a/builder/src/core/context.rs b/builder/src/core/context.rs index 56d0a646532..36f48a3c325 100644 --- a/builder/src/core/context.rs +++ b/builder/src/core/context.rs @@ -17,6 +17,7 @@ use std::sync::{Arc, Mutex}; use std::{fmt, fs}; use anyhow::{anyhow, Context, Error, Result}; +use nydus_utils::crypt::{self, Cipher, CipherContext}; use sha2::{Digest, Sha256}; use tar::{EntryType, Header}; use vmm_sys_util::tempfile::TempFile; @@ -373,6 +374,7 @@ pub struct BlobContext { pub blob_hash: Sha256, pub blob_compressor: compress::Algorithm, pub blob_digester: digest::Algorithm, + pub blob_cipher: crypt::Algorithm, pub blob_prefetch_size: u64, /// Whether to generate blob metadata information. pub blob_meta_info_enabled: bool, @@ -412,16 +414,23 @@ pub struct BlobContext { pub blob_toc_size: u32, pub entry_list: toc::TocEntryList, + /// Cipher to encrypt the RAFS blobs. + pub cipher_object: Arc, + pub cipher_ctx: Option, } impl BlobContext { /// Create a new instance of [BlobContext]. 
+ #[allow(clippy::too_many_arguments)] pub fn new( blob_id: String, blob_offset: u64, features: BlobFeatures, compressor: compress::Algorithm, digester: digest::Algorithm, + cipher: crypt::Algorithm, + cipher_object: Arc, + cipher_ctx: Option, ) -> Self { let blob_meta_info = if features.contains(BlobFeatures::CHUNK_INFO_V2) { BlobMetaChunkArray::new_v2() @@ -433,6 +442,7 @@ impl BlobContext { blob_hash: Sha256::new(), blob_compressor: compressor, blob_digester: digester, + blob_cipher: cipher, blob_prefetch_size: 0, blob_meta_info_enabled: false, blob_meta_info, @@ -455,6 +465,8 @@ impl BlobContext { blob_toc_size: 0, entry_list: toc::TocEntryList::new(), + cipher_object, + cipher_ctx, }; blob_ctx @@ -490,6 +502,9 @@ impl BlobContext { blob_ctx .blob_meta_header .set_tarfs(features.contains(BlobFeatures::TARFS)); + blob_ctx + .blob_meta_header + .set_encrypted(features.contains(BlobFeatures::ENCRYPTED)); blob_ctx } @@ -578,7 +593,18 @@ impl BlobContext { } } - let mut blob_ctx = Self::new(blob_id, 0, features, blob.compressor(), blob.digester()); + let (cipher, cipher_object, cipher_ctx) = blob.get_cipher_info(); + + let mut blob_ctx = Self::new( + blob_id, + 0, + features, + blob.compressor(), + blob.digester(), + cipher, + cipher_object, + cipher_ctx, + ); blob_ctx.blob_prefetch_size = blob.prefetch_size(); blob_ctx.chunk_count = blob.chunk_count(); blob_ctx.uncompressed_blob_size = blob.uncompressed_size(); @@ -630,6 +656,15 @@ impl BlobContext { self.blob_meta_info_enabled = enable; } + pub fn set_cipher_info( + &mut self, + cipher_object: Arc, + cipher_ctx: Option, + ) { + self.cipher_object = cipher_object; + self.cipher_ctx = cipher_ctx; + } + pub fn add_chunk_meta_info( &mut self, chunk: &ChunkWrapper, @@ -658,6 +693,7 @@ impl BlobContext { chunk.uncompressed_offset(), chunk.uncompressed_size(), chunk.is_compressed(), + chunk.is_encrypted(), chunk.is_batch(), 0, ); @@ -751,12 +787,33 @@ impl BlobManager { } fn new_blob_ctx(ctx: &BuildContext) -> Result { + let (cipher_object, cipher_ctx) = match ctx.cipher { + crypt::Algorithm::None => (Default::default(), None), + crypt::Algorithm::Aes128Xts => { + let key = crypt::Cipher::generate_random_key(ctx.cipher)?; + let iv = crypt::Cipher::generate_random_iv()?; + let cipher_ctx = CipherContext::new(key, iv, false, ctx.cipher)?; + ( + ctx.cipher.new_cipher().ok().unwrap_or(Default::default()), + Some(cipher_ctx), + ) + } + _ => { + return Err(anyhow!(format!( + "cipher algorithm {:?} does not support", + ctx.cipher + ))) + } + }; let mut blob_ctx = BlobContext::new( ctx.blob_id.clone(), ctx.blob_offset, ctx.blob_features, ctx.compressor, ctx.digester, + ctx.cipher, + Arc::new(cipher_object), + cipher_ctx, ); blob_ctx.set_chunk_size(ctx.chunk_size); blob_ctx.set_meta_info_enabled( @@ -936,6 +993,7 @@ impl BlobManager { RafsBlobTable::V6(table) => { flags |= RafsSuperFlags::from(ctx.blob_compressor); flags |= RafsSuperFlags::from(ctx.blob_digester); + flags |= RafsSuperFlags::from(ctx.blob_cipher); table.add( blob_id, 0, @@ -950,6 +1008,8 @@ impl BlobManager { ctx.blob_meta_size, ctx.blob_toc_size, ctx.blob_meta_header, + ctx.cipher_object.clone(), + ctx.cipher_ctx.clone(), ); } } @@ -1087,6 +1147,8 @@ pub struct BuildContext { pub compressor: compress::Algorithm, /// Inode and chunk digest algorithm flag. pub digester: digest::Algorithm, + /// Blob encryption algorithm flag. + pub cipher: crypt::Algorithm, /// Save host uid gid in each inode. 
pub explicit_uidgid: bool, /// whiteout spec: overlayfs or oci @@ -1138,6 +1200,7 @@ impl BuildContext { blob_storage: Option, blob_inline_meta: bool, features: Features, + encrypt: bool, ) -> Self { // It's a flag for images built with new nydus-image 2.2 and newer. let mut blob_features = BlobFeatures::CAP_TAR_TOC; @@ -1153,12 +1216,19 @@ impl BuildContext { blob_features |= BlobFeatures::TARFS; } + let cipher = if encrypt { + crypt::Algorithm::Aes128Xts + } else { + crypt::Algorithm::None + }; + BuildContext { blob_id, aligned_chunk, blob_offset, compressor, digester, + cipher, explicit_uidgid, whiteout_spec, @@ -1208,6 +1278,7 @@ impl Default for BuildContext { blob_offset: 0, compressor: compress::Algorithm::default(), digester: digest::Algorithm::default(), + cipher: crypt::Algorithm::None, explicit_uidgid: true, whiteout_spec: WhiteoutSpec::default(), diff --git a/builder/src/core/node.rs b/builder/src/core/node.rs index 44a59905aa1..5357d639a28 100644 --- a/builder/src/core/node.rs +++ b/builder/src/core/node.rs @@ -24,8 +24,8 @@ use nydus_rafs::metadata::layout::RafsXAttrs; use nydus_rafs::metadata::{Inode, RafsVersion}; use nydus_storage::device::BlobFeatures; use nydus_storage::meta::{BlobChunkInfoV2Ondisk, BlobMetaChunkInfo}; -use nydus_utils::compress; use nydus_utils::digest::{DigestHasher, RafsDigest}; +use nydus_utils::{compress, crypt}; use nydus_utils::{div_round_up, event_tracer, root_tracer, try_round_up_4k, ByteSize}; use sha2::digest::Digest; @@ -380,6 +380,10 @@ impl Node { chunk.set_id(RafsDigest::from_buf(buf, ctx.digester)); } + if ctx.cipher != crypt::Algorithm::None { + chunk.set_encrypted(true); + } + Ok((chunk, chunk_info)) } @@ -407,6 +411,7 @@ impl Node { chunk.set_uncompressed_size(d_size); let mut chunk_info = None; + let encrypted = blob_ctx.blob_cipher != crypt::Algorithm::None; if self.inode.child_count() == 1 && d_size < ctx.batch_size / 2 @@ -417,7 +422,7 @@ impl Node { if batch.chunk_data_buf_len() as u32 + d_size < ctx.batch_size { // Add into current batch chunk directly. - chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size)?); + chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size, encrypted)?); batch.append_chunk_data_buf(chunk_data); } else { // Dump current batch chunk if exists, and then add into a new batch chunk. @@ -430,7 +435,7 @@ impl Node { } // Add into a new batch chunk. 
-                chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size)?);
+                chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size, encrypted)?);
                 batch.append_chunk_data_buf(chunk_data);
             }
         } else if !ctx.blob_features.contains(BlobFeatures::SEPARATE) {
@@ -470,12 +475,18 @@ impl Node {
     ) -> Result<(u64, u32, bool)> {
         let (compressed, is_compressed) = compress::compress(chunk_data, ctx.compressor)
             .with_context(|| "failed to compress node file".to_string())?;
-        let compressed_size = compressed.len() as u32;
+        let encrypted = crypt::encrypt_with_context(
+            &compressed,
+            &blob_ctx.cipher_object,
+            &blob_ctx.cipher_ctx,
+            blob_ctx.blob_cipher != crypt::Algorithm::None,
+        )?;
+        let compressed_size = encrypted.len() as u32;
         let pre_compressed_offset = blob_ctx.current_compressed_offset;
         blob_writer
-            .write_all(&compressed)
+            .write_all(&encrypted)
             .context("failed to write blob")?;
-        blob_ctx.blob_hash.update(&compressed);
+        blob_ctx.blob_hash.update(&encrypted);
         blob_ctx.current_compressed_offset += compressed_size as u64;
         blob_ctx.compressed_blob_size += compressed_size as u64;
diff --git a/builder/src/core/v6.rs b/builder/src/core/v6.rs
index f2ea8ff208d..f81e0b95432 100644
--- a/builder/src/core/v6.rs
+++ b/builder/src/core/v6.rs
@@ -692,6 +692,7 @@ impl Bootstrap {
         let mut ext_sb = RafsV6SuperBlockExt::new();
         ext_sb.set_compressor(ctx.compressor);
         ext_sb.set_digester(ctx.digester);
+        ext_sb.set_cipher(ctx.cipher);
         ext_sb.set_chunk_size(ctx.chunk_size);
         ext_sb.set_blob_table_offset(blob_table_offset);
         ext_sb.set_blob_table_size(blob_table_size as u32);
diff --git a/builder/src/merge.rs b/builder/src/merge.rs
index 2ab173bbee2..8301b32cf8d 100644
--- a/builder/src/merge.rs
+++ b/builder/src/merge.rs
@@ -13,6 +13,7 @@ use hex::FromHex;
 use nydus_api::ConfigV2;
 use nydus_rafs::metadata::{RafsSuper, RafsVersion};
 use nydus_storage::device::{BlobFeatures, BlobInfo};
+use nydus_utils::crypt;

 use super::{
     ArtifactStorage, BlobContext, BlobManager, Bootstrap, BootstrapContext, BuildContext,
@@ -149,6 +150,12 @@ impl Merger {
             .context("failed to get RAFS version number")?;
         ctx.compressor = rs.meta.get_compressor();
         ctx.digester = rs.meta.get_digester();
+        // If any RAFS filesystems are encrypted, the merged bootstrap will be marked as encrypted.
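The match below implements the rule stated in the preceding comment. Distilled into a standalone helper (a hypothetical name, for illustration only), the per-layer decision is:

use anyhow::{bail, Result};
use nydus_utils::crypt;

// Fold one layer's cipher into the merge-wide choice: unencrypted layers
// leave it unchanged, aes-128-xts layers mark the merged bootstrap as
// encrypted, and any other algorithm is rejected.
fn fold_layer_cipher(
    merged: crypt::Algorithm,
    layer: crypt::Algorithm,
) -> Result<crypt::Algorithm> {
    match layer {
        crypt::Algorithm::None => Ok(merged),
        crypt::Algorithm::Aes128Xts => Ok(crypt::Algorithm::Aes128Xts),
        _ => bail!("invalid per-layer bootstrap, only aes-128-xts is supported"),
    }
}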
+ match rs.meta.get_cipher() { + crypt::Algorithm::None => (), + crypt::Algorithm::Aes128Xts => ctx.cipher = crypt::Algorithm::Aes128Xts, + _ => bail!("invalid per layer bootstrap, only supports aes-128-xts"), + } ctx.explicit_uidgid = rs.meta.explicit_uidgid(); if config.as_ref().unwrap().is_tarfs_mode { ctx.conversion_type = ConversionType::TarToTarfs; diff --git a/builder/src/stargz.rs b/builder/src/stargz.rs index 06736ee41ac..424e9792553 100644 --- a/builder/src/stargz.rs +++ b/builder/src/stargz.rs @@ -934,6 +934,7 @@ mod tests { Some(ArtifactStorage::FileDir(tmp_dir.clone())), false, Features::new(), + false, ); ctx.fs_version = RafsVersion::V6; let mut bootstrap_mgr = diff --git a/builder/src/tarball.rs b/builder/src/tarball.rs index 3cdba64a2e2..605fc7871ab 100644 --- a/builder/src/tarball.rs +++ b/builder/src/tarball.rs @@ -618,6 +618,39 @@ mod tests { Some(ArtifactStorage::FileDir(tmp_dir.clone())), false, Features::new(), + false, + ); + let mut bootstrap_mgr = + BootstrapManager::new(Some(ArtifactStorage::FileDir(tmp_dir)), None); + let mut blob_mgr = BlobManager::new(digest::Algorithm::Sha256); + let mut builder = TarballBuilder::new(ConversionType::TarToTarfs); + builder + .build(&mut ctx, &mut bootstrap_mgr, &mut blob_mgr) + .unwrap(); + } + + #[test] + fn test_build_encrypted_tarfs() { + let tmp_dir = vmm_sys_util::tempdir::TempDir::new().unwrap(); + let tmp_dir = tmp_dir.as_path().to_path_buf(); + let root_dir = &std::env::var("CARGO_MANIFEST_DIR").expect("$CARGO_MANIFEST_DIR"); + let source_path = PathBuf::from(root_dir).join("../tests/texture/tar/all-entry-type.tar"); + let prefetch = Prefetch::default(); + let mut ctx = BuildContext::new( + "test".to_string(), + true, + 0, + compress::Algorithm::None, + digest::Algorithm::Sha256, + true, + WhiteoutSpec::Oci, + ConversionType::TarToTarfs, + source_path, + prefetch, + Some(ArtifactStorage::FileDir(tmp_dir.clone())), + false, + Features::new(), + true, ); let mut bootstrap_mgr = BootstrapManager::new(Some(ArtifactStorage::FileDir(tmp_dir)), None); diff --git a/rafs/src/metadata/chunk.rs b/rafs/src/metadata/chunk.rs index 99b1e3840ea..5e8a3446d37 100644 --- a/rafs/src/metadata/chunk.rs +++ b/rafs/src/metadata/chunk.rs @@ -248,6 +248,27 @@ impl ChunkWrapper { } } + /// Check whether the chunk is encrypted or not. + pub fn is_encrypted(&self) -> bool { + match self { + ChunkWrapper::V5(c) => c.flags.contains(BlobChunkFlags::ENCYPTED), + ChunkWrapper::V6(c) => c.flags.contains(BlobChunkFlags::ENCYPTED), + ChunkWrapper::Ref(c) => as_blob_v5_chunk_info(c.deref()) + .flags() + .contains(BlobChunkFlags::ENCYPTED), + } + } + + /// Set flag for whether chunk is encrypted. + pub fn set_encrypted(&mut self, encrypted: bool) { + self.ensure_owned(); + match self { + ChunkWrapper::V5(c) => c.flags.set(BlobChunkFlags::ENCYPTED, encrypted), + ChunkWrapper::V6(c) => c.flags.set(BlobChunkFlags::ENCYPTED, encrypted), + ChunkWrapper::Ref(_c) => panic!("unexpected"), + } + } + /// Set flag for whether chunk is batch chunk. 
    pub fn set_batch(&mut self, batch: bool) {
        self.ensure_owned();
@@ -281,6 +302,7 @@ impl ChunkWrapper {
         compressed_offset: u64,
         compressed_size: u32,
         is_compressed: bool,
+        is_encrypted: bool,
     ) -> Result<()> {
         self.ensure_owned();
         match self {
@@ -307,6 +329,9 @@ impl ChunkWrapper {
                 if is_compressed {
                     c.flags |= BlobChunkFlags::COMPRESSED;
                 }
+                if is_encrypted {
+                    c.flags |= BlobChunkFlags::ENCYPTED;
+                }
             }
             ChunkWrapper::Ref(_c) => panic!("unexpected"),
         }
diff --git a/rafs/src/metadata/layout/v6.rs b/rafs/src/metadata/layout/v6.rs
index cb7845ff1b4..d386855105c 100644
--- a/rafs/src/metadata/layout/v6.rs
+++ b/rafs/src/metadata/layout/v6.rs
@@ -4,7 +4,7 @@
 // SPDX-License-Identifier: Apache-2.0

 use std::collections::HashMap;
-use std::convert::TryFrom;
+use std::convert::{TryFrom, TryInto};
 use std::ffi::{OsStr, OsString};
 use std::fmt::Debug;
 use std::io::{Read, Result};
@@ -19,6 +19,7 @@ use nydus_storage::meta::{
     BlobChunkInfoV1Ondisk, BlobChunkInfoV2Ondisk, BlobCompressionContextHeader,
 };
 use nydus_storage::{RAFS_MAX_CHUNKS_PER_BLOB, RAFS_MAX_CHUNK_SIZE};
+use nydus_utils::crypt::{self, Cipher, CipherContext};
 use nydus_utils::{compress, digest, round_up, ByteSize};

 use crate::metadata::inode::InodeWrapper;
@@ -573,6 +574,15 @@ impl RafsV6SuperBlockExt {
         self.set_chunk_table_size(size);
     }

+    /// Set encryption algorithm to encrypt chunks of the Rafs filesystem.
+    pub fn set_cipher(&mut self, cipher: crypt::Algorithm) {
+        let c: RafsSuperFlags = cipher.into();
+
+        self.s_flags &= !RafsSuperFlags::ENCRYPTION_NONE.bits();
+        self.s_flags &= !RafsSuperFlags::ENCRYPTION_ASE_128_XTS.bits();
+        self.s_flags |= c.bits();
+    }
+
     impl_pub_getter_setter!(
         chunk_table_offset,
         set_chunk_table_offset,
@@ -1418,11 +1428,23 @@ struct RafsV6Blob {
     // SHA256 digest of RAFS blob for ZRAN, containing `blob.meta`, `blob.digest` `blob.toc` and
     // optionally 'image.boot`. It's all zero for ZRAN blobs with inlined-meta, so need special
     // handling.
+    // When using encryption mode, it is reused to store the encryption key.
     blob_meta_digest: [u8; 32],
     // Size of RAFS blob for ZRAN. It's zero ZRAN blobs with inlined-meta.
+    // When using encryption mode, it is reused to store the first 8 bytes of the encryption iv.
     blob_meta_size: u64,
-
-    reserved2: [u8; 48],
+    // When using encryption mode, it stores the last 8 bytes of the encryption iv.
+    // 0                  7                 15
+    // +------------------+------------------+
+    // |  blob_meta_size  | cipher_iv[8..16] |
+    // |      8 bytes     |      8 bytes     |
+    // +------------------+------------------+
+    //  \_          cipher_iv[0..16]       _/
+    cipher_iv: [u8; 8],
+    // Cipher algorithm for chunks in the blob.
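Since the diagram above is the only description of this field reuse (the `cipher_algo` field it leads into is declared right below), a short sketch of the packing may help. `split_iv` and `join_iv` are hypothetical names mirroring what the serialization hunks later in this file do with `blob_meta_size` and `cipher_iv`:

use std::convert::TryInto;

// Storing: split the 16-byte XTS IV across the reused `blob_meta_size`
// field (first 8 bytes, little-endian) and the new `cipher_iv` field.
fn split_iv(iv: &[u8; 16]) -> (u64, [u8; 8]) {
    let meta_size = u64::from_le_bytes(iv[..8].try_into().unwrap());
    let tail: [u8; 8] = iv[8..].try_into().unwrap();
    (meta_size, tail)
}

// Loading: rebuild the IV from the two on-disk fields.
fn join_iv(blob_meta_size: u64, cipher_iv: [u8; 8]) -> [u8; 16] {
    let mut iv = [0u8; 16];
    iv[..8].copy_from_slice(&blob_meta_size.to_le_bytes());
    iv[8..].copy_from_slice(&cipher_iv);
    iv
}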
+ cipher_algo: u32, + + reserved2: [u8; 36], } impl Default for RafsV6Blob { @@ -1446,8 +1468,10 @@ impl Default for RafsV6Blob { blob_meta_digest: [0u8; 32], blob_meta_size: 0, blob_toc_size: 0u32, + cipher_iv: [0u8; 8], + cipher_algo: (crypt::Algorithm::None as u32).to_le(), - reserved2: [0u8; 48], + reserved2: [0u8; 36], } } } @@ -1479,6 +1503,32 @@ impl RafsV6Blob { let digest = digest::Algorithm::try_from(u32::from_le(self.digest_algo)) .map_err(|_| einval!("invalid digest algorithm in Rafs v6 blob entry"))?; blob_info.set_digester(digest); + let cipher = crypt::Algorithm::try_from(u32::from_le(self.cipher_algo)) + .map_err(|_| einval!("invalid cipher algorithm in Rafs v6 blob entry"))?; + let cipher_object = cipher + .new_cipher() + .map_err(|e| einval!(format!("failed to create new cipher object {}", e)))?; + let cipher_context = match cipher { + crypt::Algorithm::None => None, + crypt::Algorithm::Aes128Xts => { + let mut cipher_iv = [0u8; 16]; + cipher_iv[..8].copy_from_slice(&self.blob_meta_size.to_le_bytes()); + cipher_iv[8..].copy_from_slice(&self.cipher_iv); + Some(CipherContext::new( + self.blob_meta_digest.to_vec(), + cipher_iv.to_vec(), + false, + cipher, + )?) + } + _ => { + return Err(einval!(format!( + "invalid cipher algorithm {:?} when creating cipher context", + cipher + ))) + } + }; + blob_info.set_cipher_info(cipher, Arc::new(cipher_object), cipher_context); blob_info.set_blob_meta_info( u64::from_le(self.ci_offset), u64::from_le(self.ci_compressed_size), @@ -1504,6 +1554,38 @@ impl RafsV6Blob { let mut blob_id = [0u8; BLOB_SHA256_LEN]; blob_id[..id.len()].copy_from_slice(id); + let (blob_meta_digest, blob_meta_size, cipher_iv) = match blob_info.cipher() { + crypt::Algorithm::None => ( + *blob_info.blob_meta_digest(), + blob_info.blob_meta_size(), + [0u8; 8], + ), + crypt::Algorithm::Aes128Xts => { + let cipher_ctx = match blob_info.cipher_context() { + Some(ctx) => ctx, + None => { + return Err(einval!( + "cipher context is unset while using Aes128Xts encryption algorithm" + )) + } + }; + let cipher_key: [u8; 32] = cipher_ctx.get_cipher_meta().0.try_into().unwrap(); + let (cipher_iv_top_half, cipher_iv_bottom_half) = + cipher_ctx.get_cipher_meta().1.split_at(8); + ( + cipher_key, + u64::from_le_bytes(cipher_iv_top_half.try_into().unwrap()), + cipher_iv_bottom_half.try_into().unwrap(), + ) + } + _ => { + return Err(einval!(format!( + "invalid cipher algorithm type {:?} in blob info", + blob_info.cipher() + ))) + } + }; + Ok(RafsV6Blob { blob_id, blob_index: blob_info.blob_index().to_le(), @@ -1520,11 +1602,13 @@ impl RafsV6Blob { ci_uncompressed_size: blob_info.meta_ci_uncompressed_size().to_le(), blob_toc_digest: *blob_info.blob_toc_digest(), - blob_meta_digest: *blob_info.blob_meta_digest(), - blob_meta_size: blob_info.blob_meta_size(), + blob_meta_digest, + blob_meta_size, blob_toc_size: blob_info.blob_toc_size(), + cipher_iv, + cipher_algo: (blob_info.cipher() as u32).to_le(), - reserved2: [0u8; 48], + reserved2: [0u8; 36], }) } @@ -1582,10 +1666,11 @@ impl RafsV6Blob { if compress::Algorithm::try_from(u32::from_le(self.compression_algo)).is_err() || compress::Algorithm::try_from(u32::from_le(self.ci_compressor)).is_err() || digest::Algorithm::try_from(u32::from_le(self.digest_algo)).is_err() + || crypt::Algorithm::try_from(self.cipher_algo).is_err() { error!( - "RafsV6Blob: idx {} invalid compression_algo {} ci_compressor {} digest_algo {}", - blob_index, self.compression_algo, self.ci_compressor, self.digest_algo + "RafsV6Blob: idx {} invalid compression_algo 
{} ci_compressor {} digest_algo {} cipher_algo {}", + blob_index, self.compression_algo, self.ci_compressor, self.digest_algo, self.cipher_algo, ); return false; } @@ -1641,7 +1726,8 @@ impl RafsV6Blob { let count = chunk_count as u64; if blob_features.contains(BlobFeatures::CHUNK_INFO_V2) && (blob_features.contains(BlobFeatures::BATCH) - || blob_features.contains(BlobFeatures::ZRAN)) + || blob_features.contains(BlobFeatures::ZRAN) + || blob_features.contains(BlobFeatures::ENCRYPTED)) { if ci_uncompr_size < count * size_of::() as u64 { error!( @@ -1660,6 +1746,7 @@ impl RafsV6Blob { } } else if blob_features.contains(BlobFeatures::BATCH) || blob_features.contains(BlobFeatures::ZRAN) + || blob_features.contains(BlobFeatures::ENCRYPTED) { error!( "RafsV6Blob: idx {} invalid feature bits {}", @@ -1733,6 +1820,8 @@ impl RafsV6BlobTable { blob_meta_size: u64, blob_toc_size: u32, header: BlobCompressionContextHeader, + cipher_object: Arc, + cipher_context: Option, ) -> u32 { let blob_index = self.entries.len() as u32; let blob_features = BlobFeatures::try_from(header.features()).unwrap(); @@ -1748,6 +1837,7 @@ impl RafsV6BlobTable { blob_info.set_compressor(flags.into()); blob_info.set_digester(flags.into()); + blob_info.set_cipher(flags.into()); blob_info.set_prefetch_info(prefetch_offset as u64, prefetch_size as u64); blob_info.set_blob_meta_info( header.ci_compressed_offset(), @@ -1759,6 +1849,7 @@ impl RafsV6BlobTable { blob_info.set_blob_toc_digest(blob_toc_digest); blob_info.set_blob_meta_size(blob_meta_size); blob_info.set_blob_toc_size(blob_toc_size); + blob_info.set_cipher_info(flags.into(), cipher_object, cipher_context); self.entries.push(Arc::new(blob_info)); diff --git a/rafs/src/metadata/mod.rs b/rafs/src/metadata/mod.rs index 3c54fb49ebe..c04c3b4da1d 100644 --- a/rafs/src/metadata/mod.rs +++ b/rafs/src/metadata/mod.rs @@ -26,8 +26,8 @@ use nydus_storage::device::{ BlobChunkInfo, BlobDevice, BlobFeatures, BlobInfo, BlobIoMerge, BlobIoVec, }; use nydus_storage::meta::toc::TocEntryList; -use nydus_utils::compress; use nydus_utils::digest::{self, RafsDigest}; +use nydus_utils::{compress, crypt}; use serde::Serialize; use self::layout::v5::RafsV5PrefetchTable; @@ -288,10 +288,12 @@ bitflags! { const INLINED_CHUNK_DIGEST = 0x0000_0100; /// RAFS works in Tarfs mode, which directly uses tar streams as data blobs. const TARTFS_MODE = 0x0000_0200; + /// Data chunks are not encrypted. + const ENCRYPTION_NONE = 0x0100_0000; + /// Data chunks are encrypted with AES-128-XTS. + const ENCRYPTION_ASE_128_XTS = 0x0200_0000; // Reserved for future compatible changes. - const PRESERVED_COMPAT_7 = 0x0100_0000; - const PRESERVED_COMPAT_6 = 0x0200_0000; const PRESERVED_COMPAT_5 = 0x0400_0000; const PRESERVED_COMPAT_4 = 0x0800_0000; const PRESERVED_COMPAT_3 = 0x1000_0000; @@ -356,6 +358,26 @@ impl From for RafsSuperFlags { } } +impl From for crypt::Algorithm { + fn from(flags: RafsSuperFlags) -> Self { + match flags { + // NOTE: only aes-128-xts encryption algorithm supported. + x if x.contains(RafsSuperFlags::ENCRYPTION_ASE_128_XTS) => crypt::Algorithm::Aes128Xts, + _ => crypt::Algorithm::None, + } + } +} + +impl From for RafsSuperFlags { + fn from(c: crypt::Algorithm) -> RafsSuperFlags { + match c { + // NOTE: only aes-128-xts encryption algorithm supported. + crypt::Algorithm::Aes128Xts => RafsSuperFlags::ENCRYPTION_ASE_128_XTS, + _ => RafsSuperFlags::ENCRYPTION_NONE, + } + } +} + /// Configuration information to check compatibility between RAFS filesystems. 
#[derive(Clone, Copy, Debug)] pub struct RafsSuperConfig { @@ -522,6 +544,15 @@ impl RafsSuperMeta { } } + /// V6: Check whether any data blobs may be encrypted. + pub fn get_cipher(&self) -> crypt::Algorithm { + if self.is_v6() { + self.flags.into() + } else { + crypt::Algorithm::None + } + } + /// Get `RafsSuperConfig` object to check compatibility. pub fn get_config(&self) -> RafsSuperConfig { RafsSuperConfig { diff --git a/src/bin/nydus-image/inspect.rs b/src/bin/nydus-image/inspect.rs index f9ca6998c6b..06af8cbaecf 100644 --- a/src/bin/nydus-image/inspect.rs +++ b/src/bin/nydus-image/inspect.rs @@ -307,6 +307,7 @@ Mapped Block Address: {mapped_blkaddr} Features: {features:?} Compressor: {compressor} Digester: {digester} +Cipher: {cipher} Chunk Size: 0x{chunk_size:x} Chunk Count: {chunk_count} Prefetch Table Offset: {prefetch_tbl_offset} @@ -331,6 +332,7 @@ RAFS Blob Size: {rafs_size} chunk_count = blob_info.chunk_count(), compressor = blob_info.compressor(), digester = blob_info.digester(), + cipher = blob_info.cipher(), prefetch_tbl_offset = blob_info.prefetch_offset(), prefetch_tbl_size = blob_info.prefetch_size(), meta_compressor = blob_info.meta_ci_compressor(), diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs index f4a8b39f6e3..1eed52a2a9f 100644 --- a/src/bin/nydus-image/main.rs +++ b/src/bin/nydus-image/main.rs @@ -346,6 +346,14 @@ fn prepare_cmd_args(bti_string: &'static str) -> App { .arg( arg_output_json.clone(), ) + .arg( + Arg::new("encrypt") + .long("encrypt") + .short('E') + .help("Encrypt the generated RAFS metadata and data blobs") + .action(ArgAction::SetTrue) + .required(false) + ) ); let app = app.subcommand( @@ -755,7 +763,7 @@ impl Command { .map(|s| s.as_str()) .unwrap_or_default(), )?; - + let encrypt = matches.get_flag("encrypt"); match conversion_type { ConversionType::DirectoryToRafs => { Self::ensure_directory(&source_path)?; @@ -813,6 +821,12 @@ impl Command { conversion_type ); } + if encrypt { + bail!( + "conversion type '{}' conflicts with '--encrypt'", + conversion_type + ) + } } ConversionType::TarToTarfs => { Self::ensure_file(&source_path)?; @@ -878,6 +892,12 @@ impl Command { conversion_type ); } + if encrypt { + bail!( + "conversion type '{}' conflicts with '--encrypt'", + conversion_type + ) + } } ConversionType::EStargzIndexToRef => { Self::ensure_file(&source_path)?; @@ -914,6 +934,12 @@ impl Command { if blob_id.trim() == "" { bail!("'--blob-id' is missing for '--type stargz_index'"); } + if encrypt { + bail!( + "conversion type '{}' conflicts with '--encrypt'", + conversion_type + ) + } } ConversionType::DirectoryToStargz | ConversionType::TargzToStargz @@ -943,6 +969,7 @@ impl Command { blob_storage, blob_inline_meta, features, + encrypt, ); build_ctx.set_fs_version(version); build_ctx.set_chunk_size(chunk_size); @@ -991,13 +1018,25 @@ impl Command { } let mut builder: Box = match conversion_type { - ConversionType::DirectoryToRafs => Box::new(DirectoryBuilder::new()), + ConversionType::DirectoryToRafs => { + if encrypt { + build_ctx.blob_features.insert(BlobFeatures::CHUNK_INFO_V2); + build_ctx.blob_features.insert(BlobFeatures::ENCRYPTED); + } + Box::new(DirectoryBuilder::new()) + } ConversionType::EStargzIndexToRef => { Box::new(StargzBuilder::new(blob_data_size, &build_ctx)) } ConversionType::EStargzToRafs | ConversionType::TargzToRafs - | ConversionType::TarToRafs => Box::new(TarballBuilder::new(conversion_type)), + | ConversionType::TarToRafs => { + if encrypt { + 
build_ctx.blob_features.insert(BlobFeatures::CHUNK_INFO_V2); + build_ctx.blob_features.insert(BlobFeatures::ENCRYPTED); + } + Box::new(TarballBuilder::new(conversion_type)) + } ConversionType::EStargzToRef | ConversionType::TargzToRef | ConversionType::TarToRef => { diff --git a/storage/src/cache/cachedfile.rs b/storage/src/cache/cachedfile.rs index c387400a648..0fc96c9cdc2 100644 --- a/storage/src/cache/cachedfile.rs +++ b/storage/src/cache/cachedfile.rs @@ -21,7 +21,7 @@ use std::time::Duration; use fuse_backend_rs::file_buf::FileVolatileSlice; use nix::sys::uio; use nydus_utils::compress::Decoder; -use nydus_utils::crypt::{self, Cipher}; +use nydus_utils::crypt::{self, Cipher, CipherContext}; use nydus_utils::metrics::{BlobcacheMetrics, Metric}; use nydus_utils::{compress, digest, round_up_usize, DelayType, Delayer, FileRangeReader}; use tokio::runtime::Runtime; @@ -30,7 +30,6 @@ use crate::backend::BlobReader; use crate::cache::state::ChunkMap; use crate::cache::worker::{AsyncPrefetchConfig, AsyncPrefetchMessage, AsyncWorkerMgr}; use crate::cache::{BlobCache, BlobIoMergeState}; -use crate::context::CipherContext; use crate::device::{ BlobChunkInfo, BlobInfo, BlobIoDesc, BlobIoRange, BlobIoSegment, BlobIoTag, BlobIoVec, BlobObject, BlobPrefetchRequest, @@ -195,7 +194,7 @@ impl FileCacheEntry { metrics.buffered_backend_size.sub(buffer.size() as u64); let mut t_buf; let buf = if !is_raw_data && is_cache_encrypted { - let (key, iv) = cipher_context.get_chunk_cipher_context(chunk.as_ref()); + let (key, iv) = cipher_context.generate_cipher_meta(&chunk.chunk_id().data); let buf = buffer.slice(); t_buf = alloc_buf(round_up_usize(buf.len(), ENCRYPTION_PAGE_SIZE)); @@ -465,6 +464,10 @@ impl BlobCache for FileCacheEntry { self.blob_info.cipher_object() } + fn blob_cipher_context(&self) -> Option { + self.blob_info.cipher_context() + } + fn blob_digester(&self) -> digest::Algorithm { self.blob_info.digester() } @@ -1308,7 +1311,7 @@ impl FileCacheEntry { let size = chunk.uncompressed_size() as usize; let cipher_object = self.cache_cipher_object.clone(); let cipher_context = self.cache_cipher_context.clone(); - let (key, iv) = cipher_context.get_chunk_cipher_context(chunk); + let (key, iv) = cipher_context.generate_cipher_meta(&chunk.chunk_id().data); let align_size = round_up_usize(size, ENCRYPTION_PAGE_SIZE); let mut buf = alloc_buf(align_size); @@ -1317,12 +1320,7 @@ impl FileCacheEntry { let mut pos = 0; while pos < buffer.len() { assert!(pos + ENCRYPTION_PAGE_SIZE <= buf.len()); - match cipher_object.decrypt( - key, - Some(&iv), - &buf[pos..pos + ENCRYPTION_PAGE_SIZE], - ENCRYPTION_PAGE_SIZE, - ) { + match cipher_object.decrypt(key, Some(&iv), &buf[pos..pos + ENCRYPTION_PAGE_SIZE]) { Ok(buf2) => { let len = std::cmp::min(buffer.len() - pos, ENCRYPTION_PAGE_SIZE); buffer[pos..pos + len].copy_from_slice(&buf2[..len]); diff --git a/storage/src/cache/dummycache.rs b/storage/src/cache/dummycache.rs index 9f61bcbd9ac..2e554b6cbc0 100644 --- a/storage/src/cache/dummycache.rs +++ b/storage/src/cache/dummycache.rs @@ -24,7 +24,7 @@ use std::sync::Arc; use fuse_backend_rs::file_buf::FileVolatileSlice; use nydus_api::CacheConfigV2; -use nydus_utils::crypt::{Algorithm, Cipher}; +use nydus_utils::crypt::{Algorithm, Cipher, CipherContext}; use nydus_utils::{compress, digest}; use crate::backend::{BlobBackend, BlobReader}; @@ -72,6 +72,10 @@ impl BlobCache for DummyCache { self.blob_info.cipher_object() } + fn blob_cipher_context(&self) -> Option { + self.blob_info.cipher_context() + } + fn 
blob_digester(&self) -> digest::Algorithm { self.digester } diff --git a/storage/src/cache/filecache/mod.rs b/storage/src/cache/filecache/mod.rs index e5048896146..31a7f3c9c47 100644 --- a/storage/src/cache/filecache/mod.rs +++ b/storage/src/cache/filecache/mod.rs @@ -22,7 +22,6 @@ use crate::cache::state::{ }; use crate::cache::worker::{AsyncPrefetchConfig, AsyncWorkerMgr}; use crate::cache::{BlobCache, BlobCacheMgr}; -use crate::context::CipherContext; use crate::device::{BlobFeatures, BlobInfo}; use crate::RAFS_DEFAULT_CHUNK_SIZE; @@ -290,7 +289,12 @@ impl FileCacheEntry { let key = hex::decode(mgr.cache_encryption_key.clone()) .map_err(|_e| einval!("invalid cache file encryption key"))?; let cipher = crypt::Algorithm::Aes128Xts.new_cipher()?; - let ctx = CipherContext::new(key, mgr.cache_convergent_encryption)?; + let ctx = crypt::CipherContext::new( + key, + [0u8; 16].to_vec(), + mgr.cache_convergent_encryption, + crypt::Algorithm::Aes128Xts, + )?; (Arc::new(cipher), Arc::new(ctx)) } else { (Default::default(), Default::default()) diff --git a/storage/src/cache/mod.rs b/storage/src/cache/mod.rs index e69ceb1eeb8..cc65f842919 100644 --- a/storage/src/cache/mod.rs +++ b/storage/src/cache/mod.rs @@ -23,7 +23,7 @@ use std::time::Instant; use fuse_backend_rs::file_buf::FileVolatileSlice; use nydus_utils::compress::zlib_random::ZranDecoder; -use nydus_utils::crypt::{self, Cipher}; +use nydus_utils::crypt::{self, Cipher, CipherContext}; use nydus_utils::{compress, digest}; use crate::backend::{BlobBackend, BlobReader}; @@ -156,6 +156,9 @@ pub trait BlobCache: Send + Sync { /// Cipher object to encrypt/decrypt chunk data. fn blob_cipher_object(&self) -> Arc; + /// Cipher context to encrypt/decrypt chunk data. + fn blob_cipher_context(&self) -> Option; + /// Get message digest algorithm to handle chunks in the blob. fn blob_digester(&self) -> digest::Algorithm; @@ -282,8 +285,8 @@ pub trait BlobCache: Send + Sync { /// Read a whole chunk directly from the storage backend. /// - /// The fetched chunk data may be compressed or not, which depends on chunk information from - /// `chunk`.Moreover, chunk data from backend storage may be validated per user's configuration. + /// The fetched chunk data may be compressed or encrypted or not, which depends on chunk information + /// from `chunk`. Moreover, chunk data from backend storage may be validated per user's configuration. fn read_chunk_from_backend( &self, chunk: &dyn BlobChunkInfo, @@ -295,7 +298,12 @@ pub trait BlobCache: Send + Sync { if self.is_zran() || self.is_batch() { return Err(enosys!("read_chunk_from_backend")); - } else if chunk.is_compressed() { + } else if !chunk.is_compressed() && !chunk.is_encrypted() { + let size = self.reader().read(buffer, offset).map_err(|e| eio!(e))?; + if size != buffer.len() { + return Err(eio!("storage backend returns less data than requested")); + } + } else { let c_size = if self.is_legacy_stargz() { self.get_legacy_stargz_size(offset, buffer.len())? 
} else { @@ -309,13 +317,14 @@ pub trait BlobCache: Send + Sync { if size != raw_buffer.len() { return Err(eio!("storage backend returns less data than requested")); } - self.decompress_chunk_data(&raw_buffer, buffer, true)?; + let decrypted_buffer = crypt::decrypt_with_context( + &raw_buffer, + &self.blob_cipher_object(), + &self.blob_cipher_context(), + chunk.is_encrypted(), + )?; + self.decompress_chunk_data(&decrypted_buffer, buffer, chunk.is_compressed())?; c_buf = Some(raw_buffer); - } else { - let size = self.reader().read(buffer, offset).map_err(|e| eio!(e))?; - if size != buffer.len() { - return Err(eio!("storage backend returns less data than requested")); - } } let duration = Instant::now().duration_since(start).as_millis(); @@ -447,10 +456,16 @@ impl<'a, 'b> ChunkDecompressState<'a, 'b> { let c_offset = (c_offset - self.blob_offset) as usize; let input = &self.c_buf[c_offset..c_offset + c_size as usize]; + let decrypted_buffer = crypt::decrypt_with_context( + input, + &self.cache.blob_cipher_object(), + &self.cache.blob_cipher_context(), + meta.state.is_encrypted(), + )?; let mut output = alloc_buf(d_size as usize); self.cache - .decompress_chunk_data(input, &mut output, c_size != d_size)?; + .decompress_chunk_data(&decrypted_buffer, &mut output, c_size != d_size)?; if output.len() != d_size as usize { return Err(einval!(format!( @@ -571,10 +586,15 @@ impl<'a, 'b> ChunkDecompressState<'a, 'b> { let offset_merged = (c_offset - self.blob_offset) as usize; let end_merged = offset_merged + c_size as usize; - let buf = &self.c_buf[offset_merged..end_merged]; + let decrypted_buffer = crypt::decrypt_with_context( + &self.c_buf[offset_merged..end_merged], + &self.cache.blob_cipher_object(), + &self.cache.blob_cipher_context(), + chunk.is_encrypted(), + )?; let mut buffer = alloc_buf(d_size); self.cache - .decompress_chunk_data(buf, &mut buffer, chunk.is_compressed())?; + .decompress_chunk_data(&decrypted_buffer, &mut buffer, chunk.is_compressed())?; self.cache .validate_chunk_data(chunk, &buffer, false) .map_err(|e| { diff --git a/storage/src/context.rs b/storage/src/context.rs deleted file mode 100644 index c6a83dbd91e..00000000000 --- a/storage/src/context.rs +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2021 Alibaba Cloud. All rights reserved. -// -// SPDX-License-Identifier: Apache-2.0 - -use std::io::Result; - -use crate::device::BlobChunkInfo; - -// Openssl rejects keys with identical first and second halves for xts. -// Use a default key for such cases. -const DEFAULT_CE_KEY: [u8; 32] = [ - 0xac, 0xed, 0x14, 0x69, 0x94, 0x23, 0x1e, 0xca, 0x44, 0x8c, 0xed, 0x2f, 0x6b, 0x40, 0x0c, 0x00, - 0xfd, 0xbb, 0x3f, 0xac, 0xdd, 0xc7, 0xd9, 0xee, 0x83, 0xf6, 0x5c, 0xd9, 0x3c, 0xaa, 0x28, 0x7c, -]; - -/// Struct to provide context information for data encryption/decryption. -#[derive(Default)] -pub struct CipherContext { - key: Vec, - convergent_encryption: bool, -} - -impl CipherContext { - /// Create a new instance of [CipherContext]. - pub fn new(key: Vec, convergent_encryption: bool) -> Result { - if key.len() != 32 { - return Err(einval!("invalid key length for encryption")); - } else if key[0..16] == key[16..32] { - return Err(einval!("invalid symmetry key for encryption")); - } - - Ok(CipherContext { - key, - convergent_encryption, - }) - } - - /// Get context information for chunk encryption/decryption. 
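The deleted helper that follows carried the convergent-encryption rule which now lives in `crypt::CipherContext::generate_cipher_meta` (see the utils/src/crypt.rs hunks later in this patch). As a sketch of that rule, with a hypothetical function name:

// Convergent encryption keys each chunk with its own digest, so identical
// chunks encrypt to identical ciphertext. OpenSSL rejects XTS keys whose
// two halves are identical, so such digests fall back to a fixed default key.
fn pick_ce_key<'a>(chunk_digest: &'a [u8; 32], default_key: &'a [u8; 32]) -> &'a [u8] {
    if chunk_digest[..16] == chunk_digest[16..] {
        default_key
    } else {
        chunk_digest
    }
}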
- pub fn get_chunk_cipher_context<'a>( - &'a self, - chunk: &'a dyn BlobChunkInfo, - ) -> (&'a [u8], Vec) { - let iv = vec![0u8; 16]; - if self.convergent_encryption { - let id = &chunk.chunk_id().data; - if id[0..16] == id[16..32] { - (&DEFAULT_CE_KEY, iv) - } else { - (&chunk.chunk_id().data, iv) - } - } else { - (&self.key, iv) - } - } - - /// Get context information for meta data encryption/decryption. - pub fn get_meta_cipher_context(&self) -> &[u8] { - &self.key - } -} diff --git a/storage/src/device.rs b/storage/src/device.rs index 78ec0b13f76..ec249d642d3 100644 --- a/storage/src/device.rs +++ b/storage/src/device.rs @@ -38,14 +38,14 @@ use fuse_backend_rs::file_traits::FileReadWriteVolatile; use nydus_api::ConfigV2; use nydus_utils::compress; -use nydus_utils::crypt::{self, Cipher}; +use nydus_utils::crypt::{self, Cipher, CipherContext}; use nydus_utils::digest::{self, RafsDigest}; use crate::cache::BlobCache; use crate::factory::BLOB_FACTORY; pub(crate) const BLOB_FEATURE_INCOMPAT_MASK: u32 = 0x0000_ffff; -pub(crate) const BLOB_FEATURE_INCOMPAT_VALUE: u32 = 0x0000_00ff; +pub(crate) const BLOB_FEATURE_INCOMPAT_VALUE: u32 = 0x0000_0fff; bitflags! { /// Features bits for blob management. @@ -66,6 +66,8 @@ bitflags! { const TARFS = 0x0000_0040; /// Small file chunk are merged into batch chunk. const BATCH = 0x0000_0080; + /// Whether the Blob is encrypted. + const ENCRYPTED = 0x0000_0100; /// Blob has TAR headers to separate contents. const HAS_TAR_HEADER = 0x1000_0000; /// Blob has Table of Content (ToC) at the tail. @@ -168,6 +170,8 @@ pub struct BlobInfo { meta_path: Arc>, /// V6: support data encryption. cipher_object: Arc, + /// Cipher context for encryption. + cipher_ctx: Option, } impl BlobInfo { @@ -210,6 +214,7 @@ impl BlobInfo { fs_cache_file: None, meta_path: Arc::new(Mutex::new(String::new())), cipher_object: Default::default(), + cipher_ctx: None, }; blob_info.compute_features(); @@ -307,15 +312,31 @@ impl BlobInfo { self.cipher } + /// Set encryption algorithm for the blob. + pub fn set_cipher(&mut self, cipher: crypt::Algorithm) { + self.cipher = cipher; + } + /// Get the cipher object to encrypt/decrypt chunk data. pub fn cipher_object(&self) -> Arc { self.cipher_object.clone() } - /// Set the cipher algorithm to handle chunk data. - pub fn set_cipher_info(&mut self, cipher: crypt::Algorithm, cipher_object: Arc) { + /// Get the cipher context. + pub fn cipher_context(&self) -> Option { + self.cipher_ctx.clone() + } + + /// Set the cipher info, including cipher algo, cipher object and cipher context. + pub fn set_cipher_info( + &mut self, + cipher: crypt::Algorithm, + cipher_object: Arc, + cipher_ctx: Option, + ) { self.cipher = cipher; self.cipher_object = cipher_object; + self.cipher_ctx = cipher_ctx; } /// Get the message digest algorithm for the blob. @@ -537,6 +558,15 @@ impl BlobInfo { }; Ok(id) } + + /// Get the cipher info, including cipher algo, cipher object and cipher context. + pub fn get_cipher_info(&self) -> (crypt::Algorithm, Arc, Option) { + ( + self.cipher, + self.cipher_object.clone(), + self.cipher_ctx.clone(), + ) + } } bitflags! 
{ diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 8cb99d558f1..aae6deaea01 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -48,7 +48,6 @@ use std::fmt::{Display, Formatter}; pub mod backend; pub mod cache; -pub mod context; pub mod device; pub mod factory; pub mod meta; diff --git a/storage/src/meta/batch.rs b/storage/src/meta/batch.rs index f67ee674f8d..fefdc056d9b 100644 --- a/storage/src/meta/batch.rs +++ b/storage/src/meta/batch.rs @@ -126,6 +126,7 @@ impl BatchContextGenerator { &mut self, uncompressed_offset: u64, uncompressed_size: u32, + encrypted: bool, ) -> Result { let mut chunk = BlobChunkInfoV2Ondisk::default(); chunk.set_compressed_offset(0); @@ -136,6 +137,7 @@ impl BatchContextGenerator { chunk.set_batch_index(self.contexts.len() as u32); chunk.set_uncompressed_offset_in_batch_buf(self.chunk_data_buf_len() as u32); chunk.set_compressed(true); + chunk.set_encrypted(encrypted); Ok(chunk) } diff --git a/storage/src/meta/chunk_info_v2.rs b/storage/src/meta/chunk_info_v2.rs index 562082b009f..9a2ce255a9f 100644 --- a/storage/src/meta/chunk_info_v2.rs +++ b/storage/src/meta/chunk_info_v2.rs @@ -14,10 +14,10 @@ const CHUNK_V2_UNCOMP_OFFSET_SHIFT: u64 = 12; const CHUNK_V2_UNCOMP_SIZE_SHIFT: u64 = 32; const CHUNK_V2_FLAG_MASK: u64 = 0xff << 56; const CHUNK_V2_FLAG_COMPRESSED: u64 = 0x1 << 56; -const CHUNK_V2_FLAG_ENCRYPTED: u64 = 0x2 << 56; const CHUNK_V2_FLAG_ZRAN: u64 = 0x2 << 56; const CHUNK_V2_FLAG_BATCH: u64 = 0x4 << 56; -const CHUNK_V2_FLAG_VALID: u64 = 0x7 << 56; +const CHUNK_V2_FLAG_ENCRYPTED: u64 = 0x8 << 56; +const CHUNK_V2_FLAG_VALID: u64 = 0xf << 56; /// Chunk compression information on disk format V2. #[repr(C, packed)] @@ -40,7 +40,6 @@ impl BlobChunkInfoV2Ondisk { } } - #[allow(unused)] pub(crate) fn set_encrypted(&mut self, encrypted: bool) { if encrypted { self.uncomp_info |= u64::to_le(CHUNK_V2_FLAG_ENCRYPTED); @@ -198,10 +197,12 @@ impl BlobMetaChunkInfo for BlobChunkInfoV2Ondisk { || self.uncompressed_end() > state.uncompressed_size || self.uncompressed_size() == 0 || (!state.is_separate() && !self.is_batch() && self.compressed_size() == 0) - || (!self.is_compressed() && self.uncompressed_size() != self.compressed_size()) + || (!self.is_encrypted() + && !self.is_compressed() + && self.uncompressed_size() != self.compressed_size()) { return Err(einval!(format!( - "invalid chunk, blob: index {}/c_size 0x{:}/d_size 0x{:x}, chunk: c_end 0x{:x}/d_end 0x{:x}/compressed {} batch {} zran {}", + "invalid chunk, blob: index {}/c_size 0x{:}/d_size 0x{:x}, chunk: c_end 0x{:x}/d_end 0x{:x}/compressed {} batch {} zran {} encrypted {}", state.blob_index, state.compressed_size, state.uncompressed_size, @@ -210,6 +211,7 @@ impl BlobMetaChunkInfo for BlobChunkInfoV2Ondisk { self.is_compressed(), self.is_batch(), self.is_zran(), + self.is_encrypted() ))); } diff --git a/storage/src/meta/mod.rs b/storage/src/meta/mod.rs index 7bfb477ac7e..a8d1e23d0a4 100644 --- a/storage/src/meta/mod.rs +++ b/storage/src/meta/mod.rs @@ -33,10 +33,11 @@ use std::ops::{Add, BitAnd, Not}; use std::path::PathBuf; use std::sync::Arc; -use nydus_utils::compress; use nydus_utils::compress::zlib_random::ZranContext; +use nydus_utils::crypt::decrypt_with_context; use nydus_utils::digest::{DigestData, RafsDigest}; use nydus_utils::filemap::FileMapState; +use nydus_utils::{compress, crypt}; use crate::backend::BlobReader; use crate::device::v5::BlobV5ChunkInfo; @@ -330,6 +331,15 @@ impl BlobCompressionContextHeader { } } + /// Set flag indicating the blob is encrypted. 
+ pub fn set_encrypted(&mut self, enable: bool) { + if enable { + self.s_features |= BlobFeatures::ENCRYPTED.bits(); + } else { + self.s_features &= !BlobFeatures::ENCRYPTED.bits(); + } + } + /// Get blob meta feature flags. pub fn features(&self) -> u32 { self.s_features @@ -753,12 +763,38 @@ impl BlobCompressionContextInfo { ))); } - let (uncompressed, header) = if blob_info.meta_ci_compressor() == compress::Algorithm::None - { - let uncompressed = &raw_data[0..uncompressed_size as usize]; - let header = &raw_data[uncompressed_size as usize..expected_raw_size]; - (Cow::Borrowed(uncompressed), header) - } else { + let decrypted = match decrypt_with_context( + &raw_data[0..compressed_size as usize], + &blob_info.cipher_object(), + &blob_info.cipher_context(), + blob_info.cipher() != crypt::Algorithm::None, + ){ + Ok(data) => data, + Err(e) => return Err(eio!(format!( + "failed to decrypt metadata for blob {} from backend, cipher {}, encrypted data size {}, {}", + blob_info.blob_id(), + blob_info.cipher(), + compressed_size, + e + ))), + }; + let header = match decrypt_with_context( + &raw_data[compressed_size as usize..expected_raw_size], + &blob_info.cipher_object(), + &blob_info.cipher_context(), + blob_info.cipher() != crypt::Algorithm::None, + ){ + Ok(data) => data, + Err(e) => return Err(eio!(format!( + "failed to decrypt meta header for blob {} from backend, cipher {}, encrypted data size {}, {}", + blob_info.blob_id(), + blob_info.cipher(), + compressed_size, + e + ))), + }; + + let uncompressed = if blob_info.meta_ci_compressor() != compress::Algorithm::None { // Lz4 does not support concurrent decompression of the same data into // the same piece of memory. There will be multiple containers mmap the // same file, causing the buffer to be shared between different @@ -772,9 +808,8 @@ impl BlobCompressionContextInfo { // time, the memory consumption and performance impact are relatively // small. let mut uncompressed = vec![0u8; uncompressed_size as usize]; - let header = &raw_data[compressed_size as usize..expected_raw_size]; compress::decompress( - &raw_data[0..compressed_size as usize], + &decrypted, &mut uncompressed, blob_info.meta_ci_compressor(), ) @@ -782,14 +817,14 @@ impl BlobCompressionContextInfo { error!("failed to decompress blob meta data: {}", e); e })?; - (Cow::Owned(uncompressed), header) + Cow::Owned(uncompressed) + } else { + decrypted }; - buffer[0..uncompressed_size as usize].copy_from_slice(&uncompressed); buffer[aligned_uncompressed_size as usize ..(aligned_uncompressed_size + BLOB_CCT_HEADER_SIZE) as usize] - .copy_from_slice(header); - + .copy_from_slice(&header); Ok(()) } @@ -1020,6 +1055,10 @@ impl BlobCompressionContext { pub(crate) fn is_separate(&self) -> bool { self.blob_features & BlobFeatures::SEPARATE.bits() != 0 } + + pub(crate) fn is_encrypted(&self) -> bool { + self.blob_features & BlobFeatures::ENCRYPTED.bits() != 0 + } } /// A customized array to host chunk information table for a blob. 
@@ -1112,6 +1151,7 @@ impl BlobMetaChunkArray {
         uncompressed_offset: u64,
         uncompressed_size: u32,
         compressed: bool,
+        encrypted: bool,
         is_batch: bool,
         data: u64,
     ) {
@@ -1123,6 +1163,7 @@ impl BlobMetaChunkArray {
         meta.set_uncompressed_offset(uncompressed_offset);
         meta.set_uncompressed_size(uncompressed_size);
         meta.set_compressed(compressed);
+        meta.set_encrypted(encrypted);
         meta.set_batch(is_batch);
         meta.set_data(data);
         v.push(meta);
@@ -1939,6 +1980,9 @@ pub fn format_blob_features(features: BlobFeatures) -> String {
     if features.contains(BlobFeatures::ZRAN) {
         output += "zran ";
     }
+    if features.contains(BlobFeatures::ENCRYPTED) {
+        output += "encrypted ";
+    }
     output.trim_end().to_string()
 }
diff --git a/storage/src/meta/zran.rs b/storage/src/meta/zran.rs
index 6c039a93520..0375a41847f 100644
--- a/storage/src/meta/zran.rs
+++ b/storage/src/meta/zran.rs
@@ -177,6 +177,7 @@ impl ZranContextGenerator {
         chunk.set_zran_index(info.ci_index);
         chunk.set_zran_offset(info.ci_offset);
         chunk.set_compressed(true);
+        chunk.set_encrypted(false);

         self.uncomp_pos += round_up_4k(info.ci_len as u64);
diff --git a/utils/src/crypt.rs b/utils/src/crypt.rs
index d387745cfdc..225898e4bfb 100644
--- a/utils/src/crypt.rs
+++ b/utils/src/crypt.rs
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Alibaba Cloud. All rights reserved.
+// Copyright (C) 2022-2023 Alibaba Cloud. All rights reserved.
 //
 // SPDX-License-Identifier: Apache-2.0

@@ -8,8 +8,37 @@ use std::convert::TryFrom;
 use std::fmt::{self, Debug, Formatter};
 use std::io::Error;
 use std::str::FromStr;
-
-use openssl::symm;
+use std::sync::Arc;
+
+use openssl::{rand, symm};
+
+// The length of the data unit to be encrypted.
+pub const DATA_UNIT_LENGTH: usize = 16;
+// The length of the iv (Initialization Vector) used for AES-XTS encryption.
+pub const AES_XTS_IV_LENGTH: usize = 16;
+// The length of the key to do AES-128-XTS encryption.
+pub const AES_128_XTS_KEY_LENGTH: usize = 32;
+// The length of the key to do AES-256-XTS encryption.
+pub const AES_256_XTS_KEY_LENGTH: usize = 64;
+// The length of the key to do AES-256-GCM encryption.
+pub const AES_256_GCM_KEY_LENGTH: usize = 32;
+
+// The magic bytes marking the end of the padding.
+pub const PADDING_MAGIC_END: [u8; 2] = [0x78, 0x90];
+// DATA_UNIT_LENGTH + length of PADDING_MAGIC_END.
+pub const PADDING_LENGTH: usize = 18;
+// Openssl rejects keys with identical first and second halves for xts.
+// Use a default key for such cases.
+const DEFAULT_CE_KEY: [u8; 32] = [
+    0xac, 0xed, 0x14, 0x69, 0x94, 0x23, 0x1e, 0xca, 0x44, 0x8c, 0xed, 0x2f, 0x6b, 0x40, 0x0c, 0x00,
+    0xfd, 0xbb, 0x3f, 0xac, 0xdd, 0xc7, 0xd9, 0xee, 0x83, 0xf6, 0x5c, 0xd9, 0x3c, 0xaa, 0x28, 0x7c,
+];
+const DEFAULT_CE_KEY_64: [u8; 64] = [
+    0xac, 0xed, 0x14, 0x69, 0x94, 0x23, 0x1e, 0xca, 0x44, 0x8c, 0xed, 0x2f, 0x6b, 0x40, 0x0c, 0x00,
+    0xfd, 0xbb, 0x3f, 0xac, 0xdd, 0xc7, 0xd9, 0xee, 0x83, 0xf6, 0x5c, 0xd9, 0x3c, 0xaa, 0x28, 0x7c,
+    0xfd, 0xbb, 0x3f, 0xac, 0xdd, 0xc7, 0xd9, 0xee, 0x83, 0xf6, 0x5c, 0xd9, 0x3c, 0xaa, 0x28, 0x7c,
+    0xac, 0xed, 0x14, 0x69, 0x94, 0x23, 0x1e, 0xca, 0x44, 0x8c, 0xed, 0x2f, 0x6b, 0x40, 0x0c, 0x00,
+];

 /// Supported cipher algorithms.
 #[repr(u32)]
@@ -66,6 +95,16 @@ impl Algorithm {
             Algorithm::Aes256Gcm => 12,
         }
     }
+
+    /// Get key size of the encryption algorithm.
+ pub fn key_length(&self) -> usize { + match self { + Algorithm::None => 0, + Algorithm::Aes128Xts => AES_128_XTS_KEY_LENGTH, + Algorithm::Aes256Xts => AES_256_XTS_KEY_LENGTH, + Algorithm::Aes256Gcm => AES_256_GCM_KEY_LENGTH, + } + } } impl fmt::Display for Algorithm { @@ -159,16 +198,18 @@ impl Cipher { match self { Cipher::None => Ok(Cow::from(data)), Cipher::Aes128Xts(cipher) => { - assert_eq!(key.len(), 32); + assert_eq!(key.len(), AES_128_XTS_KEY_LENGTH); let mut buf; - let data = if data.len() >= 16 { + let data = if data.len() >= DATA_UNIT_LENGTH { data } else { // CMS (Cryptographic Message Syntax). - // This pads with the same value as the number of padding bytes. - let val = (16 - data.len()) as u8; - buf = [val; 16]; + // This pads with the same value as the number of padding bytes + // and appends the magic padding end. + let val = (DATA_UNIT_LENGTH - data.len()) as u8; + buf = [val; PADDING_LENGTH]; buf[..data.len()].copy_from_slice(data); + buf[DATA_UNIT_LENGTH..PADDING_LENGTH].copy_from_slice(&PADDING_MAGIC_END); &buf }; Self::cipher(*cipher, symm::Mode::Encrypt, key, iv, data) @@ -176,16 +217,15 @@ impl Cipher { .map_err(|e| eother!(format!("failed to encrypt data, {}", e))) } Cipher::Aes256Xts(cipher) => { - assert_eq!(key.len(), 64); + assert_eq!(key.len(), AES_256_XTS_KEY_LENGTH); let mut buf; - let data = if data.len() >= 16 { + let data = if data.len() >= DATA_UNIT_LENGTH { data } else { - // CMS (Cryptographic Message Syntax). - // This pads with the same value as the number of padding bytes. - let val = (16 - data.len()) as u8; - buf = [val; 16]; + let val = (DATA_UNIT_LENGTH - data.len()) as u8; + buf = [val; PADDING_LENGTH]; buf[..data.len()].copy_from_slice(data); + buf[DATA_UNIT_LENGTH..PADDING_LENGTH].copy_from_slice(&PADDING_MAGIC_END); &buf }; Self::cipher(*cipher, symm::Mode::Encrypt, key, iv, data) @@ -199,13 +239,7 @@ impl Cipher { } /// Decrypt encrypted data with optional IV and return the decrypted data. - pub fn decrypt( - &self, - key: &[u8], - iv: Option<&[u8]>, - data: &[u8], - size: usize, - ) -> Result, Error> { + pub fn decrypt(&self, key: &[u8], iv: Option<&[u8]>, data: &[u8]) -> Result, Error> { let mut data = match self { Cipher::None => Ok(data.to_vec()), Cipher::Aes128Xts(cipher) => Self::cipher(*cipher, symm::Mode::Decrypt, key, iv, data) @@ -218,18 +252,19 @@ impl Cipher { }?; // Trim possible padding. 
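The padding trimmed by the hunk below is the scheme introduced by the new constants at the top of this file (DATA_UNIT_LENGTH = 16, PADDING_MAGIC_END = [0x78, 0x90], PADDING_LENGTH = 18). As a sketch of the forward direction, mirroring the encrypt path above:

// Pad a short plaintext (< 16 bytes) CMS-style up to one data unit, then
// append the two magic bytes so that decrypt() can recover the original
// length without a caller-supplied size.
fn pad_short(data: &[u8]) -> [u8; 18] {
    assert!(data.len() < 16);
    let val = (16 - data.len()) as u8; // CMS: each pad byte equals the pad count
    let mut buf = [val; 18];
    buf[..data.len()].copy_from_slice(data);
    buf[16..].copy_from_slice(&[0x78, 0x90]);
    buf
}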
- if data.len() > size { - if data.len() != 16 { - return Err(einval!("Cipher::decrypt: invalid padding data")); - } - let val = (16 - size) as u8; - for item in data.iter().skip(size) { - if *item != val { - return Err(einval!("Cipher::decrypt: invalid padding data")); - } + if data.len() == PADDING_LENGTH + && data[PADDING_LENGTH - PADDING_MAGIC_END.len()..PADDING_LENGTH] == PADDING_MAGIC_END + { + let val = data[DATA_UNIT_LENGTH - 1] as usize; + if val < DATA_UNIT_LENGTH { + data.truncate(DATA_UNIT_LENGTH - val); + } else { + return Err(einval!(format!( + "Cipher::decrypt: invalid padding data, value {}", + val, + ))); } - data.truncate(size); - } + }; Ok(data) } @@ -277,8 +312,8 @@ impl Cipher { match self { Cipher::None => plaintext_size, Cipher::Aes128Xts(_) | Cipher::Aes256Xts(_) => { - if plaintext_size < 16 { - 16 + if plaintext_size < DATA_UNIT_LENGTH { + DATA_UNIT_LENGTH } else { plaintext_size } @@ -320,6 +355,90 @@ impl Cipher { out.truncate(count + rest); Ok(out) } + + pub fn generate_random_key(cipher_algo: Algorithm) -> Result, Error> { + let length = cipher_algo.key_length(); + let mut buf = vec![0u8; length]; + if let Err(e) = rand::rand_bytes(&mut buf) { + Err(eother!(format!( + "failed to generate key for {}, {}", + cipher_algo, e + ))) + } else { + Ok(Self::tweak_key_for_xts(&buf).to_vec()) + } + } + + pub fn generate_random_iv() -> Result, Error> { + let mut buf = vec![0u8; AES_XTS_IV_LENGTH]; + if let Err(e) = rand::rand_bytes(&mut buf) { + Err(eother!(format!("failed to generate iv, {}", e))) + } else { + Ok(buf) + } + } +} + +/// Struct to provide context information for data encryption/decryption. +#[derive(Default, Debug, Clone)] +pub struct CipherContext { + key: Vec, + iv: Vec, + convergent_encryption: bool, + cipher_algo: Algorithm, +} + +impl CipherContext { + /// Create a new instance of [CipherContext]. + pub fn new( + key: Vec, + iv: Vec, + convergent_encryption: bool, + cipher_algo: Algorithm, + ) -> Result { + let key_length = key.len(); + if key_length != cipher_algo.key_length() { + return Err(einval!(format!( + "invalid key length {} for {} encryption", + key_length, cipher_algo + ))); + } else if key[0..key_length >> 1] == key[key_length >> 1..key_length] { + return Err(einval!("invalid symmetry key for encryption")); + } + + Ok(CipherContext { + key, + iv, + convergent_encryption, + cipher_algo, + }) + } + + /// Generate context information from data for encryption/decryption. + pub fn generate_cipher_meta<'a>(&'a self, data: &'a [u8]) -> (&'a [u8], Vec) { + let length = data.len(); + assert_eq!(length, self.cipher_algo.key_length()); + let iv = vec![0u8; AES_XTS_IV_LENGTH]; + if self.convergent_encryption { + if length == AES_128_XTS_KEY_LENGTH && data[0..length >> 1] == data[length >> 1..length] + { + (&DEFAULT_CE_KEY, iv) + } else if length == AES_256_XTS_KEY_LENGTH + && data[0..length >> 1] == data[length >> 1..length] + { + (&DEFAULT_CE_KEY_64, iv) + } else { + (data, iv) + } + } else { + (&self.key, iv) + } + } + + /// Get context information for meta data encryption/decryption. + pub fn get_cipher_meta(&self) -> (&[u8], &[u8]) { + (&self.key, &self.iv) + } } /// A customized buf allocator that avoids zeroing @@ -332,6 +451,44 @@ fn alloc_buf(size: usize) -> Vec { unsafe { Vec::from_raw_parts(ptr, size, layout.size()) } } +// Encrypt data with Cipher and CipherContext. 
+// Encrypt data with Cipher and CipherContext.
+pub fn encrypt_with_context<'a>(
+    data: &'a [u8],
+    cipher_obj: &Arc<Cipher>,
+    cipher_ctx: &Option<CipherContext>,
+    encrypted: bool,
+) -> Result<Cow<'a, [u8]>, Error> {
+    if encrypted {
+        if let Some(cipher_ctx) = cipher_ctx {
+            let (key, iv) = cipher_ctx.get_cipher_meta();
+            Ok(cipher_obj.encrypt(key, Some(iv), data)?)
+        } else {
+            Err(einval!("the encrypt context can not be none"))
+        }
+    } else {
+        Ok(Cow::Borrowed(data))
+    }
+}
+
+// Decrypt data with Cipher and CipherContext.
+pub fn decrypt_with_context<'a>(
+    data: &'a [u8],
+    cipher_obj: &Arc<Cipher>,
+    cipher_ctx: &Option<CipherContext>,
+    encrypted: bool,
+) -> Result<Cow<'a, [u8]>, Error> {
+    if encrypted {
+        if let Some(cipher_ctx) = cipher_ctx {
+            let (key, iv) = cipher_ctx.get_cipher_meta();
+            Ok(Cow::from(cipher_obj.decrypt(key, Some(iv), data)?))
+        } else {
+            Err(einval!("the decrypt context can not be none"))
+        }
+    } else {
+        Ok(Cow::Borrowed(data))
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -353,7 +510,7 @@ mod tests {
             .encrypt(key.as_slice(), Some(&[0u8; 16]), b"1")
             .unwrap();
         assert_eq!(ciphertext1, ciphertext2);
-        assert_eq!(ciphertext2.len(), 16);
+        assert_eq!(ciphertext2.len(), PADDING_LENGTH);
 
         let ciphertext3 = cipher
             .encrypt(key.as_slice(), Some(&[0u8; 16]), b"11111111111111111")
             .unwrap();
@@ -386,7 +543,7 @@ mod tests {
             .encrypt(key.as_slice(), Some(&[0u8; 16]), b"1")
             .unwrap();
         assert_eq!(ciphertext1, ciphertext2);
-        assert_eq!(ciphertext2.len(), 16);
+        assert_eq!(ciphertext2.len(), PADDING_LENGTH);
 
         let ciphertext3 = cipher
             .encrypt(key.as_slice(), Some(&[0u8; 16]), b"11111111111111111")
             .unwrap();
@@ -416,7 +573,7 @@ mod tests {
             .encrypt(key.as_slice(), Some(&[0u8; 16]), b"1")
             .unwrap();
         let plaintext1 = cipher
-            .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext1, 1)
+            .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext1)
             .unwrap();
         assert_eq!(&plaintext1, b"1");
 
         let ciphertext2 = cipher
             .encrypt(key.as_slice(), Some(&[0u8; 16]), b"11111111111111111")
             .unwrap();
         let plaintext2 = cipher
-            .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext2, 17)
+            .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext2)
             .unwrap();
         assert_eq!(&plaintext2, b"11111111111111111");
 
         let ciphertext3 = cipher
             .encrypt(key.as_slice(), Some(&[1u8; 16]), b"11111111111111111")
             .unwrap();
         let plaintext3 = cipher
-            .decrypt(key.as_slice(), Some(&[1u8; 16]), &ciphertext3, 17)
+            .decrypt(key.as_slice(), Some(&[1u8; 16]), &ciphertext3)
             .unwrap();
         assert_eq!(&plaintext3, b"11111111111111111");
     }
@@ -447,7 +604,7 @@ mod tests {
             .encrypt(key.as_slice(), Some(&[0u8; 16]), b"1")
             .unwrap();
         let plaintext1 = cipher
-            .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext1, 1)
+            .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext1)
             .unwrap();
         assert_eq!(&plaintext1, b"1");
 
         let ciphertext2 = cipher
             .encrypt(key.as_slice(), Some(&[0u8; 16]), b"11111111111111111")
             .unwrap();
         let plaintext2 = cipher
-            .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext2, 17)
+            .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext2)
             .unwrap();
         assert_eq!(&plaintext2, b"11111111111111111");
 
         let ciphertext3 = cipher
             .encrypt(key.as_slice(), Some(&[1u8; 16]), b"11111111111111111")
             .unwrap();
         let plaintext3 = cipher
-            .decrypt(key.as_slice(), Some(&[1u8; 16]), &ciphertext3, 17)
+            .decrypt(key.as_slice(), Some(&[1u8; 16]), &ciphertext3)
             .unwrap();
         assert_eq!(&plaintext3, b"11111111111111111");
     }
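The two helpers above wrap the Cipher/CipherContext pair so call sites stay branch-free: with encrypted set to false the data simply passes through borrowed. A hedged usage sketch follows; it assumes an Algorithm::new_cipher() constructor from the existing crypt API, and the function name and error handling are illustrative only.

    use std::borrow::Cow;
    use std::sync::Arc;

    use nydus_utils::crypt::{self, Algorithm, Cipher, CipherContext};

    fn roundtrip(plaintext: &[u8]) -> std::io::Result<Vec<u8>> {
        let algo = Algorithm::Aes128Xts;
        // Random 32-byte XTS key (tweaked so its two halves differ) and 16-byte IV.
        let key = Cipher::generate_random_key(algo)?;
        let iv = Cipher::generate_random_iv()?;
        let ctx = Some(CipherContext::new(key, iv, false, algo)?);
        let obj = Arc::new(algo.new_cipher()?); // assumed existing constructor

        // `true` selects encryption; `false` would return the data as Cow::Borrowed.
        let enc: Cow<[u8]> = crypt::encrypt_with_context(plaintext, &obj, &ctx, true)?;
        let dec = crypt::decrypt_with_context(&enc, &obj, &ctx, true)?;
        Ok(dec.into_owned())
    }
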
From 0314971171b3359e817b704ba32be7a534d86649 Mon Sep 17 00:00:00 2001
From: taohong
Date: Tue, 4 Jul 2023 15:55:25 +0800
Subject: [PATCH 2/2] tests: add encrypt integration test

Add an image encryption integration test case to the smoke tests.

Signed-off-by: taohong
---
 smoke/tests/image_test.go        | 23 +++++++++++++++++------
 smoke/tests/native_layer_test.go |  5 +++--
 smoke/tests/tool/context.go      |  1 +
 3 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/smoke/tests/image_test.go b/smoke/tests/image_test.go
index 4ca45fed99f..ff785b38df5 100644
--- a/smoke/tests/image_test.go
+++ b/smoke/tests/image_test.go
@@ -15,8 +15,9 @@ import (
 )
 
 const (
-	paramZran  = "zran"
-	paramBatch = "batch"
+	paramZran    = "zran"
+	paramBatch   = "batch"
+	paramEncrypt = "encrypt"
 )
 
 type ImageTestSuite struct {
@@ -32,15 +33,19 @@ func (i *ImageTestSuite) TestConvertImages() test.Generator {
 		Dimension(paramFSVersion, []interface{}{"5", "6"}).
 		Dimension(paramZran, []interface{}{false, true}).
 		Dimension(paramBatch, []interface{}{"0", "0x100000"}).
+		Dimension(paramEncrypt, []interface{}{false, true}).
 		Skip(
 			func(param *tool.DescartesItem) bool {
 
-				// Zran and Batch not work with rafs v5.
-				if param.GetString(paramFSVersion) == "5" && (param.GetBool(paramZran) || param.GetString(paramBatch) != "0") {
+				// Zran, Batch and Encrypt do not work with rafs v5.
+				if param.GetString(paramFSVersion) == "5" &&
+					(param.GetBool(paramZran) || param.GetString(paramBatch) != "0" || param.GetBool(paramEncrypt)) {
 					return true
 				}
 
 				// Zran and Batch can not work together.
-				return param.GetBool(paramZran) && param.GetString(paramBatch) != "0"
+				// Zran and Encrypt can not work together.
+				return (param.GetBool(paramZran) && param.GetString(paramBatch) != "0") ||
+					(param.GetBool(paramZran) && param.GetBool(paramEncrypt))
 			})
 
 	return func() (name string, testCase test.Case) {
@@ -53,6 +58,7 @@ func (i *ImageTestSuite) TestConvertImages() test.Generator {
 		ctx.Build.FSVersion = scenario.GetString(paramFSVersion)
 		ctx.Build.OCIRef = scenario.GetBool(paramZran)
 		ctx.Build.BatchSize = scenario.GetString(paramBatch)
+		ctx.Build.Encrypt = scenario.GetBool(paramEncrypt)
 
 		image := i.prepareImage(i.T, scenario.GetString(paramImage))
 		return scenario.Str(), func(t *testing.T) {
@@ -78,6 +84,11 @@ func (i *ImageTestSuite) TestConvertImage(t *testing.T, ctx tool.Context, source
 		enableBatchSize = "--batch-size " + ctx.Build.BatchSize
 	}
 
+	enableEncrypt := ""
+	if ctx.Build.Encrypt {
+		enableEncrypt = "--encrypt"
+	}
+
 	target := fmt.Sprintf("%s-nydus-%s", source, uuid.NewString())
 	fsVersion := fmt.Sprintf("--fs-version %s", ctx.Build.FSVersion)
 	logLevel := "--log-level warn"
@@ -92,8 +103,8 @@ func (i *ImageTestSuite) TestConvertImage(t *testing.T, ctx tool.Context, source
 
 	// Convert image
 	convertCmd := fmt.Sprintf(
-		"%s %s convert --source %s --target %s %s %s %s --nydus-image %s --work-dir %s %s",
-		ctx.Binary.Nydusify, logLevel, source, target, fsVersion, enableOCIRef, enableBatchSize, ctx.Binary.Builder, ctx.Env.WorkDir, compressor,
+		"%s %s convert --source %s --target %s %s %s %s %s --nydus-image %s --work-dir %s %s",
+		ctx.Binary.Nydusify, logLevel, source, target, fsVersion, enableOCIRef, enableBatchSize, enableEncrypt, ctx.Binary.Builder, ctx.Env.WorkDir, compressor,
 	)
 	tool.RunWithoutOutput(t, convertCmd)
 
diff --git a/smoke/tests/native_layer_test.go b/smoke/tests/native_layer_test.go
index 17f1e2939c8..99f1031b309 100644
--- a/smoke/tests/native_layer_test.go
+++ b/smoke/tests/native_layer_test.go
@@ -42,6 +42,7 @@ func (n *NativeLayerTestSuite) TestMakeLayers() test.Generator {
 		Dimension(paramRafsMode, []interface{}{"direct", "cached"}).
 		Dimension(paramEnablePrefetch, []interface{}{false, true}).
 		Dimension(paramBatch, []interface{}{"0", "0x100000"}).
+		Dimension(paramEncrypt, []interface{}{false, true}).
 		Skip(func(param *tool.DescartesItem) bool {
 
 			// rafs v6 not support cached mode nor dummy cache
@@ -54,8 +55,8 @@ func (n *NativeLayerTestSuite) TestMakeLayers() test.Generator {
 				return true
 			}
 
-			// Batch not work with rafs v5.
-			if param.GetString(paramFSVersion) == "5" && param.GetString(paramBatch) != "0" {
+			// Batch and Encrypt do not work with rafs v5.
+			if param.GetString(paramFSVersion) == "5" && (param.GetString(paramBatch) != "0" || param.GetBool(paramEncrypt)) {
 				return true
 			}
 
diff --git a/smoke/tests/tool/context.go b/smoke/tests/tool/context.go
index 80a0f5e16ce..9e714eb0cf0 100644
--- a/smoke/tests/tool/context.go
+++ b/smoke/tests/tool/context.go
@@ -29,6 +29,7 @@ type BuildContext struct {
 	OCIRef      bool
 	OCIRefGzip  bool
 	BatchSize   string
+	Encrypt     bool
 }
 
 type RuntimeContext struct {
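With the new dimension in place, an encrypt scenario drives nydusify with the --encrypt flag added above. Substituting into the test's format string, the assembled command looks roughly like the following; the bracketed values are placeholders, and the trailing compressor flags come from the compressor variable defined earlier in the test:

    nydusify --log-level warn convert --source <source> --target <target> --fs-version 6 --encrypt --nydus-image <nydus-image-binary> --work-dir <work-dir> <compressor-flags>

The OCI-ref flag collapses to an empty string in such scenarios, since the skip rules above never let Zran and Encrypt run together.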