From c74b4a0641bd667430639c5505b493a2a285bbdf Mon Sep 17 00:00:00 2001 From: taohong Date: Tue, 20 Jun 2023 16:16:03 +0800 Subject: [PATCH] feat: nydus support encrypted images Extend native nydus v6 to support handling encrypted container images: * An encrypted nydus image is composed of an encrypted bootstrap and chunk-level encrypted data blobs. The bootstrap is encrypted by Ocicrypt, and the data blobs are encrypted by AES-128-XTS with a randomly generated key and iv at chunk level. * The chunk encryption key and iv are stored in the blob info, reusing existing fields of the structure to save reserved space. * For every data blob, all the chunk data, the compression context table and the compression context table header are encrypted. * Encrypted chunk data is decrypted first and then decompressed while being fetched from the storage backend. Signed-off-by: taohong --- builder/src/compact.rs | 4 + builder/src/core/blob.rs | 42 ++++++-- builder/src/core/context.rs | 70 +++++++++++- builder/src/core/node.rs | 30 ++++-- builder/src/core/v6.rs | 1 + builder/src/merge.rs | 7 ++ builder/src/stargz.rs | 1 + builder/src/tarball.rs | 33 ++++++ rafs/src/metadata/chunk.rs | 25 +++++ rafs/src/metadata/layout/v6.rs | 107 +++++++++++++++++-- rafs/src/metadata/mod.rs | 37 ++++++- src/bin/nydus-image/inspect.rs | 2 + src/bin/nydus-image/main.rs | 43 +++++++- storage/src/cache/cachedfile.rs | 18 ++-- storage/src/cache/dummycache.rs | 6 +- storage/src/cache/filecache/mod.rs | 7 +- storage/src/cache/mod.rs | 68 +++++++++--- storage/src/context.rs | 60 ----------- storage/src/device.rs | 34 +++++- storage/src/lib.rs | 1 - storage/src/meta/batch.rs | 2 + storage/src/meta/chunk_info_v2.rs | 12 ++- storage/src/meta/mod.rs | 73 ++++++++++--- storage/src/meta/zran.rs | 1 + utils/src/crypt.rs | 166 ++++++++++++++++++++++------- 25 files changed, 674 insertions(+), 176 deletions(-) delete mode 100644 storage/src/context.rs diff --git a/builder/src/compact.rs b/builder/src/compact.rs index 3721c4694ac..19fce1d0137 100644 --- a/builder/src/compact.rs +++ b/builder/src/compact.rs @@ -552,6 +552,9 @@ impl BlobCompactor { build_ctx.blob_features, build_ctx.compressor, build_ctx.digester, + build_ctx.cipher, + Default::default(), + None, ); blob_ctx.set_meta_info_enabled(self.is_v6()); let blob_idx = self.new_blob_mgr.alloc_index()?; @@ -606,6 +609,7 @@ impl BlobCompactor { None, false, Features::new(), + false, ); let mut bootstrap_mgr = BootstrapManager::new(Some(ArtifactStorage::SingleFile(d_bootstrap)), None); diff --git a/builder/src/core/blob.rs b/builder/src/core/blob.rs index 8f18f2faad9..2bbc76bba49 100644 --- a/builder/src/core/blob.rs +++ b/builder/src/core/blob.rs @@ -5,13 +5,15 @@ use std::borrow::Cow; use std::io::Write; use std::slice; +use std::sync::Arc; -use anyhow::{Context, Result}; +use anyhow::{Context, Error, Result}; use nydus_rafs::metadata::RAFS_MAX_CHUNK_SIZE; use nydus_storage::device::BlobFeatures; use nydus_storage::meta::{toc, BlobMetaChunkArray}; -use nydus_utils::compress; +use nydus_utils::crypt::{Cipher, CipherContext}; use nydus_utils::digest::{self, DigestHasher, RafsDigest}; +use nydus_utils::{compress, crypt}; use sha2::digest::Digest; use super::layout::BlobLayout; @@ -159,6 +161,9 @@ impl Blob { } // Prepare blob meta information data.
+ let encrypt = ctx.cipher != crypt::Algorithm::None; + let cipher_obj = &blob_ctx.cipher_object; + let cipher_ctx = &blob_ctx.cipher_ctx; let blob_meta_info = &blob_ctx.blob_meta_info; let mut ci_data = blob_meta_info.as_byte_slice(); let mut inflate_buf = Vec::new(); @@ -194,8 +199,11 @@ impl Blob { if !compressed { compressor = compress::Algorithm::None; } + + let encrypted_ci_data = + Self::encrypt_meta_data(&compressed_data, cipher_obj, cipher_ctx, encrypt)?; let compressed_offset = blob_writer.pos()?; - let compressed_size = compressed_data.len() as u64; + let compressed_size = encrypted_ci_data.len() as u64; let uncompressed_size = ci_data.len() as u64; header.set_ci_compressor(compressor); @@ -212,18 +220,20 @@ impl Blob { header.set_inlined_chunk_digest(true); } - let header_size = header.as_bytes().len(); blob_ctx.blob_meta_header = header; + let encrypted_header = + Self::encrypt_meta_data(header.as_bytes(), cipher_obj, cipher_ctx, encrypt)?; + let header_size = encrypted_header.len(); // Write blob meta data and header - match compressed_data { + match encrypted_ci_data { Cow::Owned(v) => blob_ctx.write_data(blob_writer, &v)?, Cow::Borrowed(v) => { let buf = v.to_vec(); blob_ctx.write_data(blob_writer, &buf)?; } } - blob_ctx.write_data(blob_writer, header.as_bytes())?; + blob_ctx.write_data(blob_writer, &encrypted_header)?; // Write tar header for `blob.meta`. if ctx.blob_inline_meta || ctx.features.is_enabled(Feature::BlobToc) { @@ -290,4 +300,24 @@ impl Blob { Ok(()) } + + fn encrypt_meta_data<'a>( + data: &'a [u8], + cipher_obj: &Arc<Cipher>, + cipher_ctx: &Option<CipherContext>, + encrypted: bool, + ) -> Result<Cow<'a, [u8]>, Error> { + if encrypted { + if let Some(cipher_ctx) = cipher_ctx { + let (key, iv) = cipher_ctx.get_meta_cipher_context(); + Ok(cipher_obj + .encrypt(key, Some(iv), data) + .context("failed to encrypt meta data")?) + } else { + Err(Error::msg("the encrypt context can not be none")) + } + } else { + Ok(Cow::Borrowed(data)) + } + } } diff --git a/builder/src/core/context.rs b/builder/src/core/context.rs index 56d0a646532..ca9f720e065 100644 --- a/builder/src/core/context.rs +++ b/builder/src/core/context.rs @@ -17,6 +17,7 @@ use std::sync::{Arc, Mutex}; use std::{fmt, fs}; use anyhow::{anyhow, Context, Error, Result}; +use nydus_utils::crypt::{self, Cipher, CipherContext}; use sha2::{Digest, Sha256}; use tar::{EntryType, Header}; use vmm_sys_util::tempfile::TempFile; @@ -373,6 +374,7 @@ pub struct BlobContext { pub blob_hash: Sha256, pub blob_compressor: compress::Algorithm, pub blob_digester: digest::Algorithm, + pub blob_cipher: crypt::Algorithm, pub blob_prefetch_size: u64, /// Whether to generate blob metadata information. pub blob_meta_info_enabled: bool, @@ -412,16 +414,23 @@ pub struct BlobContext { pub blob_toc_size: u32, pub entry_list: toc::TocEntryList, + /// Cipher to encrypt the RAFS blobs. + pub cipher_object: Arc<Cipher>, + pub cipher_ctx: Option<CipherContext>, } impl BlobContext { /// Create a new instance of [BlobContext].
+ #[allow(clippy::too_many_arguments)] pub fn new( blob_id: String, blob_offset: u64, features: BlobFeatures, compressor: compress::Algorithm, digester: digest::Algorithm, + cipher: crypt::Algorithm, + cipher_object: Arc, + cipher_ctx: Option, ) -> Self { let blob_meta_info = if features.contains(BlobFeatures::CHUNK_INFO_V2) { BlobMetaChunkArray::new_v2() @@ -433,6 +442,7 @@ impl BlobContext { blob_hash: Sha256::new(), blob_compressor: compressor, blob_digester: digester, + blob_cipher: cipher, blob_prefetch_size: 0, blob_meta_info_enabled: false, blob_meta_info, @@ -455,6 +465,8 @@ impl BlobContext { blob_toc_size: 0, entry_list: toc::TocEntryList::new(), + cipher_object, + cipher_ctx, }; blob_ctx @@ -578,7 +590,18 @@ impl BlobContext { } } - let mut blob_ctx = Self::new(blob_id, 0, features, blob.compressor(), blob.digester()); + let (cipher, cipher_object, cipher_ctx) = blob.get_cipher_info(); + + let mut blob_ctx = Self::new( + blob_id, + 0, + features, + blob.compressor(), + blob.digester(), + cipher, + cipher_object, + cipher_ctx, + ); blob_ctx.blob_prefetch_size = blob.prefetch_size(); blob_ctx.chunk_count = blob.chunk_count(); blob_ctx.uncompressed_blob_size = blob.uncompressed_size(); @@ -630,6 +653,15 @@ impl BlobContext { self.blob_meta_info_enabled = enable; } + pub fn set_cipher_info( + &mut self, + cipher_object: Arc, + cipher_ctx: Option, + ) { + self.cipher_object = cipher_object; + self.cipher_ctx = cipher_ctx; + } + pub fn add_chunk_meta_info( &mut self, chunk: &ChunkWrapper, @@ -658,6 +690,7 @@ impl BlobContext { chunk.uncompressed_offset(), chunk.uncompressed_size(), chunk.is_compressed(), + chunk.is_encrypted(), chunk.is_batch(), 0, ); @@ -751,12 +784,33 @@ impl BlobManager { } fn new_blob_ctx(ctx: &BuildContext) -> Result { + let (cipher_object, cipher_ctx) = match ctx.cipher { + crypt::Algorithm::None => (Default::default(), None), + crypt::Algorithm::Aes128Xts => { + let key = crypt::Cipher::generate_key_for_aes_xts(crypt::AES_128_XTS_KEY_LENGTH)?; + let iv = crypt::Cipher::generate_random_iv(crypt::AES_XTS_IV_LENGTH)?; + let cipher_ctx = CipherContext::new(key, iv, false)?; + ( + ctx.cipher.new_cipher().ok().unwrap_or(Default::default()), + Some(cipher_ctx), + ) + } + _ => { + return Err(anyhow!(format!( + "cipher algorithm {:?} does not support", + ctx.cipher + ))) + } + }; let mut blob_ctx = BlobContext::new( ctx.blob_id.clone(), ctx.blob_offset, ctx.blob_features, ctx.compressor, ctx.digester, + ctx.cipher, + Arc::new(cipher_object), + cipher_ctx, ); blob_ctx.set_chunk_size(ctx.chunk_size); blob_ctx.set_meta_info_enabled( @@ -936,6 +990,7 @@ impl BlobManager { RafsBlobTable::V6(table) => { flags |= RafsSuperFlags::from(ctx.blob_compressor); flags |= RafsSuperFlags::from(ctx.blob_digester); + flags |= RafsSuperFlags::from(ctx.blob_cipher); table.add( blob_id, 0, @@ -950,6 +1005,8 @@ impl BlobManager { ctx.blob_meta_size, ctx.blob_toc_size, ctx.blob_meta_header, + ctx.cipher_object.clone(), + ctx.cipher_ctx.clone(), ); } } @@ -1087,6 +1144,8 @@ pub struct BuildContext { pub compressor: compress::Algorithm, /// Inode and chunk digest algorithm flag. pub digester: digest::Algorithm, + /// Blob encryption algorithm flag. + pub cipher: crypt::Algorithm, /// Save host uid gid in each inode. pub explicit_uidgid: bool, /// whiteout spec: overlayfs or oci @@ -1138,6 +1197,7 @@ impl BuildContext { blob_storage: Option, blob_inline_meta: bool, features: Features, + encrypt: bool, ) -> Self { // It's a flag for images built with new nydus-image 2.2 and newer. 
let mut blob_features = BlobFeatures::CAP_TAR_TOC; @@ -1153,12 +1213,19 @@ impl BuildContext { blob_features |= BlobFeatures::TARFS; } + let cipher = if encrypt { + crypt::Algorithm::Aes128Xts + } else { + crypt::Algorithm::None + }; + BuildContext { blob_id, aligned_chunk, blob_offset, compressor, digester, + cipher, explicit_uidgid, whiteout_spec, @@ -1208,6 +1275,7 @@ impl Default for BuildContext { blob_offset: 0, compressor: compress::Algorithm::default(), digester: digest::Algorithm::default(), + cipher: crypt::Algorithm::None, explicit_uidgid: true, whiteout_spec: WhiteoutSpec::default(), diff --git a/builder/src/core/node.rs b/builder/src/core/node.rs index 44a59905aa1..95fdced2de5 100644 --- a/builder/src/core/node.rs +++ b/builder/src/core/node.rs @@ -24,8 +24,8 @@ use nydus_rafs::metadata::layout::RafsXAttrs; use nydus_rafs::metadata::{Inode, RafsVersion}; use nydus_storage::device::BlobFeatures; use nydus_storage::meta::{BlobChunkInfoV2Ondisk, BlobMetaChunkInfo}; -use nydus_utils::compress; use nydus_utils::digest::{DigestHasher, RafsDigest}; +use nydus_utils::{compress, crypt}; use nydus_utils::{div_round_up, event_tracer, root_tracer, try_round_up_4k, ByteSize}; use sha2::digest::Digest; @@ -380,6 +380,10 @@ impl Node { chunk.set_id(RafsDigest::from_buf(buf, ctx.digester)); } + if ctx.cipher != crypt::Algorithm::None && !ctx.conversion_type.is_to_ref() { + chunk.set_encrypted(true); + } + Ok((chunk, chunk_info)) } @@ -407,6 +411,7 @@ impl Node { chunk.set_uncompressed_size(d_size); let mut chunk_info = None; + let encrypted = blob_ctx.blob_cipher != crypt::Algorithm::None; if self.inode.child_count() == 1 && d_size < ctx.batch_size / 2 @@ -417,7 +422,7 @@ impl Node { if batch.chunk_data_buf_len() as u32 + d_size < ctx.batch_size { // Add into current batch chunk directly. - chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size)?); + chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size, encrypted)?); batch.append_chunk_data_buf(chunk_data); } else { // Dump current batch chunk if exists, and then add into a new batch chunk. @@ -430,7 +435,7 @@ impl Node { } // Add into a new batch chunk. - chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size)?); + chunk_info = Some(batch.generate_chunk_info(pre_d_offset, d_size, encrypted)?); batch.append_chunk_data_buf(chunk_data); } } else if !ctx.blob_features.contains(BlobFeatures::SEPARATE) { @@ -470,12 +475,25 @@ impl Node { ) -> Result<(u64, u32, bool)> { let (compressed, is_compressed) = compress::compress(chunk_data, ctx.compressor) .with_context(|| "failed to compress node file".to_string())?; - let compressed_size = compressed.len() as u32; + let encrypted = if blob_ctx.blob_cipher != crypt::Algorithm::None { + if let Some(cipher_ctx) = &blob_ctx.cipher_ctx { + let (key, iv) = cipher_ctx.get_meta_cipher_context(); + blob_ctx + .cipher_object + .encrypt(key, Some(iv), &compressed) + .context("failed to encrypt meta data")? 
+ } else { + return Err(Error::msg("the encrypt context can not be none")); + } + } else { + compressed + }; + let compressed_size = encrypted.len() as u32; let pre_compressed_offset = blob_ctx.current_compressed_offset; blob_writer - .write_all(&compressed) + .write_all(&encrypted) .context("failed to write blob")?; - blob_ctx.blob_hash.update(&compressed); + blob_ctx.blob_hash.update(&encrypted); blob_ctx.current_compressed_offset += compressed_size as u64; blob_ctx.compressed_blob_size += compressed_size as u64; diff --git a/builder/src/core/v6.rs b/builder/src/core/v6.rs index f2ea8ff208d..f81e0b95432 100644 --- a/builder/src/core/v6.rs +++ b/builder/src/core/v6.rs @@ -692,6 +692,7 @@ impl Bootstrap { let mut ext_sb = RafsV6SuperBlockExt::new(); ext_sb.set_compressor(ctx.compressor); ext_sb.set_digester(ctx.digester); + ext_sb.set_cipher(ctx.cipher); ext_sb.set_chunk_size(ctx.chunk_size); ext_sb.set_blob_table_offset(blob_table_offset); ext_sb.set_blob_table_size(blob_table_size as u32); diff --git a/builder/src/merge.rs b/builder/src/merge.rs index 2ab173bbee2..f3d2a00994f 100644 --- a/builder/src/merge.rs +++ b/builder/src/merge.rs @@ -138,6 +138,7 @@ impl Merger { let mut fs_version = RafsVersion::V6; let mut chunk_size = None; + let mut cipher = ctx.cipher; for (layer_idx, bootstrap_path) in sources.iter().enumerate() { let (rs, _) = RafsSuper::load_from_file(bootstrap_path, config_v2.clone(), false) @@ -147,8 +148,14 @@ impl Merger { .check_compatibility(&rs.meta)?; fs_version = RafsVersion::try_from(rs.meta.version) .context("failed to get RAFS version number")?; + if layer_idx != 0 && rs.meta.get_cipher() != cipher { + bail!("invalid per layer bootstrap, using different encryption algorithms"); + } else { + cipher = rs.meta.get_cipher(); + } ctx.compressor = rs.meta.get_compressor(); ctx.digester = rs.meta.get_digester(); + ctx.cipher = cipher; ctx.explicit_uidgid = rs.meta.explicit_uidgid(); if config.as_ref().unwrap().is_tarfs_mode { ctx.conversion_type = ConversionType::TarToTarfs; diff --git a/builder/src/stargz.rs b/builder/src/stargz.rs index 148e5af5159..363e0f641de 100644 --- a/builder/src/stargz.rs +++ b/builder/src/stargz.rs @@ -931,6 +931,7 @@ mod tests { Some(ArtifactStorage::FileDir(tmp_dir.clone())), false, Features::new(), + false, ); ctx.fs_version = RafsVersion::V6; let mut bootstrap_mgr = diff --git a/builder/src/tarball.rs b/builder/src/tarball.rs index 3cdba64a2e2..605fc7871ab 100644 --- a/builder/src/tarball.rs +++ b/builder/src/tarball.rs @@ -618,6 +618,39 @@ mod tests { Some(ArtifactStorage::FileDir(tmp_dir.clone())), false, Features::new(), + false, + ); + let mut bootstrap_mgr = + BootstrapManager::new(Some(ArtifactStorage::FileDir(tmp_dir)), None); + let mut blob_mgr = BlobManager::new(digest::Algorithm::Sha256); + let mut builder = TarballBuilder::new(ConversionType::TarToTarfs); + builder + .build(&mut ctx, &mut bootstrap_mgr, &mut blob_mgr) + .unwrap(); + } + + #[test] + fn test_build_encrypted_tarfs() { + let tmp_dir = vmm_sys_util::tempdir::TempDir::new().unwrap(); + let tmp_dir = tmp_dir.as_path().to_path_buf(); + let root_dir = &std::env::var("CARGO_MANIFEST_DIR").expect("$CARGO_MANIFEST_DIR"); + let source_path = PathBuf::from(root_dir).join("../tests/texture/tar/all-entry-type.tar"); + let prefetch = Prefetch::default(); + let mut ctx = BuildContext::new( + "test".to_string(), + true, + 0, + compress::Algorithm::None, + digest::Algorithm::Sha256, + true, + WhiteoutSpec::Oci, + ConversionType::TarToTarfs, + source_path, + prefetch, + 
Some(ArtifactStorage::FileDir(tmp_dir.clone())), + false, + Features::new(), + true, ); let mut bootstrap_mgr = BootstrapManager::new(Some(ArtifactStorage::FileDir(tmp_dir)), None); diff --git a/rafs/src/metadata/chunk.rs b/rafs/src/metadata/chunk.rs index cb9a9277b3a..0929ef9c2cf 100644 --- a/rafs/src/metadata/chunk.rs +++ b/rafs/src/metadata/chunk.rs @@ -248,6 +248,27 @@ impl ChunkWrapper { } } + /// Check whether the chunk is encrypted or not. + pub fn is_encrypted(&self) -> bool { + match self { + ChunkWrapper::V5(c) => c.flags.contains(BlobChunkFlags::ENCYPTED), + ChunkWrapper::V6(c) => c.flags.contains(BlobChunkFlags::ENCYPTED), + ChunkWrapper::Ref(c) => as_blob_v5_chunk_info(c.deref()) + .flags() + .contains(BlobChunkFlags::ENCYPTED), + } + } + + /// Set flag for whether chunk is encrypted. + pub fn set_encrypted(&mut self, encrypted: bool) { + self.ensure_owned(); + match self { + ChunkWrapper::V5(c) => c.flags.set(BlobChunkFlags::ENCYPTED, encrypted), + ChunkWrapper::V6(c) => c.flags.set(BlobChunkFlags::ENCYPTED, encrypted), + ChunkWrapper::Ref(_c) => panic!("unexpected"), + } + } + /// Set flag for whether chunk is batch chunk. pub fn set_batch(&mut self, batch: bool) { self.ensure_owned(); @@ -281,6 +302,7 @@ impl ChunkWrapper { compressed_offset: u64, compressed_size: u32, is_compressed: bool, + is_encrypted: bool, ) -> Result<()> { self.ensure_owned(); match self { @@ -307,6 +329,9 @@ impl ChunkWrapper { if is_compressed { c.flags |= BlobChunkFlags::COMPRESSED; } + if is_encrypted { + c.flags |= BlobChunkFlags::ENCYPTED; + } } ChunkWrapper::Ref(_c) => panic!("unexpected"), } diff --git a/rafs/src/metadata/layout/v6.rs b/rafs/src/metadata/layout/v6.rs index e74aa6bdb69..a21863ec97c 100644 --- a/rafs/src/metadata/layout/v6.rs +++ b/rafs/src/metadata/layout/v6.rs @@ -4,7 +4,7 @@ // SPDX-License-Identifier: Apache-2.0 use std::collections::HashMap; -use std::convert::TryFrom; +use std::convert::{TryFrom, TryInto}; use std::ffi::{OsStr, OsString}; use std::fmt::Debug; use std::io::{Read, Result}; @@ -19,6 +19,7 @@ use nydus_storage::meta::{ BlobChunkInfoV1Ondisk, BlobChunkInfoV2Ondisk, BlobCompressionContextHeader, }; use nydus_storage::{RAFS_MAX_CHUNKS_PER_BLOB, RAFS_MAX_CHUNK_SIZE}; +use nydus_utils::crypt::{self, Cipher, CipherContext}; use nydus_utils::{compress, digest, round_up, ByteSize}; use crate::metadata::inode::InodeWrapper; @@ -572,6 +573,15 @@ impl RafsV6SuperBlockExt { self.set_chunk_table_size(size); } + /// Set encryption algorithm to encrypt chunks of the Rafs filesystem. + pub fn set_cipher(&mut self, cipher: crypt::Algorithm) { + let c: RafsSuperFlags = cipher.into(); + + self.s_flags &= !RafsSuperFlags::ENCRYPTION_NONE.bits(); + self.s_flags &= !RafsSuperFlags::ENCRYPTION_ASE_128_XTS.bits(); + self.s_flags |= c.bits(); + } + impl_pub_getter_setter!( chunk_table_offset, set_chunk_table_offset, @@ -1412,11 +1422,23 @@ struct RafsV6Blob { // SHA256 digest of RAFS blob for ZRAN, containing `blob.meta`, `blob.digest` `blob.toc` and // optionally 'image.boot`. It's all zero for ZRAN blobs with inlined-meta, so need special // handling. + // When using encryption mod, it's reused for saving encryption key. blob_meta_digest: [u8; 32], // Size of RAFS blob for ZRAN. It's zero ZRAN blobs with inlined-meta. + // When using encryption mod, it's reused for saving encryption iv first 8 bytes. blob_meta_size: u64, - - reserved2: [u8; 48], + // When using encryption mod, used for cipher_iv last 8 bytes. 
+ // 0 7 15 + // +------------------+------------------+ + // | blob_meta_size | cipher_iv[8..16] | + // | 8bytes | 8bytes | + // +------------------+------------------+ + // \_ cipher_iv[0..16] _/ + cipher_iv: [u8; 8], + // Crypt algorithm for chunks in the blob. + cipher_algo: u32, + + reserved2: [u8; 36], } impl Default for RafsV6Blob { @@ -1440,8 +1462,10 @@ impl Default for RafsV6Blob { blob_meta_digest: [0u8; 32], blob_meta_size: 0, blob_toc_size: 0u32, + cipher_iv: [0u8; 8], + cipher_algo: (crypt::Algorithm::None as u32).to_le(), - reserved2: [0u8; 48], + reserved2: [0u8; 36], } } } @@ -1473,6 +1497,31 @@ impl RafsV6Blob { let digest = digest::Algorithm::try_from(u32::from_le(self.digest_algo)) .map_err(|_| einval!("invalid digest algorithm in Rafs v6 blob entry"))?; blob_info.set_digester(digest); + let cipher = crypt::Algorithm::try_from(u32::from_le(self.cipher_algo)) + .map_err(|_| einval!("invalid cipher algorithm in Rafs v6 blob entry"))?; + let cipher_object = cipher + .new_cipher() + .map_err(|e| einval!(format!("failed to create new cipher object {}", e)))?; + let cipher_context = match cipher { + crypt::Algorithm::None => None, + crypt::Algorithm::Aes128Xts => { + let mut cipher_iv = [0u8; 16]; + cipher_iv[..8].copy_from_slice(&self.blob_meta_size.to_le_bytes()); + cipher_iv[8..].copy_from_slice(&self.cipher_iv); + Some(CipherContext::new( + self.blob_meta_digest.to_vec(), + cipher_iv.to_vec(), + false, + )?) + } + _ => { + return Err(einval!(format!( + "invalid cipher algorithm {:?} when creating cipher context", + cipher + ))) + } + }; + blob_info.set_cipher_info(cipher, Arc::new(cipher_object), cipher_context); blob_info.set_blob_meta_info( u64::from_le(self.ci_offset), u64::from_le(self.ci_compressed_size), @@ -1498,6 +1547,39 @@ impl RafsV6Blob { let mut blob_id = [0u8; BLOB_SHA256_LEN]; blob_id[..id.len()].copy_from_slice(id); + let (blob_meta_digest, blob_meta_size, cipher_iv) = match blob_info.cipher() { + crypt::Algorithm::None => ( + *blob_info.blob_meta_digest(), + blob_info.blob_meta_size(), + [0u8; 8], + ), + crypt::Algorithm::Aes128Xts => { + let cipher_ctx = match blob_info.cipher_context() { + Some(ctx) => ctx, + None => { + return Err(einval!( + "cipher context unset while using Aes128Xts encryption algorithm" + )) + } + }; + let cipher_key: [u8; 32] = + cipher_ctx.get_meta_cipher_context().0.try_into().unwrap(); + let (cipher_iv_top_half, cipher_iv_bottom_half) = + cipher_ctx.get_meta_cipher_context().1.split_at(8); + ( + cipher_key, + u64::from_le_bytes(cipher_iv_top_half.try_into().unwrap()), + cipher_iv_bottom_half.try_into().unwrap(), + ) + } + _ => { + return Err(einval!(format!( + "invalid cipher algorithm type {:?} in blob info", + blob_info.cipher() + ))) + } + }; + Ok(RafsV6Blob { blob_id, blob_index: blob_info.blob_index().to_le(), @@ -1514,11 +1596,13 @@ impl RafsV6Blob { ci_uncompressed_size: blob_info.meta_ci_uncompressed_size().to_le(), blob_toc_digest: *blob_info.blob_toc_digest(), - blob_meta_digest: *blob_info.blob_meta_digest(), - blob_meta_size: blob_info.blob_meta_size(), + blob_meta_digest, + blob_meta_size, blob_toc_size: blob_info.blob_toc_size(), + cipher_iv, + cipher_algo: (blob_info.cipher() as u32).to_le(), - reserved2: [0u8; 48], + reserved2: [0u8; 36], }) } @@ -1576,10 +1660,11 @@ impl RafsV6Blob { if compress::Algorithm::try_from(u32::from_le(self.compression_algo)).is_err() || compress::Algorithm::try_from(u32::from_le(self.ci_compressor)).is_err() || digest::Algorithm::try_from(u32::from_le(self.digest_algo)).is_err() + || 
crypt::Algorithm::try_from(self.cipher_algo).is_err() { error!( - "RafsV6Blob: idx {} invalid compression_algo {} ci_compressor {} digest_algo {}", - blob_index, self.compression_algo, self.ci_compressor, self.digest_algo + "RafsV6Blob: idx {} invalid compression_algo {} ci_compressor {} digest_algo {} cipher_algo {}", + blob_index, self.compression_algo, self.ci_compressor, self.digest_algo, self.cipher_algo, ); return false; } @@ -1727,6 +1812,8 @@ impl RafsV6BlobTable { blob_meta_size: u64, blob_toc_size: u32, header: BlobCompressionContextHeader, + cipher_object: Arc, + cipher_context: Option, ) -> u32 { let blob_index = self.entries.len() as u32; let blob_features = BlobFeatures::try_from(header.features()).unwrap(); @@ -1742,6 +1829,7 @@ impl RafsV6BlobTable { blob_info.set_compressor(flags.into()); blob_info.set_digester(flags.into()); + blob_info.set_cipher(flags.into()); blob_info.set_prefetch_info(prefetch_offset as u64, prefetch_size as u64); blob_info.set_blob_meta_info( header.ci_compressed_offset(), @@ -1753,6 +1841,7 @@ impl RafsV6BlobTable { blob_info.set_blob_toc_digest(blob_toc_digest); blob_info.set_blob_meta_size(blob_meta_size); blob_info.set_blob_toc_size(blob_toc_size); + blob_info.set_cipher_info(flags.into(), cipher_object, cipher_context); self.entries.push(Arc::new(blob_info)); diff --git a/rafs/src/metadata/mod.rs b/rafs/src/metadata/mod.rs index 3c54fb49ebe..be4b59ad983 100644 --- a/rafs/src/metadata/mod.rs +++ b/rafs/src/metadata/mod.rs @@ -26,8 +26,8 @@ use nydus_storage::device::{ BlobChunkInfo, BlobDevice, BlobFeatures, BlobInfo, BlobIoMerge, BlobIoVec, }; use nydus_storage::meta::toc::TocEntryList; -use nydus_utils::compress; use nydus_utils::digest::{self, RafsDigest}; +use nydus_utils::{compress, crypt}; use serde::Serialize; use self::layout::v5::RafsV5PrefetchTable; @@ -288,10 +288,12 @@ bitflags! { const INLINED_CHUNK_DIGEST = 0x0000_0100; /// RAFS works in Tarfs mode, which directly uses tar streams as data blobs. const TARTFS_MODE = 0x0000_0200; + /// Data chunks are not encrypted. + const ENCRYPTION_NONE = 0x0100_0000; + /// Data chunks are encrypted with AES-128-XTS. + const ENCRYPTION_ASE_128_XTS = 0x0200_0000; // Reserved for future compatible changes. - const PRESERVED_COMPAT_7 = 0x0100_0000; - const PRESERVED_COMPAT_6 = 0x0200_0000; const PRESERVED_COMPAT_5 = 0x0400_0000; const PRESERVED_COMPAT_4 = 0x0800_0000; const PRESERVED_COMPAT_3 = 0x1000_0000; @@ -356,6 +358,26 @@ impl From for RafsSuperFlags { } } +impl From for crypt::Algorithm { + fn from(flags: RafsSuperFlags) -> Self { + match flags { + // NOTE: only aes-128-xts encryption algorithm supported. + x if x.contains(RafsSuperFlags::ENCRYPTION_ASE_128_XTS) => crypt::Algorithm::Aes128Xts, + _ => crypt::Algorithm::None, + } + } +} + +impl From for RafsSuperFlags { + fn from(c: crypt::Algorithm) -> RafsSuperFlags { + match c { + // NOTE: only aes-128-xts encryption algorithm supported. + crypt::Algorithm::Aes128Xts => RafsSuperFlags::ENCRYPTION_ASE_128_XTS, + _ => RafsSuperFlags::ENCRYPTION_NONE, + } + } +} + /// Configuration information to check compatibility between RAFS filesystems. #[derive(Clone, Copy, Debug)] pub struct RafsSuperConfig { @@ -522,6 +544,15 @@ impl RafsSuperMeta { } } + /// V6: get crypt algorithm to validate chunk data for the filesystem. + pub fn get_cipher(&self) -> crypt::Algorithm { + if self.is_v6() { + self.flags.into() + } else { + crypt::Algorithm::None + } + } + /// Get `RafsSuperConfig` object to check compatibility. 
pub fn get_config(&self) -> RafsSuperConfig { RafsSuperConfig { diff --git a/src/bin/nydus-image/inspect.rs b/src/bin/nydus-image/inspect.rs index f9ca6998c6b..06af8cbaecf 100644 --- a/src/bin/nydus-image/inspect.rs +++ b/src/bin/nydus-image/inspect.rs @@ -307,6 +307,7 @@ Mapped Block Address: {mapped_blkaddr} Features: {features:?} Compressor: {compressor} Digester: {digester} +Cipher: {cipher} Chunk Size: 0x{chunk_size:x} Chunk Count: {chunk_count} Prefetch Table Offset: {prefetch_tbl_offset} @@ -331,6 +332,7 @@ RAFS Blob Size: {rafs_size} chunk_count = blob_info.chunk_count(), compressor = blob_info.compressor(), digester = blob_info.digester(), + cipher = blob_info.cipher(), prefetch_tbl_offset = blob_info.prefetch_offset(), prefetch_tbl_size = blob_info.prefetch_size(), meta_compressor = blob_info.meta_ci_compressor(), diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs index f4a8b39f6e3..7c8ef66593e 100644 --- a/src/bin/nydus-image/main.rs +++ b/src/bin/nydus-image/main.rs @@ -346,6 +346,14 @@ fn prepare_cmd_args(bti_string: &'static str) -> App { .arg( arg_output_json.clone(), ) + .arg( + Arg::new("encrypt") + .long("encrypt") + .short('E') + .help("Encrypt the generated RAFS metadata and data blobs") + .action(ArgAction::SetTrue) + .required(false) + ) ); let app = app.subcommand( @@ -755,7 +763,7 @@ impl Command { .map(|s| s.as_str()) .unwrap_or_default(), )?; - + let encrypt = matches.get_flag("encrypt"); match conversion_type { ConversionType::DirectoryToRafs => { Self::ensure_directory(&source_path)?; @@ -813,6 +821,12 @@ impl Command { conversion_type ); } + if encrypt { + bail!( + "conversion type '{}' conflicts with '--encrypt'", + conversion_type + ) + } } ConversionType::TarToTarfs => { Self::ensure_file(&source_path)?; @@ -878,6 +892,12 @@ impl Command { conversion_type ); } + if encrypt { + bail!( + "conversion type '{}' conflicts with '--encrypt'", + conversion_type + ) + } } ConversionType::EStargzIndexToRef => { Self::ensure_file(&source_path)?; @@ -914,6 +934,12 @@ impl Command { if blob_id.trim() == "" { bail!("'--blob-id' is missing for '--type stargz_index'"); } + if encrypt { + bail!( + "conversion type '{}' conflicts with '--encrypt'", + conversion_type + ) + } } ConversionType::DirectoryToStargz | ConversionType::TargzToStargz @@ -943,6 +969,7 @@ impl Command { blob_storage, blob_inline_meta, features, + encrypt, ); build_ctx.set_fs_version(version); build_ctx.set_chunk_size(chunk_size); @@ -991,13 +1018,23 @@ impl Command { } let mut builder: Box = match conversion_type { - ConversionType::DirectoryToRafs => Box::new(DirectoryBuilder::new()), + ConversionType::DirectoryToRafs => { + if encrypt { + build_ctx.blob_features.insert(BlobFeatures::CHUNK_INFO_V2); + } + Box::new(DirectoryBuilder::new()) + } ConversionType::EStargzIndexToRef => { Box::new(StargzBuilder::new(blob_data_size, &build_ctx)) } ConversionType::EStargzToRafs | ConversionType::TargzToRafs - | ConversionType::TarToRafs => Box::new(TarballBuilder::new(conversion_type)), + | ConversionType::TarToRafs => { + if encrypt { + build_ctx.blob_features.insert(BlobFeatures::CHUNK_INFO_V2); + } + Box::new(TarballBuilder::new(conversion_type)) + } ConversionType::EStargzToRef | ConversionType::TargzToRef | ConversionType::TarToRef => { diff --git a/storage/src/cache/cachedfile.rs b/storage/src/cache/cachedfile.rs index c387400a648..e41359e7973 100644 --- a/storage/src/cache/cachedfile.rs +++ b/storage/src/cache/cachedfile.rs @@ -21,7 +21,7 @@ use std::time::Duration; use 
fuse_backend_rs::file_buf::FileVolatileSlice; use nix::sys::uio; use nydus_utils::compress::Decoder; -use nydus_utils::crypt::{self, Cipher}; +use nydus_utils::crypt::{self, Cipher, CipherContext}; use nydus_utils::metrics::{BlobcacheMetrics, Metric}; use nydus_utils::{compress, digest, round_up_usize, DelayType, Delayer, FileRangeReader}; use tokio::runtime::Runtime; @@ -30,7 +30,6 @@ use crate::backend::BlobReader; use crate::cache::state::ChunkMap; use crate::cache::worker::{AsyncPrefetchConfig, AsyncPrefetchMessage, AsyncWorkerMgr}; use crate::cache::{BlobCache, BlobIoMergeState}; -use crate::context::CipherContext; use crate::device::{ BlobChunkInfo, BlobInfo, BlobIoDesc, BlobIoRange, BlobIoSegment, BlobIoTag, BlobIoVec, BlobObject, BlobPrefetchRequest, @@ -195,7 +194,7 @@ impl FileCacheEntry { metrics.buffered_backend_size.sub(buffer.size() as u64); let mut t_buf; let buf = if !is_raw_data && is_cache_encrypted { - let (key, iv) = cipher_context.get_chunk_cipher_context(chunk.as_ref()); + let (key, iv) = cipher_context.generate_cipher_context(&chunk.chunk_id().data); let buf = buffer.slice(); t_buf = alloc_buf(round_up_usize(buf.len(), ENCRYPTION_PAGE_SIZE)); @@ -465,6 +464,10 @@ impl BlobCache for FileCacheEntry { self.blob_info.cipher_object() } + fn blob_cipher_context(&self) -> Option { + self.blob_info.cipher_context() + } + fn blob_digester(&self) -> digest::Algorithm { self.blob_info.digester() } @@ -1308,7 +1311,7 @@ impl FileCacheEntry { let size = chunk.uncompressed_size() as usize; let cipher_object = self.cache_cipher_object.clone(); let cipher_context = self.cache_cipher_context.clone(); - let (key, iv) = cipher_context.get_chunk_cipher_context(chunk); + let (key, iv) = cipher_context.generate_cipher_context(&chunk.chunk_id().data); let align_size = round_up_usize(size, ENCRYPTION_PAGE_SIZE); let mut buf = alloc_buf(align_size); @@ -1317,12 +1320,7 @@ impl FileCacheEntry { let mut pos = 0; while pos < buffer.len() { assert!(pos + ENCRYPTION_PAGE_SIZE <= buf.len()); - match cipher_object.decrypt( - key, - Some(&iv), - &buf[pos..pos + ENCRYPTION_PAGE_SIZE], - ENCRYPTION_PAGE_SIZE, - ) { + match cipher_object.decrypt(key, Some(&iv), &buf[pos..pos + ENCRYPTION_PAGE_SIZE]) { Ok(buf2) => { let len = std::cmp::min(buffer.len() - pos, ENCRYPTION_PAGE_SIZE); buffer[pos..pos + len].copy_from_slice(&buf2[..len]); diff --git a/storage/src/cache/dummycache.rs b/storage/src/cache/dummycache.rs index 9f61bcbd9ac..2e554b6cbc0 100644 --- a/storage/src/cache/dummycache.rs +++ b/storage/src/cache/dummycache.rs @@ -24,7 +24,7 @@ use std::sync::Arc; use fuse_backend_rs::file_buf::FileVolatileSlice; use nydus_api::CacheConfigV2; -use nydus_utils::crypt::{Algorithm, Cipher}; +use nydus_utils::crypt::{Algorithm, Cipher, CipherContext}; use nydus_utils::{compress, digest}; use crate::backend::{BlobBackend, BlobReader}; @@ -72,6 +72,10 @@ impl BlobCache for DummyCache { self.blob_info.cipher_object() } + fn blob_cipher_context(&self) -> Option { + self.blob_info.cipher_context() + } + fn blob_digester(&self) -> digest::Algorithm { self.digester } diff --git a/storage/src/cache/filecache/mod.rs b/storage/src/cache/filecache/mod.rs index e5048896146..ef7e36cb4b3 100644 --- a/storage/src/cache/filecache/mod.rs +++ b/storage/src/cache/filecache/mod.rs @@ -22,7 +22,6 @@ use crate::cache::state::{ }; use crate::cache::worker::{AsyncPrefetchConfig, AsyncWorkerMgr}; use crate::cache::{BlobCache, BlobCacheMgr}; -use crate::context::CipherContext; use crate::device::{BlobFeatures, BlobInfo}; use 
crate::RAFS_DEFAULT_CHUNK_SIZE; @@ -290,7 +289,11 @@ impl FileCacheEntry { let key = hex::decode(mgr.cache_encryption_key.clone()) .map_err(|_e| einval!("invalid cache file encryption key"))?; let cipher = crypt::Algorithm::Aes128Xts.new_cipher()?; - let ctx = CipherContext::new(key, mgr.cache_convergent_encryption)?; + let ctx = crypt::CipherContext::new( + key, + [0u8; 16].to_vec(), + mgr.cache_convergent_encryption, + )?; (Arc::new(cipher), Arc::new(ctx)) } else { (Default::default(), Default::default()) diff --git a/storage/src/cache/mod.rs b/storage/src/cache/mod.rs index e69ceb1eeb8..6d5da820709 100644 --- a/storage/src/cache/mod.rs +++ b/storage/src/cache/mod.rs @@ -16,6 +16,7 @@ //! `BlobCacheMgr`, simply reporting each chunk as cached or not cached according to //! configuration. +use std::borrow::Cow; use std::cmp; use std::io::Result; use std::sync::Arc; @@ -23,7 +24,7 @@ use std::time::Instant; use fuse_backend_rs::file_buf::FileVolatileSlice; use nydus_utils::compress::zlib_random::ZranDecoder; -use nydus_utils::crypt::{self, Cipher}; +use nydus_utils::crypt::{self, Cipher, CipherContext}; use nydus_utils::{compress, digest}; use crate::backend::{BlobBackend, BlobReader}; @@ -156,6 +157,9 @@ pub trait BlobCache: Send + Sync { /// Cipher object to encrypt/decrypt chunk data. fn blob_cipher_object(&self) -> Arc; + /// Cipher context to encrypt/decrypt chunk data. + fn blob_cipher_context(&self) -> Option; + /// Get message digest algorithm to handle chunks in the blob. fn blob_digester(&self) -> digest::Algorithm; @@ -282,8 +286,8 @@ pub trait BlobCache: Send + Sync { /// Read a whole chunk directly from the storage backend. /// - /// The fetched chunk data may be compressed or not, which depends on chunk information from - /// `chunk`.Moreover, chunk data from backend storage may be validated per user's configuration. + /// The fetched chunk data may be compressed or encrypted or not, which depends on chunk information + /// from `chunk`. Moreover, chunk data from backend storage may be validated per user's configuration. fn read_chunk_from_backend( &self, chunk: &dyn BlobChunkInfo, @@ -295,7 +299,12 @@ pub trait BlobCache: Send + Sync { if self.is_zran() || self.is_batch() { return Err(enosys!("read_chunk_from_backend")); - } else if chunk.is_compressed() { + } else if !chunk.is_compressed() && !chunk.is_encrypted() { + let size = self.reader().read(buffer, offset).map_err(|e| eio!(e))?; + if size != buffer.len() { + return Err(eio!("storage backend returns less data than requested")); + } + } else { let c_size = if self.is_legacy_stargz() { self.get_legacy_stargz_size(offset, buffer.len())? 
} else { @@ -309,13 +318,16 @@ pub trait BlobCache: Send + Sync { if size != raw_buffer.len() { return Err(eio!("storage backend returns less data than requested")); } - self.decompress_chunk_data(&raw_buffer, buffer, true)?; + let raw_buffer = if chunk.is_encrypted() { + trace!("read chunk from backend, ready to decrypt, {}", c_size); + let mut unencryted_buffer = alloc_buf(c_size); + self.decrypt_chunk_data(&raw_buffer, unencryted_buffer.as_mut_slice())?; + unencryted_buffer + } else { + raw_buffer + }; + self.decompress_chunk_data(&raw_buffer, buffer, chunk.is_compressed())?; c_buf = Some(raw_buffer); - } else { - let size = self.reader().read(buffer, offset).map_err(|e| eio!(e))?; - if size != buffer.len() { - return Err(eio!("storage backend returns less data than requested")); - } } let duration = Instant::now().duration_since(start).as_millis(); @@ -389,6 +401,29 @@ pub trait BlobCache: Send + Sync { fn get_blob_meta_info(&self) -> Result>> { Ok(None) } + + /// Decrypt chunk data. + fn decrypt_chunk_data(&self, raw_buffer: &[u8], buffer: &mut [u8]) -> Result<()> { + let cipher_object = self.blob_cipher_object(); + let ctx = if self.blob_cipher_context().is_some() { + self.blob_cipher_context().unwrap() + } else { + return Err(eother!("data encrypted but cipher context unsetted")); + }; + let (key, iv) = ctx.get_meta_cipher_context(); + match cipher_object.decrypt(key, Some(iv), raw_buffer) { + Ok(buf2) => { + buffer.copy_from_slice(&buf2); + } + Err(e) => { + return Err(eother!(format!( + "failed to decrypt data from cache file, {}", + e + ))) + } + } + Ok(()) + } } /// An iterator to enumerate decompressed data for chunks. @@ -571,10 +606,19 @@ impl<'a, 'b> ChunkDecompressState<'a, 'b> { let offset_merged = (c_offset - self.blob_offset) as usize; let end_merged = offset_merged + c_size as usize; - let buf = &self.c_buf[offset_merged..end_merged]; + let raw_buffer = if chunk.is_encrypted() { + trace!("decrypt chunk data, c_size {}, d_size{}", c_size, d_size); + let mut t_buf = alloc_buf(c_size as usize); + self.cache + .decrypt_chunk_data(&self.c_buf[offset_merged..end_merged], &mut t_buf) + .unwrap(); + Cow::Owned(t_buf) + } else { + Cow::Borrowed(&self.c_buf[offset_merged..end_merged]) + }; let mut buffer = alloc_buf(d_size); self.cache - .decompress_chunk_data(buf, &mut buffer, chunk.is_compressed())?; + .decompress_chunk_data(&raw_buffer, &mut buffer, chunk.is_compressed())?; self.cache .validate_chunk_data(chunk, &buffer, false) .map_err(|e| { diff --git a/storage/src/context.rs b/storage/src/context.rs deleted file mode 100644 index c6a83dbd91e..00000000000 --- a/storage/src/context.rs +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2021 Alibaba Cloud. All rights reserved. -// -// SPDX-License-Identifier: Apache-2.0 - -use std::io::Result; - -use crate::device::BlobChunkInfo; - -// Openssl rejects keys with identical first and second halves for xts. -// Use a default key for such cases. -const DEFAULT_CE_KEY: [u8; 32] = [ - 0xac, 0xed, 0x14, 0x69, 0x94, 0x23, 0x1e, 0xca, 0x44, 0x8c, 0xed, 0x2f, 0x6b, 0x40, 0x0c, 0x00, - 0xfd, 0xbb, 0x3f, 0xac, 0xdd, 0xc7, 0xd9, 0xee, 0x83, 0xf6, 0x5c, 0xd9, 0x3c, 0xaa, 0x28, 0x7c, -]; - -/// Struct to provide context information for data encryption/decryption. -#[derive(Default)] -pub struct CipherContext { - key: Vec, - convergent_encryption: bool, -} - -impl CipherContext { - /// Create a new instance of [CipherContext]. 
- pub fn new(key: Vec, convergent_encryption: bool) -> Result { - if key.len() != 32 { - return Err(einval!("invalid key length for encryption")); - } else if key[0..16] == key[16..32] { - return Err(einval!("invalid symmetry key for encryption")); - } - - Ok(CipherContext { - key, - convergent_encryption, - }) - } - - /// Get context information for chunk encryption/decryption. - pub fn get_chunk_cipher_context<'a>( - &'a self, - chunk: &'a dyn BlobChunkInfo, - ) -> (&'a [u8], Vec) { - let iv = vec![0u8; 16]; - if self.convergent_encryption { - let id = &chunk.chunk_id().data; - if id[0..16] == id[16..32] { - (&DEFAULT_CE_KEY, iv) - } else { - (&chunk.chunk_id().data, iv) - } - } else { - (&self.key, iv) - } - } - - /// Get context information for meta data encryption/decryption. - pub fn get_meta_cipher_context(&self) -> &[u8] { - &self.key - } -} diff --git a/storage/src/device.rs b/storage/src/device.rs index 78ec0b13f76..ef7346a0c78 100644 --- a/storage/src/device.rs +++ b/storage/src/device.rs @@ -38,7 +38,7 @@ use fuse_backend_rs::file_traits::FileReadWriteVolatile; use nydus_api::ConfigV2; use nydus_utils::compress; -use nydus_utils::crypt::{self, Cipher}; +use nydus_utils::crypt::{self, Cipher, CipherContext}; use nydus_utils::digest::{self, RafsDigest}; use crate::cache::BlobCache; @@ -168,6 +168,8 @@ pub struct BlobInfo { meta_path: Arc>, /// V6: support data encryption. cipher_object: Arc, + /// Cipher context for encryption. + cipher_ctx: Option, } impl BlobInfo { @@ -210,6 +212,7 @@ impl BlobInfo { fs_cache_file: None, meta_path: Arc::new(Mutex::new(String::new())), cipher_object: Default::default(), + cipher_ctx: None, }; blob_info.compute_features(); @@ -307,15 +310,31 @@ impl BlobInfo { self.cipher } + /// Set encryption algorithm for the blob. + pub fn set_cipher(&mut self, cipher: crypt::Algorithm) { + self.cipher = cipher; + } + /// Get the cipher object to encrypt/decrypt chunk data. pub fn cipher_object(&self) -> Arc { self.cipher_object.clone() } - /// Set the cipher algorithm to handle chunk data. - pub fn set_cipher_info(&mut self, cipher: crypt::Algorithm, cipher_object: Arc) { + /// Get the cipher context. + pub fn cipher_context(&self) -> Option { + self.cipher_ctx.clone() + } + + /// Set the cipher info, including cipher algo, cipher object and cipher context. + pub fn set_cipher_info( + &mut self, + cipher: crypt::Algorithm, + cipher_object: Arc, + cipher_ctx: Option, + ) { self.cipher = cipher; self.cipher_object = cipher_object; + self.cipher_ctx = cipher_ctx; } /// Get the message digest algorithm for the blob. @@ -537,6 +556,15 @@ impl BlobInfo { }; Ok(id) } + + /// Get the cipher info, including cipher algo, cipher object and cipher context. + pub fn get_cipher_info(&self) -> (crypt::Algorithm, Arc, Option) { + ( + self.cipher, + self.cipher_object.clone(), + self.cipher_ctx.clone(), + ) + } } bitflags! 
{ diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 8cb99d558f1..aae6deaea01 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -48,7 +48,6 @@ use std::fmt::{Display, Formatter}; pub mod backend; pub mod cache; -pub mod context; pub mod device; pub mod factory; pub mod meta; diff --git a/storage/src/meta/batch.rs b/storage/src/meta/batch.rs index f67ee674f8d..fefdc056d9b 100644 --- a/storage/src/meta/batch.rs +++ b/storage/src/meta/batch.rs @@ -126,6 +126,7 @@ impl BatchContextGenerator { &mut self, uncompressed_offset: u64, uncompressed_size: u32, + encrypted: bool, ) -> Result { let mut chunk = BlobChunkInfoV2Ondisk::default(); chunk.set_compressed_offset(0); @@ -136,6 +137,7 @@ impl BatchContextGenerator { chunk.set_batch_index(self.contexts.len() as u32); chunk.set_uncompressed_offset_in_batch_buf(self.chunk_data_buf_len() as u32); chunk.set_compressed(true); + chunk.set_encrypted(encrypted); Ok(chunk) } diff --git a/storage/src/meta/chunk_info_v2.rs b/storage/src/meta/chunk_info_v2.rs index 562082b009f..9a2ce255a9f 100644 --- a/storage/src/meta/chunk_info_v2.rs +++ b/storage/src/meta/chunk_info_v2.rs @@ -14,10 +14,10 @@ const CHUNK_V2_UNCOMP_OFFSET_SHIFT: u64 = 12; const CHUNK_V2_UNCOMP_SIZE_SHIFT: u64 = 32; const CHUNK_V2_FLAG_MASK: u64 = 0xff << 56; const CHUNK_V2_FLAG_COMPRESSED: u64 = 0x1 << 56; -const CHUNK_V2_FLAG_ENCRYPTED: u64 = 0x2 << 56; const CHUNK_V2_FLAG_ZRAN: u64 = 0x2 << 56; const CHUNK_V2_FLAG_BATCH: u64 = 0x4 << 56; -const CHUNK_V2_FLAG_VALID: u64 = 0x7 << 56; +const CHUNK_V2_FLAG_ENCRYPTED: u64 = 0x8 << 56; +const CHUNK_V2_FLAG_VALID: u64 = 0xf << 56; /// Chunk compression information on disk format V2. #[repr(C, packed)] @@ -40,7 +40,6 @@ impl BlobChunkInfoV2Ondisk { } } - #[allow(unused)] pub(crate) fn set_encrypted(&mut self, encrypted: bool) { if encrypted { self.uncomp_info |= u64::to_le(CHUNK_V2_FLAG_ENCRYPTED); @@ -198,10 +197,12 @@ impl BlobMetaChunkInfo for BlobChunkInfoV2Ondisk { || self.uncompressed_end() > state.uncompressed_size || self.uncompressed_size() == 0 || (!state.is_separate() && !self.is_batch() && self.compressed_size() == 0) - || (!self.is_compressed() && self.uncompressed_size() != self.compressed_size()) + || (!self.is_encrypted() + && !self.is_compressed() + && self.uncompressed_size() != self.compressed_size()) { return Err(einval!(format!( - "invalid chunk, blob: index {}/c_size 0x{:}/d_size 0x{:x}, chunk: c_end 0x{:x}/d_end 0x{:x}/compressed {} batch {} zran {}", + "invalid chunk, blob: index {}/c_size 0x{:}/d_size 0x{:x}, chunk: c_end 0x{:x}/d_end 0x{:x}/compressed {} batch {} zran {} encrypted {}", state.blob_index, state.compressed_size, state.uncompressed_size, @@ -210,6 +211,7 @@ impl BlobMetaChunkInfo for BlobChunkInfoV2Ondisk { self.is_compressed(), self.is_batch(), self.is_zran(), + self.is_encrypted() ))); } diff --git a/storage/src/meta/mod.rs b/storage/src/meta/mod.rs index 7bfb477ac7e..9997b7296a9 100644 --- a/storage/src/meta/mod.rs +++ b/storage/src/meta/mod.rs @@ -33,10 +33,10 @@ use std::ops::{Add, BitAnd, Not}; use std::path::PathBuf; use std::sync::Arc; -use nydus_utils::compress; use nydus_utils::compress::zlib_random::ZranContext; use nydus_utils::digest::{DigestData, RafsDigest}; use nydus_utils::filemap::FileMapState; +use nydus_utils::{compress, crypt}; use crate::backend::BlobReader; use crate::device::v5::BlobV5ChunkInfo; @@ -753,28 +753,73 @@ impl BlobCompressionContextInfo { ))); } - let (uncompressed, header) = if blob_info.meta_ci_compressor() == compress::Algorithm::None - { - let 
uncompressed = &raw_data[0..uncompressed_size as usize]; - let header = &raw_data[uncompressed_size as usize..expected_raw_size]; - (Cow::Borrowed(uncompressed), header) + let (decrypted, header) = if blob_info.cipher() != crypt::Algorithm::None { + let cipher_ctx = match blob_info.cipher_context() { + Some(ctx) => ctx, + _ => { + return Err(eio!(format!( + "failed to read metadata for blob {} from backend, cipher {}, cipher context is none", + blob_info.blob_id(), + blob_info.cipher(), + ))); + } + }; + let (key, iv) = cipher_ctx.get_meta_cipher_context(); + let decrypted = match blob_info.cipher_object().decrypt( + key, + Some(iv), + &raw_data[0..compressed_size as usize], + ) { + Ok(buf) => Cow::Owned(buf), + Err(e) => { + return Err(eio!(format!( + "failed to decrypt metadata for blob {} from backend, cipher {}, encrypted data size {}, {}", + blob_info.blob_id(), + blob_info.cipher(), + compressed_size, + e + ))); + } + }; + let header = match blob_info.cipher_object().decrypt( + key, + Some(iv), + &raw_data[compressed_size as usize..expected_raw_size], + ) { + Ok(buf) => Cow::Owned(buf), + Err(e) => { + return Err(eio!(format!( + "failed to decrypt meta header for blob {} from backend, cipher {}, encrypted data size {}, {}", + blob_info.blob_id(), + blob_info.cipher(), + compressed_size, + e + ))); + } + }; + (decrypted, header) } else { + ( + Cow::Borrowed(&raw_data[0..compressed_size as usize]), + Cow::Borrowed(&raw_data[compressed_size as usize..expected_raw_size]), + ) + }; + + let uncompressed = if blob_info.meta_ci_compressor() != compress::Algorithm::None { // Lz4 does not support concurrent decompression of the same data into // the same piece of memory. There will be multiple containers mmap the // same file, causing the buffer to be shared between different // processes. This will cause data errors due to race issues when // decompressing with lz4. We solve this problem by creating a temporary // memory to hold the decompressed data. - // // Because this process will only be executed when the blob.meta file is // created for the first time, which means that a machine will only // execute the process once when the blob.meta is created for the first // time, the memory consumption and performance impact are relatively // small. 
let mut uncompressed = vec![0u8; uncompressed_size as usize]; - let header = &raw_data[compressed_size as usize..expected_raw_size]; compress::decompress( - &raw_data[0..compressed_size as usize], + &decrypted, &mut uncompressed, blob_info.meta_ci_compressor(), ) @@ -782,14 +827,14 @@ impl BlobCompressionContextInfo { error!("failed to decompress blob meta data: {}", e); e })?; - (Cow::Owned(uncompressed), header) + Cow::Owned(uncompressed) + } else { + decrypted }; - buffer[0..uncompressed_size as usize].copy_from_slice(&uncompressed); buffer[aligned_uncompressed_size as usize ..(aligned_uncompressed_size + BLOB_CCT_HEADER_SIZE) as usize] - .copy_from_slice(header); - + .copy_from_slice(&header); Ok(()) } @@ -1112,6 +1157,7 @@ impl BlobMetaChunkArray { uncompressed_offset: u64, uncompressed_size: u32, compressed: bool, + encrypted: bool, is_batch: bool, data: u64, ) { @@ -1123,6 +1169,7 @@ impl BlobMetaChunkArray { meta.set_uncompressed_offset(uncompressed_offset); meta.set_uncompressed_size(uncompressed_size); meta.set_compressed(compressed); + meta.set_encrypted(encrypted); meta.set_batch(is_batch); meta.set_data(data); v.push(meta); diff --git a/storage/src/meta/zran.rs b/storage/src/meta/zran.rs index 6c039a93520..0375a41847f 100644 --- a/storage/src/meta/zran.rs +++ b/storage/src/meta/zran.rs @@ -177,6 +177,7 @@ impl ZranContextGenerator { chunk.set_zran_index(info.ci_index); chunk.set_zran_offset(info.ci_offset); chunk.set_compressed(true); + chunk.set_encrypted(false); self.uncomp_pos += round_up_4k(info.ci_len as u64); diff --git a/utils/src/crypt.rs b/utils/src/crypt.rs index d387745cfdc..c9c0a55f68a 100644 --- a/utils/src/crypt.rs +++ b/utils/src/crypt.rs @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Alibaba Cloud. All rights reserved. +// Copyright (C) 2022-2023 Alibaba Cloud. All rights reserved. // // SPDX-License-Identifier: Apache-2.0 @@ -9,7 +9,27 @@ use std::fmt::{self, Debug, Formatter}; use std::io::Error; use std::str::FromStr; -use openssl::symm; +use openssl::{rand, symm}; + +// The length of the data unit to be encrypted. +pub const DATA_UNIT_LENGTH: usize = 16; +// The length of thd iv (Initialization Vector) to do AES-XTS encryption. +pub const AES_XTS_IV_LENGTH: usize = 16; +// The length of the key to do AES-128-XTS encryption. +pub const AES_128_XTS_KEY_LENGTH: usize = 32; +// The length of the key to do AES-256-XTS encryption. +pub const AES_256_XTS_KEY_LENGTH: usize = 64; + +// The padding magic end. +pub const PADDING_MAGIC_END: [u8; 2] = [0x78, 0x90]; +// DATA_UNIT_LENGTH + length of PADDING_MAGIC_FLAG. +pub const PADDING_LENGTH: usize = 18; +// Openssl rejects keys with identical first and second halves for xts. +// Use a default key for such cases. +const DEFAULT_CE_KEY: [u8; 32] = [ + 0xac, 0xed, 0x14, 0x69, 0x94, 0x23, 0x1e, 0xca, 0x44, 0x8c, 0xed, 0x2f, 0x6b, 0x40, 0x0c, 0x00, + 0xfd, 0xbb, 0x3f, 0xac, 0xdd, 0xc7, 0xd9, 0xee, 0x83, 0xf6, 0x5c, 0xd9, 0x3c, 0xaa, 0x28, 0x7c, +]; /// Supported cipher algorithms. #[repr(u32)] @@ -159,16 +179,18 @@ impl Cipher { match self { Cipher::None => Ok(Cow::from(data)), Cipher::Aes128Xts(cipher) => { - assert_eq!(key.len(), 32); + assert_eq!(key.len(), AES_128_XTS_KEY_LENGTH); let mut buf; - let data = if data.len() >= 16 { + let data = if data.len() >= DATA_UNIT_LENGTH { data } else { // CMS (Cryptographic Message Syntax). - // This pads with the same value as the number of padding bytes. 
- let val = (16 - data.len()) as u8; - buf = [val; 16]; + // This pads with the same value as the number of padding bytes + // and appends the magic padding end. + let val = (DATA_UNIT_LENGTH - data.len()) as u8; + buf = [val; PADDING_LENGTH]; buf[..data.len()].copy_from_slice(data); + buf[DATA_UNIT_LENGTH..PADDING_LENGTH].copy_from_slice(&PADDING_MAGIC_END); &buf }; Self::cipher(*cipher, symm::Mode::Encrypt, key, iv, data) @@ -176,16 +198,15 @@ impl Cipher { .map_err(|e| eother!(format!("failed to encrypt data, {}", e))) } Cipher::Aes256Xts(cipher) => { - assert_eq!(key.len(), 64); + assert_eq!(key.len(), AES_256_XTS_KEY_LENGTH); let mut buf; - let data = if data.len() >= 16 { + let data = if data.len() >= DATA_UNIT_LENGTH { data } else { - // CMS (Cryptographic Message Syntax). - // This pads with the same value as the number of padding bytes. - let val = (16 - data.len()) as u8; - buf = [val; 16]; + let val = (DATA_UNIT_LENGTH - data.len()) as u8; + buf = [val; PADDING_LENGTH]; buf[..data.len()].copy_from_slice(data); + buf[DATA_UNIT_LENGTH..PADDING_LENGTH].copy_from_slice(&PADDING_MAGIC_END); &buf }; Self::cipher(*cipher, symm::Mode::Encrypt, key, iv, data) @@ -199,13 +220,7 @@ impl Cipher { } /// Decrypt encrypted data with optional IV and return the decrypted data. - pub fn decrypt( - &self, - key: &[u8], - iv: Option<&[u8]>, - data: &[u8], - size: usize, - ) -> Result, Error> { + pub fn decrypt(&self, key: &[u8], iv: Option<&[u8]>, data: &[u8]) -> Result, Error> { let mut data = match self { Cipher::None => Ok(data.to_vec()), Cipher::Aes128Xts(cipher) => Self::cipher(*cipher, symm::Mode::Decrypt, key, iv, data) @@ -218,18 +233,19 @@ impl Cipher { }?; // Trim possible padding. - if data.len() > size { - if data.len() != 16 { - return Err(einval!("Cipher::decrypt: invalid padding data")); - } - let val = (16 - size) as u8; - for item in data.iter().skip(size) { - if *item != val { - return Err(einval!("Cipher::decrypt: invalid padding data")); - } + if data.len() == PADDING_LENGTH + && data[PADDING_LENGTH - PADDING_MAGIC_END.len()..PADDING_LENGTH] == PADDING_MAGIC_END + { + let val = data[DATA_UNIT_LENGTH - 1] as usize; + if val < DATA_UNIT_LENGTH { + data.truncate(DATA_UNIT_LENGTH - val); + } else { + return Err(einval!(format!( + "Cipher::decrypt: invalid padding data, value {}", + val, + ))); } - data.truncate(size); - } + }; Ok(data) } @@ -277,8 +293,8 @@ impl Cipher { match self { Cipher::None => plaintext_size, Cipher::Aes128Xts(_) | Cipher::Aes256Xts(_) => { - if plaintext_size < 16 { - 16 + if plaintext_size < DATA_UNIT_LENGTH { + DATA_UNIT_LENGTH } else { plaintext_size } @@ -320,6 +336,74 @@ impl Cipher { out.truncate(count + rest); Ok(out) } + + pub fn generate_key_for_aes_xts(size: usize) -> Result, Error> { + let mut buf = vec![0u8; size]; + if let Err(e) = rand::rand_bytes(&mut buf) { + Err(eother!(format!( + "failed to generate key for Aes256Xts, {}", + e + ))) + } else { + Ok(Self::tweak_key_for_xts(&buf).to_vec()) + } + } + + pub fn generate_random_iv(size: usize) -> Result, Error> { + let mut buf = vec![0u8; size]; + if let Err(e) = rand::rand_bytes(&mut buf) { + Err(eother!(format!("failed to generate iv, {}", e))) + } else { + Ok(buf) + } + } +} + +/// Struct to provide context information for data encryption/decryption. +#[derive(Default, Debug, Clone)] +pub struct CipherContext { + key: Vec, + iv: Vec, + convergent_encryption: bool, +} + +impl CipherContext { + /// Create a new instance of [CipherContext]. 
+ pub fn new(key: Vec, iv: Vec, convergent_encryption: bool) -> Result { + if key.len() != 32 { + return Err(einval!("invalid key length for encryption")); + } else if key[0..16] == key[16..32] { + return Err(einval!("invalid symmetry key for encryption")); + } + + Ok(CipherContext { + key, + iv, + convergent_encryption, + }) + } + + /// Get context information from data for encryption/decryption. + pub fn generate_cipher_context<'a>( + &'a self, + data: &'a [u8; AES_128_XTS_KEY_LENGTH], + ) -> (&'a [u8], Vec) { + let iv = vec![0u8; 16]; + if self.convergent_encryption { + if data[0..16] == data[16..32] { + (&DEFAULT_CE_KEY, iv) + } else { + (data, iv) + } + } else { + (&self.key, iv) + } + } + + /// Get context information for meta data encryption/decryption. + pub fn get_meta_cipher_context(&self) -> (&[u8], &[u8]) { + (&self.key, &self.iv) + } } /// A customized buf allocator that avoids zeroing @@ -353,7 +437,7 @@ mod tests { .encrypt(key.as_slice(), Some(&[0u8; 16]), b"1") .unwrap(); assert_eq!(ciphertext1, ciphertext2); - assert_eq!(ciphertext2.len(), 16); + assert_eq!(ciphertext2.len(), PADDING_LENGTH); let ciphertext3 = cipher .encrypt(key.as_slice(), Some(&[0u8; 16]), b"11111111111111111") @@ -386,7 +470,7 @@ mod tests { .encrypt(key.as_slice(), Some(&[0u8; 16]), b"1") .unwrap(); assert_eq!(ciphertext1, ciphertext2); - assert_eq!(ciphertext2.len(), 16); + assert_eq!(ciphertext2.len(), PADDING_LENGTH); let ciphertext3 = cipher .encrypt(key.as_slice(), Some(&[0u8; 16]), b"11111111111111111") @@ -416,7 +500,7 @@ mod tests { .encrypt(key.as_slice(), Some(&[0u8; 16]), b"1") .unwrap(); let plaintext1 = cipher - .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext1, 1) + .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext1) .unwrap(); assert_eq!(&plaintext1, b"1"); @@ -424,7 +508,7 @@ mod tests { .encrypt(key.as_slice(), Some(&[0u8; 16]), b"11111111111111111") .unwrap(); let plaintext2 = cipher - .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext2, 17) + .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext2) .unwrap(); assert_eq!(&plaintext2, b"11111111111111111"); @@ -432,7 +516,7 @@ mod tests { .encrypt(key.as_slice(), Some(&[1u8; 16]), b"11111111111111111") .unwrap(); let plaintext3 = cipher - .decrypt(key.as_slice(), Some(&[1u8; 16]), &ciphertext3, 17) + .decrypt(key.as_slice(), Some(&[1u8; 16]), &ciphertext3) .unwrap(); assert_eq!(&plaintext3, b"11111111111111111"); } @@ -447,7 +531,7 @@ mod tests { .encrypt(key.as_slice(), Some(&[0u8; 16]), b"1") .unwrap(); let plaintext1 = cipher - .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext1, 1) + .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext1) .unwrap(); assert_eq!(&plaintext1, b"1"); @@ -455,7 +539,7 @@ mod tests { .encrypt(key.as_slice(), Some(&[0u8; 16]), b"11111111111111111") .unwrap(); let plaintext2 = cipher - .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext2, 17) + .decrypt(key.as_slice(), Some(&[0u8; 16]), &ciphertext2) .unwrap(); assert_eq!(&plaintext2, b"11111111111111111"); @@ -463,7 +547,7 @@ mod tests { .encrypt(key.as_slice(), Some(&[1u8; 16]), b"11111111111111111") .unwrap(); let plaintext3 = cipher - .decrypt(key.as_slice(), Some(&[1u8; 16]), &ciphertext3, 17) + .decrypt(key.as_slice(), Some(&[1u8; 16]), &ciphertext3) .unwrap(); assert_eq!(&plaintext3, b"11111111111111111"); }
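For reference, a minimal sketch (not part of the patch) of driving the new nydus_utils::crypt API added above: generating a random AES-128-XTS key/IV, building a CipherContext, and doing an encrypt/decrypt round trip the same way the blob builder and cache do. The standalone main wrapper and the sample payload bytes are illustrative assumptions only.

    use nydus_utils::crypt::{self, Cipher, CipherContext, AES_128_XTS_KEY_LENGTH, AES_XTS_IV_LENGTH};

    fn main() -> std::io::Result<()> {
        // Per-blob key/IV, generated the same way BlobManager::new_blob_ctx does.
        let key = Cipher::generate_key_for_aes_xts(AES_128_XTS_KEY_LENGTH)?;
        let iv = Cipher::generate_random_iv(AES_XTS_IV_LENGTH)?;
        let ctx = CipherContext::new(key, iv, false)?;
        let cipher = crypt::Algorithm::Aes128Xts.new_cipher()?;

        // Encrypt a (compressed) chunk payload and decrypt it back.
        let (k, i) = ctx.get_meta_cipher_context();
        let ciphertext = cipher.encrypt(k, Some(i), b"example chunk payload")?;
        let plaintext = cipher.decrypt(k, Some(i), &ciphertext)?;
        assert_eq!(&plaintext, b"example chunk payload");
        Ok(())
    }

When `--encrypt` (`-E`) is passed to nydus-image, the builder performs these steps per data blob and records the chunk encryption key and iv in the RAFS v6 blob table entry as described in the commit message.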