diff --git a/Cargo.lock b/Cargo.lock
index a5ea303314b..adb81d21a78 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1310,12 +1310,15 @@ dependencies = [
  "nydus-rafs",
  "nydus-storage",
  "nydus-utils",
+ "rand",
  "serde",
  "serde_json",
  "sha2",
  "tar",
+ "tempfile",
  "vmm-sys-util",
  "xattr",
+ "zstd 0.12.4",
 ]
 
 [[package]]
@@ -1500,7 +1503,7 @@ dependencies = [
  "thiserror",
  "tokio",
  "vmm-sys-util",
- "zstd",
+ "zstd 0.11.2+zstd.1.5.2",
 ]
 
 [[package]]
@@ -2780,7 +2783,16 @@ version = "0.11.2+zstd.1.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4"
 dependencies = [
- "zstd-safe",
+ "zstd-safe 5.0.2+zstd.1.5.2",
+]
+
+[[package]]
+name = "zstd"
+version = "0.12.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c"
+dependencies = [
+ "zstd-safe 6.0.6",
 ]
 
 [[package]]
@@ -2793,12 +2805,22 @@ dependencies = [
  "zstd-sys",
 ]
 
+[[package]]
+name = "zstd-safe"
+version = "6.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581"
+dependencies = [
+ "libc",
+ "zstd-sys",
+]
+
 [[package]]
 name = "zstd-sys"
-version = "2.0.1+zstd.1.5.2"
+version = "2.0.13+zstd.1.5.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b"
+checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa"
 dependencies = [
  "cc",
- "libc",
+ "pkg-config",
 ]
diff --git a/builder/Cargo.toml b/builder/Cargo.toml
index fa76a36f947..a1f95b3dab4 100644
--- a/builder/Cargo.toml
+++ b/builder/Cargo.toml
@@ -22,6 +22,9 @@ sha2 = "0.10.2"
 tar = "0.4.40"
 vmm-sys-util = "0.11.0"
 xattr = "1.0.1"
+rand = "0.8.5"
+zstd = "0.12"
+tempfile = "3.2"
 
 nydus-api = { version = "0.3", path = "../api" }
 nydus-rafs = { version = "0.3", path = "../rafs" }
diff --git a/builder/src/chunkdict_generator.rs b/builder/src/chunkdict_generator.rs
index 4f7ab105d2b..07733ebc7b1 100644
--- a/builder/src/chunkdict_generator.rs
+++ b/builder/src/chunkdict_generator.rs
@@ -16,20 +16,38 @@ use super::core::node::{ChunkSource, NodeInfo};
 use super::{BlobManager, Bootstrap, BootstrapManager, BuildContext, BuildOutput, Tree};
+use crate::core::blob::Blob;
 use crate::core::node::Node;
-use crate::NodeChunk;
-use anyhow::Result;
+use crate::{ArtifactWriter, BlobContext, NodeChunk};
+use anyhow::{Ok, Result};
+use nydus_api::BackendConfigV2;
 use nydus_rafs::metadata::chunk::ChunkWrapper;
 use nydus_rafs::metadata::inode::InodeWrapper;
-use nydus_rafs::metadata::layout::RafsXAttrs;
+use nydus_rafs::metadata::layout::v6::RafsV6BlobTable;
+use nydus_rafs::metadata::layout::{RafsBlobTable, RafsXAttrs};
+use nydus_storage::device::{BlobFeatures, BlobInfo};
+use nydus_storage::factory::BlobFactory;
 use nydus_storage::meta::BlobChunkInfoV1Ondisk;
+use nydus_utils::compress;
 use nydus_utils::compress::Algorithm;
 use nydus_utils::digest::RafsDigest;
+use tempfile::TempDir;
+
+use crate::finalize_blob;
+use crate::Artifact;
+use core::panic;
 use std::ffi::OsString;
+use std::fs::File;
+use std::io::Write;
 use std::mem::size_of;
+use std::ops::Add;
+use std::ops::{Rem, Sub};
 use std::path::PathBuf;
+use std::rc::Rc;
 use std::str::FromStr;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
+use std::u32;
+use zstd::decode_all;
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct ChunkdictChunkInfo {
@@ -56,6 +74,11 @@ pub struct ChunkdictBlobInfo {
 /// Struct to generate chunkdict RAFS bootstrap.
 pub struct Generator {}
 
+struct BlobIdAndCompressor {
+    pub blob_id: String,
+    pub compressor: compress::Algorithm,
+}
+
 impl Generator {
     // Generate chunkdict RAFS bootstrap.
     pub fn generate(
@@ -90,6 +113,281 @@ impl Generator {
         BuildOutput::new(blob_mgr, &bootstrap_mgr.bootstrap_storage)
     }
 
+    /// Generate a new bootstrap for prefetch.
+    pub fn generate_prefetch(
+        tree: &mut Tree,
+        ctx: &mut BuildContext,
+        bootstrap_mgr: &mut BootstrapManager,
+        blobtable: &mut RafsV6BlobTable,
+        blobs_dir_path: PathBuf,
+    ) -> Result<()> {
+        let (prefetch_nodes, _) = ctx.prefetch.get_file_nodes();
+        for node in prefetch_nodes {
+            let node = node.lock().unwrap();
+            match node.inode {
+                InodeWrapper::Ref(_) => {
+                    debug!("Node Wrapper: Reference")
+                }
+                _ => {
+                    debug!("Not Reference")
+                }
+            }
+        }
+
+        // Create a new blob for the prefetch layer.
+        let blob_layer_num = blobtable.entries.len();
+        // TODO: add the appropriate BlobFeatures.
+        let mut prefetch_blob_info = BlobInfo::new(
+            blob_layer_num as u32,
+            String::from("Prefetch-blob"),
+            0,
+            0,
+            ctx.chunk_size,
+            // A chunk count of zero makes BlobInfo::new() tag an extra feature,
+            // so use a placeholder until the real count is known.
+            u32::MAX,
+            BlobFeatures::ALIGNED
+                | BlobFeatures::INLINED_CHUNK_DIGEST
+                | BlobFeatures::HAS_TAR_HEADER
+                | BlobFeatures::HAS_TOC
+                | BlobFeatures::CAP_TAR_TOC,
+        );
+
+        // For every node in the prefetch list, rewrite its chunk offsets and blob id.
+        let (file_nodes_prefetch, _) = ctx.prefetch.get_file_nodes();
+
+        let mut backend_config = BackendConfigV2 {
+            backend_type: String::from("localfs"),
+            localdisk: None,
+            localfs: Some(nydus_api::LocalFsConfig {
+                dir: blobs_dir_path.display().to_string(),
+                alt_dirs: Vec::new(),
+                ..Default::default()
+            }),
+            oss: None,
+            s3: None,
+            registry: None,
+            http_proxy: None,
+        };
+
+        // Reconstruct the prefetch files from the existing blobs.
+        let mut blobs_id_and_compressor: Vec<BlobIdAndCompressor> = Vec::new();
+        for blob in &blobtable.entries {
+            blobs_id_and_compressor.push(BlobIdAndCompressor {
+                blob_id: blob.blob_id(),
+                compressor: blob.compressor(),
+            });
+        }
+
+        let tmp_dir = TempDir::new().unwrap();
+        let tmp_path = tmp_dir.into_path();
+        debug!("temp path: {}", tmp_path.display());
+
+        Self::revert_files(
+            blobs_id_and_compressor,
+            file_nodes_prefetch.clone(),
+            &mut backend_config,
+            tmp_path.clone(),
+        );
+
+        let prefetch_blob_index = prefetch_blob_info.blob_index();
+        let mut chunk_count = 0;
+        // Every chunk needs to be aligned to 4 KiB.
+        let mut prefetch_blob_offset = 0;
+        let mut meta_uncompressed_size = 0;
+        let mut chunk_index_in_prefetch = 0;
+        for node in &file_nodes_prefetch {
+            let child = tree.get_node(&node.lock().unwrap().path()).unwrap();
+            let mut child = child.node.lock().unwrap();
+            child.layer_idx = prefetch_blob_index as u16;
+            for chunk in &mut child.chunks {
+                chunk_count += 1;
+                let inner = Arc::make_mut(&mut chunk.inner);
+                inner.set_blob_index(prefetch_blob_index);
+                inner.set_index(chunk_index_in_prefetch);
+                chunk_index_in_prefetch += 1;
+                inner.set_compressed_offset(prefetch_blob_offset);
+                inner.set_uncompressed_offset(prefetch_blob_offset);
+                prefetch_blob_offset += inner.uncompressed_size() as u64;
+                meta_uncompressed_size += inner.uncompressed_size() as u64;
+                prefetch_blob_offset = Self::align_to_4k(prefetch_blob_offset);
+                // Account for this chunk's entry in the chunk-info (ci) metadata.
+                prefetch_blob_info.set_meta_ci_uncompressed_size(
+                    (prefetch_blob_info.meta_ci_uncompressed_size()
+                        + size_of::<BlobChunkInfoV1Ondisk>() as u64) as usize,
+                );
+                prefetch_blob_info.set_meta_ci_compressed_size(
+                    (prefetch_blob_info.meta_ci_compressed_size()
+                        + size_of::<BlobChunkInfoV1Ondisk>() as u64) as usize,
+                );
+            }
+        }
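+        // At this point every prefetch chunk points at the new blob, its offsets
+        // are 4 KiB aligned, and one BlobChunkInfoV1Ondisk slot per chunk has
+        // been accounted in the ci table sizes above.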
+
+        // Finalize the prefetch blob metadata; the ci table lands after the
+        // tar header (0x200 bytes) plus the aligned chunk data.
+        prefetch_blob_info.set_meta_ci_offset(0x200 + meta_uncompressed_size as usize);
+        prefetch_blob_info.set_chunk_count(chunk_count);
+        prefetch_blob_info.set_compressed_size(prefetch_blob_offset as usize);
+        prefetch_blob_info.set_uncompressed_size(prefetch_blob_offset as usize);
+        prefetch_blob_info.set_compressor(Algorithm::Zstd);
+
+        let mut blob_table_withprefetch = RafsV6BlobTable::new();
+        for blob in blobtable.entries.iter() {
+            blob_table_withprefetch.entries.push(blob.clone());
+        }
+        blob_table_withprefetch
+            .entries
+            .push(prefetch_blob_info.clone().into());
+
+        // Build the prefetch blob.
+        let mut prefetch_build_ctx = BuildContext {
+            blob_id: String::from("Prefetch-blob"),
+            compressor: ctx.compressor,
+            prefetch: ctx.prefetch.clone(),
+            ..Default::default()
+        };
+
+        let mut prefetch_blob_mgr = BlobManager::new(nydus_utils::digest::Algorithm::Blake3);
+        // prefetch_blob_mgr.set_current_blob_index(0);
+        let mut prefetch_blob_ctx =
+            BlobContext::from(&prefetch_build_ctx, &prefetch_blob_info, ChunkSource::Build)
+                .unwrap();
+        prefetch_blob_ctx.blob_meta_info_enabled = true;
+        prefetch_blob_mgr.add_blob(prefetch_blob_ctx);
+
+        let mut blob_writer: Box<dyn Artifact> = Box::new(
+            ArtifactWriter::new(crate::ArtifactStorage::SingleFile(PathBuf::from(
+                "./prefetch_blob",
+            )))
+            .unwrap(),
+        );
+        Blob::dump(
+            &prefetch_build_ctx,
+            &mut prefetch_blob_mgr,
+            &mut *blob_writer,
+            Some(tmp_path),
+        )
+        .unwrap();
+        if let Some((_, blob_ctx)) = prefetch_blob_mgr.get_current_blob() {
+            blob_ctx.set_meta_info_enabled(true);
+            Blob::dump_meta_data(&prefetch_build_ctx, blob_ctx, blob_writer.as_mut()).unwrap();
+        } else {
+            panic!("failed to get the current blob from the prefetch blob manager");
+        }
+        // Clear the blob id of the prefetch build context so that
+        // finalize_blob() regenerates it from the blob digest.
+        prefetch_build_ctx.blob_id = String::from("");
+        prefetch_blob_mgr.get_current_blob().unwrap().1.blob_id = String::from("");
+        finalize_blob(
+            &mut prefetch_build_ctx,
+            &mut prefetch_blob_mgr,
+            blob_writer.as_mut(),
+        )?;
+        debug!("prefetch blob id: {}", prefetch_build_ctx.blob_id);
+
+        // Build the bootstrap.
+        let mut bootstrap_ctx = bootstrap_mgr.create_ctx().unwrap();
+        let mut bootstrap = Bootstrap::new(tree.clone()).unwrap();
+        bootstrap.build(ctx, &mut bootstrap_ctx).unwrap();
+
+        // Now that the prefetch blob id has been generated, rewrite it into
+        // the blob table entry.
+        let updated_entries: Vec<Arc<BlobInfo>> = blob_table_withprefetch
+            .entries
+            .iter()
+            .map(|blobinfo| {
+                if blobinfo.blob_id() == "Prefetch-blob" {
+                    let mut prefetch_blob_info = (**blobinfo).clone();
+                    prefetch_blob_info.set_blob_id(prefetch_build_ctx.blob_id.clone());
+                    Arc::new(prefetch_blob_info)
+                } else {
+                    Arc::clone(blobinfo)
+                }
+            })
+            .collect();
+        blob_table_withprefetch.entries = updated_entries;
+
+        // Dump the bootstrap.
+        let storage = &mut bootstrap_mgr.bootstrap_storage;
+        let blob_table_withprefetch = RafsBlobTable::V6(blob_table_withprefetch);
+        bootstrap.dump(ctx, storage, &mut bootstrap_ctx, &blob_table_withprefetch)?;
+        Ok(())
+    }
+
+    /// Revert files from the blob.
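+    ///
+    /// For every prefetch node, read its chunks back from the original blobs
+    /// through a localfs backend, decompress them, and materialize the plain
+    /// file under `workdir`; Blob::dump() later re-reads these files when
+    /// writing the prefetch blob.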
+    fn revert_files(
+        blob_ids: Vec<BlobIdAndCompressor>,
+        nodes: Vec<Arc<Mutex<Node>>>,
+        backend: &mut BackendConfigV2,
+        workdir: PathBuf,
+    ) {
+        debug!("backend: {:?}", backend);
+        for node in nodes {
+            let node = node.lock().unwrap();
+            let blob_index = node.chunks.first().unwrap().inner.blob_index();
+            let blob_id = blob_ids.get(blob_index as usize).unwrap().blob_id.clone();
+            let mut node_backend = backend.clone();
+            let blob_dir = backend.localfs.as_ref().unwrap().dir.clone();
+            let mut blob_file = PathBuf::from(blob_dir);
+            blob_file.push(blob_id);
+            if let Some(localfs_config) = &mut node_backend.localfs {
+                localfs_config.blob_file = blob_file.display().to_string();
+            }
+
+            let blob_mgr = BlobFactory::new_backend(&node_backend, "Fix-Prefetch-Blob-ID").unwrap();
+
+            debug!("node path: {}", node.path().display());
+            let mut path = PathBuf::from(&workdir);
+            path.push(node.path().strip_prefix("/").unwrap());
+            let mut file = File::create(path).unwrap();
+            for chunk in &node.chunks {
+                let inner = &chunk.inner;
+                // Read the compressed chunk back from its original blob.
+                let blob_index = inner.blob_index();
+                debug!("blob index: {}", blob_index);
+                let BlobIdAndCompressor {
+                    blob_id,
+                    compressor,
+                } = blob_ids.get(blob_index as usize).unwrap();
+
+                let reader = blob_mgr.get_reader(blob_id.as_ref()).unwrap();
+                debug!("blob id: {}", blob_id);
+                let compressed_size = inner.compressed_size();
+                let compressed_offset = inner.compressed_offset();
+                let mut buf: Vec<u8> = vec![0; compressed_size as usize];
+                debug!(
+                    "reading compressed chunk: {} bytes at offset {}",
+                    compressed_size, compressed_offset
+                );
+                let size = reader.read(&mut buf, compressed_offset).unwrap();
+                debug!("read {} bytes", size);
+                match compressor {
+                    Algorithm::Zstd => {
+                        let revert = Self::decompress_zstd(&buf).unwrap();
+                        debug!("reverted size: {}", revert.len());
+                        file.write_all(&revert).unwrap();
+                    }
+                    _ => unimplemented!(),
+                }
+            }
+        }
+    }
+
+    fn decompress_zstd(compressed: &[u8]) -> Result<Vec<u8>> {
+        Ok(decode_all(compressed)?)
+    }
+
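+    // Round an offset up to the next 4 KiB boundary, e.g.:
+    //   align_to_4k(0u64)    == 0
+    //   align_to_4k(1u64)    == 4096
+    //   align_to_4k(4097u64) == 8192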
+    fn align_to_4k<T>(offset: T) -> T
+    where
+        T: Sub<Output = T> + Add<Output = T> + Rem<Output = T> + PartialEq + TryFrom<u64> + Copy,
+        <T as TryFrom<u64>>::Error: std::fmt::Debug,
+    {
+        let alignment = T::try_from(4096).unwrap();
+        let remainder = offset % alignment;
+        if remainder == T::try_from(0).unwrap() {
+            offset
+        } else {
+            offset + (alignment - remainder)
+        }
+    }
+
     /// Validate tree.
     fn validate_tree(tree: &Tree) -> Result<()> {
         let pre = &mut |t: &Tree| -> Result<()> {
@@ -278,3 +576,54 @@ impl Generator {
         Ok(())
     }
 }
+
+#[cfg(test)]
+mod test {
+    use std::env;
+
+    use nydus_rafs::fs::Rafs;
+
+    use crate::{core::prefetch, Features, Prefetch};
+
+    use super::*;
+
+    #[test]
+    #[ignore] // Requires a local test image under /root/nydusTestImage.
+    fn test_backend() {
+        println!("current dir: {}", env::current_dir().unwrap().display());
+        let backend_config = BackendConfigV2 {
+            backend_type: String::from("localfs"),
+            localdisk: None,
+            localfs: Some(nydus_api::LocalFsConfig {
+                blob_file: String::from("/root/nydusTestImage/test-image/blobs/f22c9758339fcf8fe77a4ca0b4deba2ededad9904bdf8e520df2c0277e666070"),
+                dir: String::from("/root/nydusTestImage/test-image/blobs/"),
+                alt_dirs: Vec::new(),
+            }),
+            oss: None,
+            s3: None,
+            registry: None,
+            http_proxy: None,
+        };
+
+        let blob_mgr = BlobFactory::new_backend(&backend_config, "Fix-Prefetch-Blob-ID").unwrap();
+        let reader = blob_mgr
+            .get_reader("f22c9758339fcf8fe77a4ca0b4deba2ededad9904bdf8e520df2c0277e666070")
+            .unwrap();
+        println!("reader created");
+        let mut buf2: Vec<u8> = vec![0; 19];
+        let size = reader.read(&mut buf2, 19).unwrap();
+        println!("size: {}", size);
+        println!("buf len: {}", buf2.len());
+
+        let revert = Generator::decompress_zstd(&buf2).unwrap();
+        println!("len: {}", revert.len());
+
+        let mut buf: Vec<u8> = vec![0; 19];
+        let size = reader.read(&mut buf, 0).unwrap();
+        println!("size: {}", size);
+
+        let revert = Generator::decompress_zstd(&buf).unwrap();
+        println!("len: {}", revert.len());
+    }
+}
+
+// TODO: read the blob, get its chunks, fix the dump-node-chunk path, and let
+// Blob::dump generate the blob.
diff --git a/builder/src/compact.rs b/builder/src/compact.rs
index 3ff27eeac69..95730a421e2 100644
--- a/builder/src/compact.rs
+++ b/builder/src/compact.rs
@@ -181,7 +181,6 @@ impl ChunkSet {
         Blob::dump_meta_data(build_ctx, new_blob_ctx, &mut blob_writer)?;
         let blob_id = new_blob_ctx.blob_id();
         blob_writer.finalize(blob_id)?;
-
         Ok(changed_chunks)
     }
 }
diff --git a/builder/src/core/blob.rs b/builder/src/core/blob.rs
index cc8e4d56737..7b130ebaae5 100644
--- a/builder/src/core/blob.rs
+++ b/builder/src/core/blob.rs
@@ -3,6 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0
 
 use std::borrow::Cow;
+use std::path::PathBuf;
 use std::slice;
 
 use anyhow::{Context, Result};
@@ -27,15 +28,17 @@ impl Blob {
         ctx: &BuildContext,
         blob_mgr: &mut BlobManager,
         blob_writer: &mut dyn Artifact,
+        work_dir: Option<PathBuf>,
     ) -> Result<()> {
         match ctx.conversion_type {
             ConversionType::DirectoryToRafs => {
+                let is_prefetch = ctx.blob_id == "Prefetch-blob";
                 let mut chunk_data_buf = vec![0u8; RAFS_MAX_CHUNK_SIZE as usize];
                 let (inodes, prefetch_entries) = BlobLayout::layout_blob_simple(&ctx.prefetch)?;
                 for (idx, node) in inodes.iter().enumerate() {
                     let mut node = node.lock().unwrap();
                     let size = node
-                        .dump_node_data(ctx, blob_mgr, blob_writer, &mut chunk_data_buf)
+                        .dump_node_data(ctx, blob_mgr, blob_writer, &mut chunk_data_buf, is_prefetch, work_dir.clone())
                         .context("failed to dump blob chunks")?;
                     if idx < prefetch_entries {
                         if let Some((_, blob_ctx)) = blob_mgr.get_current_blob() {
@@ -43,6 +46,7 @@ impl Blob {
                         }
                     }
                 }
+                debug!("dump node data done");
                 Self::finalize_blob_data(ctx, blob_mgr, blob_writer)?;
             }
             ConversionType::TarToRafs
@@ -159,9 +163,11 @@ impl Blob {
     ) -> Result<()> {
         // Dump blob meta for v6 when it has chunks or bootstrap is to be inlined.
         if !blob_ctx.blob_meta_info_enabled || blob_ctx.uncompressed_blob_size == 0 {
+            debug!(
+                "skip dumping blob meta: meta not enabled or empty blob (uncompressed size {})",
+                blob_ctx.uncompressed_blob_size
+            );
             return Ok(());
         }
-
+        debug!("dumping blob meta");
         // Prepare blob meta information data.
         let encrypt = ctx.cipher != crypt::Algorithm::None;
         let cipher_obj = &blob_ctx.cipher_object;
@@ -297,7 +303,6 @@ impl Blob {
                 size,
             )?;
         }
-
         Ok(())
     }
 }
diff --git a/builder/src/core/context.rs b/builder/src/core/context.rs
index eb7a77728c8..4bc3d5d4bbb 100644
--- a/builder/src/core/context.rs
+++ b/builder/src/core/context.rs
@@ -11,11 +11,12 @@ use std::convert::TryFrom;
 use std::fs::{remove_file, rename, File, OpenOptions};
 use std::io::{BufWriter, Cursor, Read, Seek, Write};
 use std::mem::size_of;
-use std::os::unix::fs::FileTypeExt;
 use std::path::{Display, Path, PathBuf};
 use std::str::FromStr;
 use std::sync::{Arc, Mutex};
 use std::{fmt, fs};
+use std::result::Result::Ok;
+use std::os::unix::fs::FileTypeExt;
 
 use anyhow::{anyhow, Context, Error, Result};
 use nydus_utils::crypt::{self, Cipher, CipherContext};
@@ -712,6 +713,7 @@ impl BlobContext {
         blob_ctx.blob_toc_size = toc_size;
 
         if blob.meta_ci_is_valid() {
+            debug!("meta ci is valid");
             blob_ctx
                 .blob_meta_header
                 .set_ci_compressor(blob.meta_ci_compressor());
@@ -898,6 +900,11 @@ impl BlobManager {
         }
     }
 
+    /// Set the current blob index.
+    pub fn set_current_blob_index(&mut self, index: usize) {
+        self.current_blob_index = Some(index as u32)
+    }
+
     fn new_blob_ctx(ctx: &BuildContext) -> Result<BlobContext> {
         let (cipher_object, cipher_ctx) = match ctx.cipher {
             crypt::Algorithm::None => (Default::default(), None),
diff --git a/builder/src/core/layout.rs b/builder/src/core/layout.rs
index 9a3ef83ddbe..8fd6e214af6 100644
--- a/builder/src/core/layout.rs
+++ b/builder/src/core/layout.rs
@@ -16,7 +16,7 @@ impl BlobLayout {
         let (pre, non_pre) = prefetch.get_file_nodes();
         let mut inodes: Vec<TreeNode> = pre
             .into_iter()
-            .filter(|x| Self::should_dump_node(x.lock().unwrap().deref()))
+            // .filter(|x| Self::should_dump_node(x.lock().unwrap().deref()))
             .collect();
         let mut non_prefetch_inodes: Vec<TreeNode> = non_pre
             .into_iter()
diff --git a/builder/src/core/node.rs b/builder/src/core/node.rs
index aa73793973d..a7438a0fcf6 100644
--- a/builder/src/core/node.rs
+++ b/builder/src/core/node.rs
@@ -221,14 +221,32 @@ impl Node {
         blob_mgr: &mut BlobManager,
         blob_writer: &mut dyn Artifact,
         chunk_data_buf: &mut [u8],
+        is_prefetch: bool,
+        work_dir: Option<PathBuf>,
     ) -> Result<u64> {
-        let mut reader = if self.is_reg() {
-            let file = File::open(self.path())
-                .with_context(|| format!("failed to open node file {:?}", self.path()))?;
-            Some(file)
+        let mut reader: Option<File> = None;
+        if is_prefetch {
+            // For the prefetch blob, read the file content reverted under
+            // work_dir instead of the original filesystem path.
+            if let Some(ref dir) = work_dir {
+                let path = dir.join(self.path().strip_prefix("/").unwrap());
+                reader = Some(File::open(&path).unwrap_or_else(|e| {
+                    panic!("failed to open reverted file {}: {}", path.display(), e)
+                }));
+                debug!("replaced the reader with path: {}", path.display());
+            }
         } else {
-            None
-        };
+            reader = if self.is_reg() {
+                let file = File::open(self.path())
+                    .with_context(|| format!("failed to open node file {:?}", self.path()))?;
+                Some(file)
+            } else {
+                None
+            };
+        }
 
         self.dump_node_data_with_reader(ctx, blob_mgr, blob_writer, reader.as_mut(), chunk_data_buf)
     }
@@ -343,7 +361,6 @@ impl Node {
         if let Some(h) = inode_hasher {
             self.inode.set_digest(h.digest_finalize());
         }
-
         Ok(blob_size)
     }
 
@@ -355,7 +372,9 @@ impl Node {
     ) -> Result<(ChunkWrapper, Option<BlobChunkInfoV1Ondisk>)> {
         let mut chunk = self.inode.create_chunk();
         let mut chunk_info = None;
+        debug!("compressor: {}", ctx.compressor);
         if let Some(ref zran) = ctx.blob_zran_generator {
+            debug!("reading chunk via zran generator");
             let mut zran = zran.lock().unwrap();
             zran.start_chunk(ctx.chunk_size as u64)?;
             reader
@@ -368,6 +387,7 @@ impl Node {
             chunk_info = Some(info);
         } else if let Some(ref tar_reader) = ctx.blob_tar_reader {
             // For `tar-ref` case
+            debug!("reading chunk via tar reader");
             let pos = tar_reader.position();
             chunk.set_compressed_offset(pos);
             chunk.set_compressed_size(buf.len() as u32);
@@ -376,6 +396,7 @@ impl Node {
                 .read_exact(buf)
                 .with_context(|| format!("failed to read node file {:?}", self.path()))?;
         } else {
+            debug!("reading chunk from plain file");
             reader
                 .read_exact(buf)
                 .with_context(|| format!("failed to read node file {:?}", self.path()))?;
@@ -994,26 +1015,50 @@ mod tests {
         let mut chunk_data_buf = [1u8; 32];
 
         node.inode.set_mode(0o755 | libc::S_IFDIR as u32);
-        let data_size =
-            node.dump_node_data(&ctx, &mut blob_mgr, &mut blob_writer, &mut chunk_data_buf);
+        let data_size = node.dump_node_data(
+            &ctx,
+            &mut blob_mgr,
+            &mut blob_writer,
+            &mut chunk_data_buf,
+            false,
+            None,
+        );
         assert!(data_size.is_ok());
         assert_eq!(data_size.unwrap(), 0);
 
         node.inode.set_mode(0o755 | libc::S_IFLNK as u32);
-        let data_size =
-            node.dump_node_data(&ctx, &mut blob_mgr, &mut blob_writer, &mut chunk_data_buf);
+        let data_size = node.dump_node_data(
+            &ctx,
+            &mut blob_mgr,
+            &mut blob_writer,
+            &mut chunk_data_buf,
+            false,
+            None,
+        );
         assert!(data_size.is_ok());
         assert_eq!(data_size.unwrap(), 0);
 
         node.inode.set_mode(0o755 | libc::S_IFBLK as u32);
-        let data_size =
-            node.dump_node_data(&ctx, &mut blob_mgr, &mut blob_writer, &mut chunk_data_buf);
+        let data_size = node.dump_node_data(
+            &ctx,
+            &mut blob_mgr,
+            &mut blob_writer,
+            &mut chunk_data_buf,
+            false,
+            None,
+        );
         assert!(data_size.is_ok());
         assert_eq!(data_size.unwrap(), 0);
 
         node.inode.set_mode(0o755 | libc::S_IFREG as u32);
-        let data_size =
-            node.dump_node_data(&ctx, &mut blob_mgr, &mut blob_writer, &mut chunk_data_buf);
+        let data_size = node.dump_node_data(
+            &ctx,
+            &mut blob_mgr,
+            &mut blob_writer,
+            &mut chunk_data_buf,
+            false,
+            None,
+        );
         assert!(data_size.is_ok());
         assert_eq!(data_size.unwrap(), 18);
     }
diff --git a/builder/src/core/overlay.rs b/builder/src/core/overlay.rs
index 7626ddd7b1b..a64ebe6da04 100644
--- a/builder/src/core/overlay.rs
+++ b/builder/src/core/overlay.rs
@@ -71,6 +71,16 @@ pub enum WhiteoutSpec {
     None,
 }
 
+impl fmt::Display for WhiteoutSpec {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        match self {
+            WhiteoutSpec::Oci => write!(f, "OCI"),
+            WhiteoutSpec::Overlayfs => write!(f, "Overlayfs"),
+            WhiteoutSpec::None => write!(f, "None"),
+        }
+    }
+}
+
 impl Default for WhiteoutSpec {
     fn default() -> Self {
         Self::Oci
diff --git a/builder/src/core/prefetch.rs b/builder/src/core/prefetch.rs
index b5695e05686..d9698a76008 100644
--- a/builder/src/core/prefetch.rs
+++ b/builder/src/core/prefetch.rs
@@ -6,13 +6,14 @@
 use std::path::PathBuf;
 use std::str::FromStr;
 
-use anyhow::{anyhow, Context, Error, Result};
+use anyhow::{anyhow, Context, Error, Ok, Result};
 use indexmap::IndexMap;
 use nydus_rafs::metadata::layout::v5::RafsV5PrefetchTable;
 use nydus_rafs::metadata::layout::v6::{calculate_nid, RafsV6PrefetchTable};
 
 use super::node::Node;
 use crate::core::tree::TreeNode;
+use crate::Tree;
 
 /// Filesystem data prefetch policy.
 #[derive(Clone, Copy, Debug, PartialEq)]
@@ -151,6 +152,22 @@ impl Prefetch {
         })
     }
 
+    /// `Prefetch::new()` only records the pattern keys; this method resolves
+    /// each pattern path to its node in `tree` and appends the node to the
+    /// pattern table. For now every pattern is assumed to be the absolute
+    /// path of a regular file.
+    pub fn init(&mut self, tree: &mut Tree) {
+        let mut nodes = Vec::new();
+        for (k, _) in &self.patterns {
+            let node = tree.get_node(k);
+            nodes.push(node.unwrap().node.clone());
+        }
+        for node in nodes {
+            self.insert(&node, &node.lock().unwrap());
+        }
+    }
+
     /// Insert node into the prefetch Vector if it matches prefetch rules,
     /// while recording the index of matched prefetch pattern,
     /// or insert it into non-prefetch Vector.
diff --git a/builder/src/core/tree.rs b/builder/src/core/tree.rs
index d701c2bbd9f..26b487213d5 100644
--- a/builder/src/core/tree.rs
+++ b/builder/src/core/tree.rs
@@ -22,7 +22,7 @@ use std::path::{Path, PathBuf};
 use std::rc::Rc;
 use std::sync::{Arc, Mutex, MutexGuard};
 
-use anyhow::{bail, Result};
+use anyhow::{bail, Context, Result};
 use nydus_rafs::metadata::chunk::ChunkWrapper;
 use nydus_rafs::metadata::inode::InodeWrapper;
 use nydus_rafs::metadata::layout::{bytes_to_os_str, RafsXAttrs};
@@ -247,6 +247,34 @@ impl Tree {
 
         Ok(())
     }
+
+    pub fn get_prefetch_nodes(&self, files: Vec<String>) -> Result<Vec<Node>> {
+        files
+            .iter()
+            .map(|file| {
+                self.get_node(Path::new(file))
+                    .context(format!("failed to get node for file: {}", file))
+                    .and_then(|tree| {
+                        tree.node
+                            .lock()
+                            .map_err(|e| anyhow::anyhow!("failed to lock node: {}", e))
+                            .map(|guard| guard.clone())
+                    })
+            })
+            .collect()
+    }
+
+    pub fn test_get_prefetch_nodes(&self, files: Vec<String>) {
+        match self.get_prefetch_nodes(files) {
+            Ok(nodes) => {
+                nodes
+                    .iter()
+                    .for_each(|node| println!("{}", node.path().display()));
+            }
+            Err(e) => {
+                println!("{}", e);
+            }
+        }
+    }
 }
 
 pub struct MetadataTreeBuilder<'a> {
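The `Prefetch::init` and `Tree` changes above are the pattern-resolution path that the new `optimize` subcommand relies on. A minimal sketch of how they fit together, assuming a tree already loaded from a bootstrap as `Command::optimize` does below (the helper name `wire_prefetch_patterns` is illustrative):

```rust
use nydus_builder::{Prefetch, PrefetchPolicy, Tree};

// Hypothetical helper: Prefetch::new() records the pattern keys (absolute
// paths of regular files, e.g. "/bin/bash"); init() resolves each pattern
// to its node in the tree.
fn wire_prefetch_patterns(tree: &mut Tree) -> anyhow::Result<Prefetch> {
    let mut prefetch = Prefetch::new(PrefetchPolicy::Fs)?;
    // After init(), get_file_nodes() returns the nodes that
    // generate_prefetch() packs into the dedicated prefetch blob.
    prefetch.init(tree);
    Ok(prefetch)
}
```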
diff --git a/builder/src/core/v6.rs b/builder/src/core/v6.rs
index 9dd1091d1e2..10f25edcb61 100644
--- a/builder/src/core/v6.rs
+++ b/builder/src/core/v6.rs
@@ -485,7 +485,6 @@ impl Node {
             }
             prev = Some((blob_idx, offset));
         }
-
         // Special optimization to enable page cache sharing for EROFS.
         let chunk_size = if is_continuous && inode.size() > ctx.chunk_size as u64 {
             inode.size().next_power_of_two()
diff --git a/builder/src/directory.rs b/builder/src/directory.rs
index f934f5111ac..f3f6d620b54 100644
--- a/builder/src/directory.rs
+++ b/builder/src/directory.rs
@@ -148,7 +148,7 @@ impl Builder for DirectoryBuilder {
 
         // Dump blob file
         timing_tracer!(
-            { Blob::dump(ctx, blob_mgr, blob_writer.as_mut()) },
+            { Blob::dump(ctx, blob_mgr, blob_writer.as_mut(), None) },
             "dump_blob"
         )?;
 
diff --git a/builder/src/lib.rs b/builder/src/lib.rs
index 54f47e264a7..e7fb1c521eb 100644
--- a/builder/src/lib.rs
+++ b/builder/src/lib.rs
@@ -202,7 +202,9 @@ fn finalize_blob(
     }
 
     let hash = blob_ctx.blob_hash.clone().finalize();
+    debug!("blob hash: {:x}", hash);
     let blob_meta_id = if ctx.blob_id.is_empty() {
+        debug!("blob id is empty, deriving it from the blob hash");
         format!("{:x}", hash)
     } else {
         assert!(!ctx.conversion_type.is_to_ref() || is_tarfs);
@@ -247,8 +249,12 @@ fn finalize_blob(
         if let Some(blob_cache) = ctx.blob_cache_generator.as_ref() {
             blob_cache.finalize(&blob_ctx.blob_id)?;
         }
+        debug!("finalized blob id: {}", blob_ctx.blob_id);
+        debug!("ctx blob id: {}", ctx.blob_id);
+        ctx.blob_id = blob_ctx.blob_id.clone();
+    }
-
     Ok(())
 }
diff --git a/builder/src/stargz.rs b/builder/src/stargz.rs
index d84a2d214a8..f8162a9f1dd 100644
--- a/builder/src/stargz.rs
+++ b/builder/src/stargz.rs
@@ -860,7 +860,7 @@ impl Builder for StargzBuilder {
 
         // Dump blob file
         timing_tracer!(
-            { Blob::dump(ctx, blob_mgr, blob_writer.as_mut()) },
+            { Blob::dump(ctx, blob_mgr, blob_writer.as_mut(), None) },
             "dump_blob"
         )?;
 
diff --git a/builder/src/tarball.rs b/builder/src/tarball.rs
index edc996ac553..30adf81b3eb 100644
--- a/builder/src/tarball.rs
+++ b/builder/src/tarball.rs
@@ -615,7 +615,7 @@ impl Builder for TarballBuilder {
 
         // Dump blob file
         timing_tracer!(
-            { Blob::dump(ctx, blob_mgr, blob_writer.as_mut()) },
+            { Blob::dump(ctx, blob_mgr, blob_writer.as_mut(), None) },
             "dump_blob"
         )?;
 
diff --git a/output/chunkdict_bootstrap b/output/chunkdict_bootstrap
new file mode 100644
index 00000000000..f0b3bdc8aea
Binary files /dev/null and b/output/chunkdict_bootstrap differ
diff --git a/output/nydus_bootstrap b/output/nydus_bootstrap
new file mode 100644
index 00000000000..cc4fd3b8ed3
Binary files /dev/null and b/output/nydus_bootstrap differ
diff --git a/output/nydus_prefetch_bootstrap b/output/nydus_prefetch_bootstrap
new file mode 100644
index 00000000000..52a9e559079
Binary files /dev/null and b/output/nydus_prefetch_bootstrap differ
diff --git a/rafs/src/metadata/direct_v6.rs b/rafs/src/metadata/direct_v6.rs
index 3330aea9451..8c3b8e9be5f 100644
--- a/rafs/src/metadata/direct_v6.rs
+++ b/rafs/src/metadata/direct_v6.rs
@@ -46,9 +46,7 @@ use crate::metadata::layout::v6::{
 };
 use crate::metadata::layout::{bytes_to_os_str, MetaRange, XattrName, XattrValue};
 use crate::metadata::{
-    Attr, Entry, Inode, RafsBlobExtraInfo, RafsInode, RafsInodeWalkAction, RafsInodeWalkHandler,
-    RafsSuperBlock, RafsSuperFlags, RafsSuperInodes, RafsSuperMeta, RAFS_ATTR_BLOCK_SIZE,
-    RAFS_MAX_NAME,
+    Attr, Entry, Inode, RafsBlobExtraInfo, RafsInode, RafsInodeWalkAction, RafsInodeWalkHandler, RafsSuperBlock, RafsSuperFlags, RafsSuperInodes, RafsSuperMeta, RAFS_ATTR_BLOCK_SIZE, RAFS_MAX_NAME
 };
 use crate::{MetaType, RafsError, RafsInodeExt, RafsIoReader, RafsResult};
 
@@ -206,6 +204,7 @@ impl DirectSuperBlockV6 {
         let mut blob_table = RafsV6BlobTable::new();
         let meta = &old_state.meta;
         r.seek(SeekFrom::Start(meta.blob_table_offset))?;
+
         blob_table.load(r, meta.blob_table_size, meta.chunk_size, meta.flags)?;
         let blob_extra_infos = rafsv6_load_blob_extra_info(meta, r)?;
 
@@ -1324,6 +1323,7 @@ impl RafsInodeExt for OndiskInodeWrapper {
     /// It depends on Self::validate() to ensure valid memory layout.
     fn get_chunk_info(&self, idx: u32) -> Result<Arc<dyn BlobChunkInfo>> {
         let state = self.state();
+        let inode = self.disk_inode(&state);
 
         if !self.is_reg() || idx >= self.get_chunk_count() {
             return Err(enoent!("invalid chunk info"));
@@ -1362,6 +1362,7 @@ impl RafsInodeExt for OndiskInodeWrapper {
         if chunk_map.is_none() {
             *chunk_map = Some(self.mapping.load_chunk_map()?);
         }
+
         match chunk_map.as_ref().unwrap().get(chunk_addr) {
             None => Err(enoent!(format!(
                 "failed to get chunk info for chunk {}/{}/{}",
diff --git a/rafs/src/metadata/inode.rs b/rafs/src/metadata/inode.rs
index 938b058ae6d..e4964b169b2 100644
--- a/rafs/src/metadata/inode.rs
+++ b/rafs/src/metadata/inode.rs
@@ -8,6 +8,7 @@ use std::mem::size_of;
 use std::ops::Deref;
 use std::sync::Arc;
 
+use fuse_backend_rs::overlayfs::Inode;
 use nydus_utils::digest::RafsDigest;
 
 use crate::metadata::cached_v5::CachedInodeV5;
@@ -17,7 +18,7 @@ use crate::metadata::direct_v6::OndiskInodeWrapper as OndiskInodeWrapperV6;
 use crate::metadata::layout::v5::{RafsV5ChunkInfo, RafsV5Inode};
 use crate::metadata::layout::v6::{RafsV6InodeCompact, RafsV6InodeExtended};
 use crate::metadata::layout::RafsXAttrs;
-use crate::metadata::{Inode, RafsVersion};
+use crate::metadata::RafsVersion;
 use crate::RafsInodeExt;
 
 /// An inode object wrapper for different RAFS versions.
@@ -165,7 +166,10 @@ impl InodeWrapper {
         match self {
             InodeWrapper::V5(i) => i.is_blkdev(),
             InodeWrapper::V6(i) => i.is_blkdev(),
-            InodeWrapper::Ref(_i) => unimplemented!(),
+            // TODO: for now, assume a prefetch (reference) node is never
+            // a block device.
+            InodeWrapper::Ref(_) => false,
         }
     }
 
@@ -174,7 +178,10 @@ impl InodeWrapper {
         match self {
             InodeWrapper::V5(i) => i.is_fifo(),
             InodeWrapper::V6(i) => i.is_fifo(),
-            InodeWrapper::Ref(_i) => unimplemented!(),
+            // TODO: for now, assume a prefetch (reference) node is never
+            // a FIFO.
+            InodeWrapper::Ref(_) => false,
         }
     }
 
@@ -564,7 +571,8 @@ impl InodeWrapper {
         match self {
             InodeWrapper::V5(_) => ChunkWrapper::V5(RafsV5ChunkInfo::new()),
             InodeWrapper::V6(_) => ChunkWrapper::V6(RafsV5ChunkInfo::new()),
-            InodeWrapper::Ref(_i) => unimplemented!(),
+            InodeWrapper::Ref(_) => ChunkWrapper::V6(RafsV5ChunkInfo::new()),
         }
     }
 
diff --git a/rafs/src/metadata/layout/v6.rs b/rafs/src/metadata/layout/v6.rs
index 6a64607fb07..4f02a6ccd20 100644
--- a/rafs/src/metadata/layout/v6.rs
+++ b/rafs/src/metadata/layout/v6.rs
@@ -1328,7 +1328,6 @@ impl RafsV6Device {
             }
             Err(_) => return Err(einval!("blob_id in RAFS v6 device entry is invalid")),
         }
-
         if self.blocks() == 0 {
             let msg = format!("invalid blocks {} in Rafs v6 device entry", self.blocks());
             return Err(einval!(msg));
@@ -1691,7 +1690,6 @@ impl RafsV6Blob {
             );
             return false;
         }
-
         let blob_features = match BlobFeatures::try_from(self.features) {
             Ok(v) => v,
             Err(_) => return false,
@@ -1773,7 +1771,7 @@ impl RafsV6Blob {
 #[derive(Clone, Debug, Default)]
 pub struct RafsV6BlobTable {
     /// Base blob information array.
-    entries: Vec<Arc<BlobInfo>>,
+    pub entries: Vec<Arc<BlobInfo>>,
 }
 
 impl RafsV6BlobTable {
diff --git a/src/bin/nydus-image/deduplicate.rs b/src/bin/nydus-image/deduplicate.rs
index c28130e023f..e34c6e63189 100644
--- a/src/bin/nydus-image/deduplicate.rs
+++ b/src/bin/nydus-image/deduplicate.rs
@@ -186,7 +186,6 @@ pub fn update_ctx_from_parent_bootstrap(
     bootstrap_path: &PathBuf,
 ) -> Result<()> {
     let (sb, _) = RafsSuper::load_from_file(bootstrap_path, Arc::new(ConfigV2::default()), false)?;
-
     // Obtain the features of the first blob to use as the features for the blobs in chunkdict.
     if let Some(first_blob) = sb.superblock.get_blob_infos().first() {
         ctx.blob_features = first_blob.features();
diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs
index 5fa3a3a3c10..344a83c10f3 100644
--- a/src/bin/nydus-image/main.rs
+++ b/src/bin/nydus-image/main.rs
@@ -21,6 +21,7 @@ use std::convert::TryFrom;
 use std::fs::{self, metadata, DirEntry, File, OpenOptions};
 use std::os::unix::fs::FileTypeExt;
 use std::path::{Path, PathBuf};
+use std::result::Result::Ok;
 use std::sync::{Arc, Mutex};
 
 use anyhow::{bail, Context, Result};
@@ -30,11 +31,14 @@ use nix::unistd::{getegid, geteuid};
 use nydus::{get_build_time_info, setup_logging};
 use nydus_api::{BuildTimeInfo, ConfigV2, LocalFsConfig};
 use nydus_builder::{
-    parse_chunk_dict_arg, ArtifactStorage, BlobCacheGenerator, BlobCompactor, BlobManager,
-    BootstrapManager, BuildContext, BuildOutput, Builder, ChunkdictBlobInfo, ChunkdictChunkInfo,
-    ConversionType, DirectoryBuilder, Feature, Features, Generator, HashChunkDict, Merger,
-    Prefetch, PrefetchPolicy, StargzBuilder, TarballBuilder, WhiteoutSpec,
+    parse_chunk_dict_arg, ArtifactStorage, BlobCacheGenerator, BlobCompactor,
+    BlobManager, BootstrapManager, BuildContext, BuildOutput, Builder, ChunkdictBlobInfo,
+    ChunkdictChunkInfo, ConversionType, DirectoryBuilder, Feature, Features, Generator,
+    HashChunkDict, Merger, Prefetch, PrefetchPolicy, StargzBuilder, TarballBuilder, Tree,
+    WhiteoutSpec,
 };
+
+use nydus_rafs::metadata::layout::v6::RafsV6BlobTable;
 use nydus_rafs::metadata::{MergeError, RafsSuper, RafsSuperConfig, RafsVersion};
 use nydus_storage::backend::localfs::LocalFs;
 use nydus_storage::backend::BlobBackend;
@@ -48,6 +52,7 @@ use nydus_utils::{
 };
 use serde::{Deserialize, Serialize};
 
+use crate::prefetch::update_ctx_from_bootstrap;
 use crate::unpack::{OCIUnpacker, Unpacker};
 use crate::validator::Validator;
 
@@ -58,6 +63,7 @@ use std::str::FromStr;
 
 mod deduplicate;
 mod inspect;
+mod prefetch;
 mod stat;
 mod unpack;
 mod validator;
@@ -529,6 +535,39 @@ fn prepare_cmd_args(bti_string: &'static str) -> App {
             .arg(arg_output_json.clone()),
     );
 
+    let app = app.subcommand(
+        App::new("optimize")
+            .about("Optimize an image by packing prefetch files into a dedicated blob")
+            .arg(
+                Arg::new("bootstrap")
+                    .help("File path of RAFS metadata")
+                    .short('B')
+                    .long("bootstrap")
+                    .required(true),
+            )
+            .arg(
+                Arg::new("prefetch-files")
+                    .long("prefetch-files")
+                    .short('p')
+                    .help("File paths to prefetch, may be given multiple times")
+                    .action(ArgAction::Append),
+            )
+            .arg(arg_config.clone())
+            .arg(
+                Arg::new("blob-dir")
+                    .long("blob-dir")
+                    .short('D')
+                    .conflicts_with("config")
+                    .help(
+                        "Directory for localfs storage backend, hosting data blobs and cache files",
+                    ),
+            ),
+    );
+
     #[cfg(target_os = "linux")]
     let app = app.subcommand(
         App::new("export")
@@ -808,6 +847,8 @@ fn main() -> Result<()> {
         Command::compact(matches, &build_info)
     } else if let Some(matches) = cmd.subcommand_matches("unpack") {
         Command::unpack(matches)
+    } else if let Some(matches) = cmd.subcommand_matches("optimize") {
+        Command::optimize(matches)
     } else {
         #[cfg(target_os = "linux")]
         if let Some(matches) = cmd.subcommand_matches("export") {
@@ -1090,6 +1131,7 @@ impl Command {
             features,
             encrypt,
         );
+
         build_ctx.set_fs_version(version);
         build_ctx.set_chunk_size(chunk_size);
         build_ctx.set_batch_size(batch_size);
@@ -1183,6 +1225,7 @@ impl Command {
             | ConversionType::TarToStargz
             | ConversionType::TargzToStargz => unimplemented!(),
         };
+
         let build_output = timing_tracer!(
             {
                 builder
@@ -1561,6 +1604,44 @@ impl Command {
         Ok(())
     }
 
+    fn optimize(matches: &ArgMatches) -> Result<()> {
+        let blobs_dir_path = Self::get_blobs_dir(matches)?;
+        debug!("blobs dir path: {}", blobs_dir_path.display());
+        let bootstrap_path = Self::get_bootstrap(matches)?;
+        let config = Self::get_configuration(matches)?;
+        config.internal.set_blob_accessible(true);
+
+        let mut build_ctx = BuildContext {
+            prefetch: Prefetch::new(PrefetchPolicy::Fs)?,
+            ..Default::default()
+        };
+
+        let sb = update_ctx_from_bootstrap(&mut build_ctx, config, bootstrap_path)?;
+        let mut tree = Tree::from_bootstrap(&sb, &mut ())?;
+
+        build_ctx.prefetch.init(&mut tree);
+
+        let bootstrap_storage =
+            ArtifactStorage::SingleFile(PathBuf::from("nydus_prefetch_bootstrap"));
+        let mut bootstrap_mgr = BootstrapManager::new(Some(bootstrap_storage), None);
+        let blobs = sb.superblock.get_blob_infos();
+        let mut rafsv6table = RafsV6BlobTable::new();
+        for blob in &blobs {
+            rafsv6table.entries.push(blob.clone());
+        }
+
+        Generator::generate_prefetch(
+            &mut tree,
+            &mut build_ctx,
+            &mut bootstrap_mgr,
+            &mut rafsv6table,
+            blobs_dir_path.to_path_buf(),
+        )?;
+        Ok(())
+    }
+
     fn inspect(matches: &ArgMatches) -> Result<()> {
         let bootstrap_path = Self::get_bootstrap(matches)?;
         let mut config = Self::get_configuration(matches)?;
@@ -1661,6 +1742,13 @@ impl Command {
         }
     }
 
+    fn get_blobs_dir(matches: &ArgMatches) -> Result<&Path> {
+        match matches.get_one::<String>("blob-dir") {
+            Some(s) => Ok(Path::new(s)),
+            None => bail!("missing parameter `blob-dir`"),
+        }
+    }
+
     fn get_bootstrap_storage(matches: &ArgMatches) -> Result<ArtifactStorage> {
         if let Some(s) = matches.get_one::<String>("bootstrap") {
             Ok(ArtifactStorage::SingleFile(s.into()))
diff --git a/src/bin/nydus-image/optimize.rs b/src/bin/nydus-image/optimize.rs
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/bin/nydus-image/prefetch.rs b/src/bin/nydus-image/prefetch.rs
new file mode 100644
index 00000000000..a9325db81ea
--- /dev/null
+++ b/src/bin/nydus-image/prefetch.rs
@@ -0,0 +1,27 @@
+use anyhow::{Context, Result};
+use nydus_api::ConfigV2;
+use nydus_builder::{BuildContext, ConversionType};
+use nydus_rafs::metadata::RafsSuper;
+use nydus_rafs::metadata::RafsVersion;
+use std::result::Result::Ok;
+use std::{path::Path, sync::Arc};
+
+/// Inherit build context from an existing bootstrap: blob features,
+/// conversion type, RAFS version and compressor.
+pub fn update_ctx_from_bootstrap(
+    ctx: &mut BuildContext,
+    config: Arc<ConfigV2>,
+    bootstrap_path: &Path,
+) -> Result<RafsSuper> {
+    let (sb, _) = RafsSuper::load_from_file(bootstrap_path, config, false)?;
+
+    ctx.blob_features = sb.superblock.get_blob_infos().first().unwrap().features();
+
+    let config = sb.meta.get_config();
+    if config.is_tarfs_mode {
+        ctx.conversion_type = ConversionType::TarToRafs;
+    }
+
+    ctx.fs_version =
+        RafsVersion::try_from(sb.meta.version).context("Failed to get RAFS version")?;
+    ctx.compressor = config.compressor;
+    Ok(sb)
+}
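To make the flow concrete, here is a minimal sketch of driving this helper, mirroring `Command::optimize` in main.rs above (the bootstrap path is illustrative, error handling is elided, and `load_for_optimize` is a hypothetical name):

```rust
use std::path::Path;
use std::sync::Arc;

use nydus_api::ConfigV2;
use nydus_builder::{BuildContext, Tree};

fn load_for_optimize() -> anyhow::Result<()> {
    let mut ctx = BuildContext::default();
    let config = Arc::new(ConfigV2::default());
    // Inherit blob features, conversion type, fs version and compressor
    // from the existing image before rebuilding on top of it.
    let sb = update_ctx_from_bootstrap(&mut ctx, config, Path::new("./nydus_bootstrap"))?;
    let mut tree = Tree::from_bootstrap(&sb, &mut ())?;
    ctx.prefetch.init(&mut tree);
    Ok(())
}
```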
diff --git a/src/bin/nydus-image/validator.rs b/src/bin/nydus-image/validator.rs
index 95e8355c277..db218ebdc34 100644
--- a/src/bin/nydus-image/validator.rs
+++ b/src/bin/nydus-image/validator.rs
@@ -4,8 +4,8 @@
 
 //! Validator for RAFS format
 
-use std::path::Path;
 use std::sync::Arc;
+use std::path::Path;
 
 use anyhow::{Context, Result};
 use nydus_api::ConfigV2;
diff --git a/storage/src/backend/localfs.rs b/storage/src/backend/localfs.rs
index 6168a1903da..e8af44d9d2c 100644
--- a/storage/src/backend/localfs.rs
+++ b/storage/src/backend/localfs.rs
@@ -163,6 +163,7 @@ impl LocalFs {
         }
 
         let blob_file_path = self.get_blob_path(blob_id)?;
+        debug!("blob file path: {}", blob_file_path.display());
         let file = OpenOptions::new()
             .read(true)
             .open(&blob_file_path)
diff --git a/storage/src/device.rs b/storage/src/device.rs
index 6e6cbc15ed6..20684dade70 100644
--- a/storage/src/device.rs
+++ b/storage/src/device.rs
@@ -229,6 +229,36 @@ impl BlobInfo {
         blob_info
     }
 
+    /// Set the chunk count.
+    pub fn set_chunk_count(&mut self, count: usize) {
+        self.chunk_count = count as u32;
+    }
+
+    /// Set the compressed size.
+    pub fn set_compressed_size(&mut self, size: usize) {
+        self.compressed_size = size as u64;
+    }
+
+    /// Set the uncompressed size.
+    pub fn set_uncompressed_size(&mut self, size: usize) {
+        self.uncompressed_size = size as u64;
+    }
+
+    /// Set the compressed size of the chunk-info (ci) metadata.
+    pub fn set_meta_ci_compressed_size(&mut self, size: usize) {
+        self.meta_ci_compressed_size = size as u64;
+    }
+
+    /// Set the uncompressed size of the chunk-info (ci) metadata.
+    pub fn set_meta_ci_uncompressed_size(&mut self, size: usize) {
+        self.meta_ci_uncompressed_size = size as u64;
+    }
+
+    /// Set the offset of the chunk-info (ci) metadata.
+    pub fn set_meta_ci_offset(&mut self, size: usize) {
+        self.meta_ci_offset = size as u64;
+    }
+
     /// Set the is_chunkdict_generated flag.
     pub fn set_chunkdict_generated(&mut self, is_chunkdict_generated: bool) {
         self.is_chunkdict_generated = is_chunkdict_generated;
@@ -258,6 +288,11 @@ impl BlobInfo {
         self.blob_id.clone()
     }
 
+    /// Set the blob id.
+    pub fn set_blob_id(&mut self, blob_id: String) {
+        self.blob_id = blob_id;
+    }
+
     /// Get raw blob id, without special handling of `inlined-meta` case.
     pub fn raw_blob_id(&self) -> &str {
         &self.blob_id
@@ -274,6 +309,10 @@
         // For RAFS v6
         if self.has_feature(BlobFeatures::HAS_TAR_HEADER) {
             // There's a tar header between chunk data and compression information.
+            debug!(
+                "blob {}: meta ci offset {}, ci compressed size {}, compressed data size 0x{:x}",
+                self.blob_id, self.meta_ci_offset, self.meta_ci_compressed_size,
+                self.meta_ci_offset - 0x200
+            );
             self.meta_ci_offset - 0x200
         } else {
             self.meta_ci_offset
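A closing note on the last hunk: with `HAS_TAR_HEADER` set, a 512-byte (0x200) tar header precedes the chunk data and the chunk-info table follows it, which is why `compressed_data_size()` returns `meta_ci_offset - 0x200`. A self-contained sketch of that arithmetic, with illustrative numbers rather than values from a real blob:

```rust
// Hypothetical RAFS v6 blob layout: [tar header | chunk data | ci table | ...]
let tar_header: u64 = 0x200; // 512-byte tar header before the data
let data_size: u64 = 8192;   // 4 KiB-aligned chunk data (illustrative)
let meta_ci_offset = tar_header + data_size;
// Mirrors compressed_data_size() for blobs with HAS_TAR_HEADER:
assert_eq!(meta_ci_offset - 0x200, data_size);
```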