diff --git a/chain/src/chain.rs b/chain/src/chain.rs
index 984792c5bb..1189ba3e29 100644
--- a/chain/src/chain.rs
+++ b/chain/src/chain.rs
@@ -432,7 +432,7 @@ impl Chain {
 		// ensure the view is consistent.
 		txhashset::extending_readonly(&mut txhashset, |extension| {
 			// TODO - is this rewind guaranteed to be redundant now?
-			extension.rewind(&header, &header, true, true, true)?;
+			extension.rewind(&header, &header)?;
 			extension.validate(&header, skip_rproofs, &NoStatus)?;
 			Ok(())
 		})
@@ -502,7 +502,7 @@ impl Chain {
 		{
 			let mut txhashset = self.txhashset.write().unwrap();
 			txhashset::extending_readonly(&mut txhashset, |extension| {
-				extension.rewind(&header, &head_header, true, true, true)?;
+				extension.rewind(&header, &head_header)?;
 				extension.snapshot(&header)?;
 				Ok(())
 			})?;
@@ -530,7 +530,7 @@ impl Chain {
 	where
 		T: TxHashsetWriteStatus,
 	{
-		self.txhashset_lock.lock().unwrap();
+		let _ = self.txhashset_lock.lock().unwrap();
 		status.on_setup();
 		let head = self.head().unwrap();
 		let header_head = self.get_header_head().unwrap();
@@ -550,7 +550,7 @@ impl Chain {
 		txhashset::extending(&mut txhashset, &mut batch, |extension| {
 			// TODO do we need to rewind here? We have no blocks to rewind
 			// (and we need them for the pos to unremove)
-			extension.rewind(&header, &header, true, true, true)?;
+			extension.rewind(&header, &header)?;
 			extension.validate(&header, false, status)?;
 			extension.rebuild_index()?;
 			Ok(())
@@ -816,7 +816,7 @@ fn setup_head(
 	let header = store.get_block_header(&head.last_block_h)?;

 	let res = txhashset::extending(txhashset, &mut batch, |extension| {
-		extension.rewind(&header, &head_header, true, true, true)?;
+		extension.rewind(&header, &head_header)?;
 		extension.validate_roots(&header)?;
 		debug!(
 			LOGGER,
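Note on the `txhashset_lock` hunk above: `let _ = self.txhashset_lock.lock().unwrap();` makes the discarded guard explicit, but it does not hold the lock. A `_` pattern drops the `MutexGuard` at the end of the statement, exactly like the bare expression it replaces; if the intent is to hold the lock for the duration of the function, a named binding (`let _lock = ...`) would be required. A minimal standalone demonstration of the difference, using plain `std::sync` (not grin code):

```rust
use std::sync::Mutex;

fn main() {
	let m = Mutex::new(0);

	let _ = m.lock().unwrap(); // guard is dropped here: lock already released
	assert!(m.try_lock().is_ok()); // so we can immediately lock again

	let _guard = m.lock().unwrap(); // named binding keeps the guard alive
	assert!(m.try_lock().is_err()); // lock is held until `_guard` drops
}
```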
diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs
index 6b649cb7e5..82df9e194d 100644
--- a/chain/src/pipe.rs
+++ b/chain/src/pipe.rs
@@ -500,7 +500,7 @@ pub fn rewind_and_apply_fork(
 	);

 	// rewind the sum trees up to the forking block
-	ext.rewind(&forked_header, &head_header, true, true, true)?;
+	ext.rewind(&forked_header, &head_header)?;

 	trace!(
 		LOGGER,
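The `chain.rs` and `pipe.rs` hunks above, and the `txhashset.rs` hunks below, are all one API change: `rewind` loses its `rewind_utxo` / `rewind_kernel` / `rewind_rangeproof` flags, so the output, range-proof and kernel MMRs are now always rewound together and callers can no longer leave the txhashset half-rewound. A toy illustration of the resulting shape (illustrative only; `Vec` stands in for the PMMRs):

```rust
// Three parallel stores rewound as a single unit, mirroring rewind_to_pos:
// output and range-proof share output_pos, the kernel MMR has its own pos.
struct Trees {
	output: Vec<u64>,
	rproof: Vec<u64>, // one range proof per output, so same positions
	kernel: Vec<u64>,
}

impl Trees {
	fn rewind(&mut self, output_pos: usize, kernel_pos: usize) {
		// no flags: partial rewinds are no longer expressible
		self.output.truncate(output_pos);
		self.rproof.truncate(output_pos);
		self.kernel.truncate(kernel_pos);
	}
}

fn main() {
	let mut t = Trees {
		output: vec![1, 2, 3],
		rproof: vec![1, 2, 3],
		kernel: vec![7, 8],
	};
	t.rewind(2, 1);
	assert_eq!((t.output.len(), t.rproof.len(), t.kernel.len()), (2, 2, 1));
}
```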
diff --git a/chain/src/txhashset.rs b/chain/src/txhashset.rs
index a58f7e4ace..1e56103d1a 100644
--- a/chain/src/txhashset.rs
+++ b/chain/src/txhashset.rs
@@ -65,11 +65,12 @@ where
 	fn new(
 		root_dir: String,
 		file_name: &str,
+		prunable: bool,
 		header: Option<&BlockHeader>,
 	) -> Result<PMMRHandle<T>, Error> {
 		let path = Path::new(&root_dir).join(TXHASHSET_SUBDIR).join(file_name);
 		fs::create_dir_all(path.clone())?;
-		let be = PMMRBackend::new(path.to_str().unwrap().to_string(), header)?;
+		let be = PMMRBackend::new(path.to_str().unwrap().to_string(), prunable, header)?;
 		let sz = be.unpruned_size()?;
 		Ok(PMMRHandle {
 			backend: be,
@@ -120,9 +121,9 @@ impl TxHashSet {
 		fs::create_dir_all(kernel_file_path.clone())?;

 		Ok(TxHashSet {
-			output_pmmr_h: PMMRHandle::new(root_dir.clone(), OUTPUT_SUBDIR, header)?,
-			rproof_pmmr_h: PMMRHandle::new(root_dir.clone(), RANGE_PROOF_SUBDIR, header)?,
-			kernel_pmmr_h: PMMRHandle::new(root_dir.clone(), KERNEL_SUBDIR, None)?,
+			output_pmmr_h: PMMRHandle::new(root_dir.clone(), OUTPUT_SUBDIR, true, header)?,
+			rproof_pmmr_h: PMMRHandle::new(root_dir.clone(), RANGE_PROOF_SUBDIR, true, header)?,
+			kernel_pmmr_h: PMMRHandle::new(root_dir.clone(), KERNEL_SUBDIR, false, None)?,
 			commit_index,
 		})
 	}
@@ -461,9 +462,6 @@ impl<'a> Extension<'a> {
 			kernel_pos,
 			&rewind_add_pos,
 			rewind_rm_pos,
-			true,
-			true,
-			true,
 		)?;
 		Ok(())
 	}
@@ -725,7 +723,7 @@ impl<'a> Extension<'a> {
 		// rewind to the specified block for a consistent view
 		let head_header = self.commit_index.head_header()?;
-		self.rewind(block_header, &head_header, true, true, true)?;
+		self.rewind(block_header, &head_header)?;

 		// then calculate the Merkle Proof based on the known pos
 		let pos = self.batch.get_output_pos(&output.commit)?;
@@ -757,9 +755,6 @@ impl<'a> Extension<'a> {
 		&mut self,
 		block_header: &BlockHeader,
 		head_header: &BlockHeader,
-		rewind_utxo: bool,
-		rewind_kernel: bool,
-		rewind_rangeproof: bool,
 	) -> Result<(), Error> {
 		trace!(
 			LOGGER,
@@ -787,12 +782,7 @@ impl<'a> Extension<'a> {
 			block_header.kernel_mmr_size,
 			&rewind_add_pos,
 			&rewind_rm_pos.1,
-			rewind_utxo,
-			rewind_kernel,
-			rewind_rangeproof,
-		)?;
-
-		Ok(())
+		)
 	}

 	/// Rewinds the MMRs to the provided positions, given the output and
@@ -803,9 +793,6 @@ impl<'a> Extension<'a> {
 		kernel_pos: u64,
 		rewind_add_pos: &Bitmap,
 		rewind_rm_pos: &Bitmap,
-		rewind_utxo: bool,
-		rewind_kernel: bool,
-		rewind_rproof: bool,
 	) -> Result<(), Error> {
 		trace!(
 			LOGGER,
@@ -819,22 +806,15 @@ impl<'a> Extension<'a> {
 		// been sync'd to disk.
 		self.new_output_commits.retain(|_, &mut v| v <= output_pos);

-		if rewind_utxo {
-			self.output_pmmr
-				.rewind(output_pos, rewind_add_pos, rewind_rm_pos)
-				.map_err(&ErrorKind::TxHashSetErr)?;
-		}
-		if rewind_rproof {
-			self.rproof_pmmr
-				.rewind(output_pos, rewind_add_pos, rewind_rm_pos)
-				.map_err(&ErrorKind::TxHashSetErr)?;
-		}
-		if rewind_kernel {
-			self.kernel_pmmr
-				.rewind(kernel_pos, rewind_add_pos, rewind_rm_pos)
-				.map_err(&ErrorKind::TxHashSetErr)?;
-		}
-
+		self.output_pmmr
+			.rewind(output_pos, rewind_add_pos, rewind_rm_pos)
+			.map_err(&ErrorKind::TxHashSetErr)?;
+		self.rproof_pmmr
+			.rewind(output_pos, rewind_add_pos, rewind_rm_pos)
+			.map_err(&ErrorKind::TxHashSetErr)?;
+		self.kernel_pmmr
+			.rewind(kernel_pos, rewind_add_pos, rewind_rm_pos)
+			.map_err(&ErrorKind::TxHashSetErr)?;
 		Ok(())
 	}

@@ -1068,14 +1048,17 @@ impl<'a> Extension<'a> {
 		// fast sync where a reorg past the horizon could allow a whole rewrite of
 		// the kernel set.
 		let mut current = header.clone();
+		let empty_bitmap = Bitmap::create();
 		loop {
 			current = self.commit_index.get_block_header(&current.previous)?;
 			if current.height == 0 {
 				break;
 			}
-			let head_header = self.commit_index.head_header()?;
-			// rewinding further and further back
-			self.rewind(&current, &head_header, false, true, false)?;
+			// rewinding kernels only further and further back
+			self.kernel_pmmr
+				.rewind(current.kernel_mmr_size, &empty_bitmap, &empty_bitmap)
+				.map_err(&ErrorKind::TxHashSetErr)?;
+
 			if self.kernel_pmmr.root() != current.kernel_root {
 				return Err(ErrorKind::InvalidTxHashSet(format!(
 					"Kernel root at {} does not match",
@@ -1085,6 +1068,7 @@ impl<'a> Extension<'a> {
 		}
 		Ok(())
 	}
+
 }

 /// Packages the txhashset data files into a zip and returns a Read to the
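The one caller that used those flags for a partial rewind was the kernel history check above: it used to rewind the whole extension with `(false, true, false)` and now rewinds the kernel PMMR directly, passing `current.kernel_mmr_size` plus two empty bitmaps. That works because kernels are never spent: there are no leaf additions or removals to replay, so rewinding an append-only MMR reduces to truncating back to the size recorded in the historical header. A standalone sketch of that idea (toy code, not the grin backend):

```rust
// Illustrative only: an append-only MMR "rewinds" by truncation alone.
struct AppendOnlyMmr {
	hashes: Vec<[u8; 32]>, // stand-in for the backend's hash file
}

impl AppendOnlyMmr {
	// prev_size is the MMR size recorded in an earlier block header.
	fn rewind(&mut self, prev_size: u64) {
		// no leaf_set / bitmap bookkeeping: nothing was ever removed
		self.hashes.truncate(prev_size as usize);
	}
}

fn main() {
	let mut mmr = AppendOnlyMmr { hashes: vec![[0u8; 32]; 10] };
	mmr.rewind(7); // back to the size an older header committed to
	assert_eq!(mmr.hashes.len(), 7);
}
```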
diff --git a/store/src/pmmr.rs b/store/src/pmmr.rs
index d024391db2..a0b210faaa 100644
--- a/store/src/pmmr.rs
+++ b/store/src/pmmr.rs
@@ -57,6 +57,7 @@ where
 	T: PMMRable,
 {
 	data_dir: String,
+	prunable: bool,
 	hash_file: AppendOnlyFile,
 	data_file: AppendOnlyFile,
 	leaf_set: LeafSet,
@@ -76,8 +77,10 @@ where

 			if let Some(elem) = d.1 {
 				self.data_file.append(&mut ser::ser_vec(&elem).unwrap());
-				// Add the new position to our leaf_set.
-				self.leaf_set.add(position);
+				if self.prunable {
+					// Add the new position to our leaf_set.
+					self.leaf_set.add(position);
+				}
 			}
 		}
 		Ok(())
@@ -137,7 +140,7 @@ where
 	/// Return None if pos is a leaf and it has been removed (or pruned or
 	/// compacted).
 	fn get_hash(&self, pos: u64) -> Option<(Hash)> {
-		if pmmr::is_leaf(pos) && !self.leaf_set.includes(pos) {
+		if self.prunable && pmmr::is_leaf(pos) && !self.leaf_set.includes(pos) {
 			return None;
 		}
 		self.get_from_file(pos)
@@ -149,7 +152,7 @@ where
 		if !pmmr::is_leaf(pos) {
 			return None;
 		}
-		if !self.leaf_set.includes(pos) {
+		if self.prunable && !self.leaf_set.includes(pos) {
 			return None;
 		}
 		self.get_data_from_file(pos)
@@ -163,7 +166,9 @@ where
 		rewind_rm_pos: &Bitmap,
 	) -> Result<(), String> {
 		// First rewind the leaf_set with the necessary added and removed positions.
-		self.leaf_set.rewind(rewind_add_pos, rewind_rm_pos);
+		if self.prunable {
+			self.leaf_set.rewind(rewind_add_pos, rewind_rm_pos);
+		}

 		// Rewind the hash file accounting for pruned/compacted pos
 		let shift = self.prune_list.get_shift(position);
@@ -183,6 +188,7 @@ where

 	/// Remove by insertion position.
 	fn remove(&mut self, pos: u64) -> Result<(), String> {
+		assert!(self.prunable, "Remove on non-prunable MMR");
 		self.leaf_set.remove(pos);
 		Ok(())
 	}
@@ -218,60 +224,30 @@ where
 {
 	/// Instantiates a new PMMR backend.
 	/// Use the provided dir to store its files.
-	pub fn new(data_dir: String, header: Option<&BlockHeader>) -> io::Result<PMMRBackend<T>> {
+	pub fn new(
+		data_dir: String,
+		prunable: bool,
+		header: Option<&BlockHeader>
+	) -> io::Result<PMMRBackend<T>> {
+
 		let hash_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE))?;
 		let data_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE))?;

 		let leaf_set_path = format!("{}/{}", data_dir, PMMR_LEAF_FILE);

-		// If we received a rewound "snapshot" leaf_set file
-		// move it into place so we use it.
+		// If we received a rewound "snapshot" leaf_set file move it into
+		// place so we use it.
 		if let Some(header) = header {
 			let leaf_snapshot_path = format!("{}/{}.{}", data_dir, PMMR_LEAF_FILE, header.hash());
 			LeafSet::copy_snapshot(leaf_set_path.clone(), leaf_snapshot_path.clone())?;
 		}

-		// If we need to migrate legacy prune_list do it here before we start.
-		// Do *not* migrate if we already have a non-empty prune_list.
-		let mut prune_list = PruneList::open(format!("{}/{}", data_dir, PMMR_PRUN_FILE))?;
-		let legacy_prune_list_path = format!("{}/{}", data_dir, LEGACY_PRUNED_FILE);
-		if prune_list.is_empty() && Path::new(&legacy_prune_list_path).exists() {
-			debug!(LOGGER, "pmmr: migrating prune_list -> bitmap prune_list");
-			let legacy_prune_pos = read_ordered_vec(legacy_prune_list_path, 8)?;
-			for x in legacy_prune_pos {
-				prune_list.add(x);
-			}
-			prune_list.flush()?;
-		}
-
-		// If we need to migrate legacy rm_log to a new leaf_set do it here before we
-		// start. Do *not* migrate if we already have a non-empty leaf_set.
-		let mut leaf_set = LeafSet::open(leaf_set_path.clone())?;
-		let legacy_rm_log_path = format!("{}/{}", data_dir, LEGACY_RM_LOG_FILE);
-		if leaf_set.is_empty() && Path::new(&legacy_rm_log_path).exists() {
-			debug!(LOGGER, "pmmr: migrating rm_log -> leaf_set");
-			let mut rm_log = RemoveLog::open(legacy_rm_log_path)?;
-			if let Some(header) = header {
-				// Rewind the rm_log back to the height of the header we care about.
-				debug!(
-					LOGGER,
-					"pmmr: first rewinding rm_log to height {}", header.height
-				);
-				rm_log.rewind(header.height as u32)?;
-			}
-
-			let last_pos = {
-				let total_shift = prune_list.get_total_shift();
-				let record_len = 32;
-				let sz = hash_file.size()?;
-				sz / record_len + total_shift
-			};
-
-			migrate_rm_log(&mut leaf_set, &rm_log, &prune_list, last_pos)?;
-		}
+		let prune_list = PruneList::open(format!("{}/{}", data_dir, PMMR_PRUN_FILE))?;
+		let leaf_set = LeafSet::open(leaf_set_path.clone())?;

 		Ok(PMMRBackend {
 			data_dir,
+			prunable,
 			hash_file,
 			data_file,
 			leaf_set,
@@ -369,6 +345,8 @@ where
 	where
 		P: Fn(&[u8]),
 	{
+		assert!(self.prunable, "Trying to compact a non-prunable PMMR");
+
 		// Paths for tmp hash and data files.
 		let tmp_prune_file_hash = format!("{}/{}.hashprune", self.data_dir, PMMR_HASH_FILE);
 		let tmp_prune_file_data = format!("{}/{}.dataprune", self.data_dir, PMMR_DATA_FILE);
@@ -500,25 +478,3 @@ fn removed_excl_roots(removed: Bitmap) -> Bitmap {
 		.collect()
 }

-fn migrate_rm_log(
-	leaf_set: &mut LeafSet,
-	rm_log: &RemoveLog,
-	prune_list: &PruneList,
-	last_pos: u64,
-) -> io::Result<()> {
-	info!(
-		LOGGER,
-		"Migrating rm_log -> leaf_set. Might take a little while... {} pos", last_pos
-	);
-
-	// check every leaf
-	// if not pruned and not removed, add it to the leaf_set
-	for x in 1..=last_pos {
-		if pmmr::is_leaf(x) && !prune_list.is_pruned(x) && !rm_log.includes(x) {
-			leaf_set.add(x);
-		}
-	}
-
-	leaf_set.flush()?;
-	Ok(())
-}
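In `store/src/pmmr.rs` above, the new `prunable` flag gates every `leaf_set` touch: appends record leaf positions only when prunable, `get_hash`/`get_data` consult the leaf_set only when prunable, `rewind` skips the leaf_set bookkeeping otherwise, and `remove`/`check_compact` now assert rather than silently run on a non-prunable backend. The legacy `rm_log`/prune-list migration path is deleted outright rather than gated. The guard pattern in miniature (hypothetical names, not the grin types):

```rust
// Minimal sketch of the prunable / non-prunable split.
struct Backend {
	prunable: bool,
	leaf_set: Vec<u64>, // stand-in for the bitmap-backed LeafSet
}

impl Backend {
	fn append(&mut self, pos: u64) {
		if self.prunable {
			self.leaf_set.push(pos); // only prunable MMRs track live leaves
		}
	}

	fn remove(&mut self, pos: u64) {
		// removing a leaf only makes sense on a prunable MMR: fail fast
		assert!(self.prunable, "remove on non-prunable MMR");
		self.leaf_set.retain(|&p| p != pos);
	}
}

fn main() {
	let mut kernel_like = Backend { prunable: false, leaf_set: vec![] };
	kernel_like.append(1); // nothing tracked in the append-only case
	assert!(kernel_like.leaf_set.is_empty());
	// kernel_like.remove(1); // would panic: remove on non-prunable MMR
}
```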
diff --git a/store/tests/pmmr.rs b/store/tests/pmmr.rs
index 277bc50760..8bba43dedc 100644
--- a/store/tests/pmmr.rs
+++ b/store/tests/pmmr.rs
@@ -29,7 +29,7 @@ use store::types::prune_noop;
 #[test]
 fn pmmr_append() {
 	let (data_dir, elems) = setup("append");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();

 	// adding first set of 4 elements and sync
 	let mut mmr_size = load(0, &elems[0..4], &mut backend);
@@ -79,7 +79,7 @@ fn pmmr_compact_leaf_sibling() {
 	let (data_dir, elems) = setup("compact_leaf_sibling");

 	// setup the mmr store with all elements
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();
 	let mmr_size = load(0, &elems[..], &mut backend);
 	backend.sync().unwrap();

@@ -151,7 +151,7 @@ fn pmmr_prune_compact() {
 	let (data_dir, elems) = setup("prune_compact");

 	// setup the mmr store with all elements
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();
 	let mmr_size = load(0, &elems[..], &mut backend);
 	backend.sync().unwrap();

@@ -201,7 +201,7 @@ fn pmmr_reload() {
 	let (data_dir, elems) = setup("reload");

 	// set everything up with an initial backend
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();

 	let mmr_size = load(0, &elems[..], &mut backend);

@@ -259,7 +259,7 @@ fn pmmr_reload() {
 	// create a new backend referencing the data files
 	// and check everything still works as expected
 	{
-		let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+		let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();
 		assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
 		{
 			let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
@@ -297,7 +297,7 @@ fn pmmr_reload() {
 #[test]
 fn pmmr_rewind() {
 	let (data_dir, elems) = setup("rewind");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), true, None).unwrap();

 	// adding elements and keeping the corresponding root
 	let mut mmr_size = load(0, &elems[0..4], &mut backend);
@@ -426,7 +426,7 @@ fn pmmr_rewind() {
 #[test]
 fn pmmr_compact_single_leaves() {
 	let (data_dir, elems) = setup("compact_single_leaves");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), true, None).unwrap();
 	let mmr_size = load(0, &elems[0..5], &mut backend);
 	backend.sync().unwrap();

@@ -462,7 +462,7 @@ fn pmmr_compact_single_leaves() {
 #[test]
 fn pmmr_compact_entire_peak() {
 	let (data_dir, elems) = setup("compact_entire_peak");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), true, None).unwrap();
 	let mmr_size = load(0, &elems[0..5], &mut backend);
 	backend.sync().unwrap();

@@ -503,7 +503,7 @@ fn pmmr_compact_entire_peak() {
 #[test]
 fn pmmr_compact_horizon() {
 	let (data_dir, elems) = setup("compact_horizon");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), true, None).unwrap();
 	let mmr_size = load(0, &elems[..], &mut backend);
 	backend.sync().unwrap();

@@ -586,7 +586,7 @@ fn pmmr_compact_horizon() {
 	{
 		// recreate backend
 		let backend =
-			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
+			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), true, None).unwrap();

 		assert_eq!(backend.data_size().unwrap(), 19);
 		assert_eq!(backend.hash_size().unwrap(), 35);
@@ -601,7 +601,7 @@ fn pmmr_compact_horizon() {

 	{
 		let mut backend =
-			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
+			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), true, None).unwrap();
 		{
 			let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
@@ -620,7 +620,7 @@ fn pmmr_compact_horizon() {
 	{
 		// recreate backend
 		let backend =
-			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
+			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), true, None).unwrap();

 		// 0010012001001230

@@ -646,7 +646,7 @@ fn compact_twice() {
 	let (data_dir, elems) = setup("compact_twice");

 	// setup the mmr store with all elements
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();
 	let mmr_size = load(0, &elems[..], &mut backend);
 	backend.sync().unwrap();
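The test updates above are mechanical: every backend in this file exercises pruning or compaction, so each constructor call gains `true`. None of them covers the new non-prunable path; a hypothetical test in the same style (reusing this file's `setup` and `load` helpers, not part of this diff) could pin that behavior down:

```rust
// Hypothetical addition: a kernel-style, non-prunable backend.
#[test]
fn pmmr_non_prunable_append() {
	let (data_dir, elems) = setup("non_prunable");
	// prunable = false: no leaf_set is maintained for this MMR
	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), false, None).unwrap();

	let mmr_size = load(0, &elems[..], &mut backend);
	backend.sync().unwrap();

	// reads and sizes must still work without a leaf_set to consult
	assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
}
```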