From 54889fa0ca284ceee2a2538a8c0515d6428e7ed4 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 10 Feb 2020 17:28:47 +0000 Subject: [PATCH 01/11] kernel pos index --- chain/src/store.rs | 29 ++++++++++++++++++++++++++ chain/src/txhashset/txhashset.rs | 35 +++++++++++++++++++++++++++----- 2 files changed, 59 insertions(+), 5 deletions(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index 540de65358..cf70a792cb 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -34,6 +34,7 @@ const BLOCK_PREFIX: u8 = b'b'; const HEAD_PREFIX: u8 = b'H'; const TAIL_PREFIX: u8 = b'T'; const OUTPUT_POS_PREFIX: u8 = b'p'; +const KERNEL_POS_PREFIX: u8 = b'k'; const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B'; const BLOCK_SUMS_PREFIX: u8 = b'M'; const BLOCK_SPENT_PREFIX: u8 = b'S'; @@ -275,6 +276,34 @@ impl<'a> Batch<'a> { self.db.iter(&key) } + /// Save kernel_pos and block height to index. + pub fn save_kernel_pos_height( + &self, + excess: &Commitment, + pos: u64, + height: u64, + ) -> Result<(), Error> { + self.db.put_ser( + &to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..], + &(pos, height), + ) + } + + /// Iterator over the kernel_pos index. + pub fn kernel_pos_iter(&self) -> Result, Error> { + let key = to_key(KERNEL_POS_PREFIX, &mut "".to_string().into_bytes()); + self.db.iter(&key) + } + + /// Get kernel_pos and block height from index. + pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result<(u64, u64), Error> { + option_to_not_found( + self.db + .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())), + || format!("Kernel pos for excess commit: {:?}", excess), + ) + } + /// Get output_pos from index. pub fn get_output_pos(&self, commit: &Commitment) -> Result { match self.get_output_pos_height(commit)? { diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 48aa0f80f9..fd2165fcb8 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -376,6 +376,12 @@ impl TxHashSet { .backend .check_compact(horizon_header.output_mmr_size, &rewind_rm_pos)?; + // TODO - Does this actually belong here? + // Note: We have not yet updated chain "tail" (updated when we remove old blocks from db). + // So technically we leave too many entries in the kernel_pos index? + debug!("txhashset: compact kernel_pos index..."); + self.compact_kernel_pos_index(batch)?; + debug!("txhashset: ... compaction finished"); Ok(()) @@ -465,6 +471,23 @@ impl TxHashSet { ); Ok(()) } + + // TODO - Does this belong here? + fn compact_kernel_pos_index(&self, batch: &Batch<'_>) -> Result<(), Error> { + let now = Instant::now(); + let tail = batch.tail()?; + let deleted = batch + .kernel_pos_iter()? + .filter(|(_, (_, height))| *height < tail.height) + .map(|(key, _)| batch.delete(&key)) + .count(); + debug!( + "compact_kernel_pos_index: deleted {} entries from the index, took {}s", + deleted, + now.elapsed().as_secs(), + ); + Ok(()) + } } /// Starts a new unit of work to extend (or rewind) the chain with additional @@ -958,7 +981,8 @@ impl<'a> Extension<'a> { batch.save_spent_index(&b.hash(), &spent)?; for kernel in b.kernels() { - self.apply_kernel(kernel)?; + let pos = self.apply_kernel(kernel)?; + batch.save_kernel_pos_height(&kernel.excess(), pos, b.header.height)?; } // Update our BitmapAccumulator based on affected outputs (both spent and created). @@ -1055,11 +1079,12 @@ impl<'a> Extension<'a> { } /// Push kernel onto MMR (hash and data files). 
- fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<(), Error> { - self.kernel_pmmr + fn apply_kernel(&mut self, kernel: &TxKernel) -> Result { + let pos = self + .kernel_pmmr .push(kernel) - .map_err(&ErrorKind::TxHashSetErr)?; - Ok(()) + .map_err(ErrorKind::TxHashSetErr)?; + Ok(pos) } /// Build a Merkle proof for the given output and the block From f76b420b99428cc1cb9e74d199bfc39d29dfc35c Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 10 Feb 2020 19:44:57 +0000 Subject: [PATCH 02/11] test coverage for kernel_pos index behavior during chain compaction --- chain/src/chain.rs | 23 ++++++++++++------ chain/src/store.rs | 9 +++++++ chain/src/txhashset/txhashset.rs | 10 ++++++++ chain/tests/mine_simple_chain.rs | 40 ++++++++++++++++++++++++++++---- 4 files changed, 70 insertions(+), 12 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index a026a6d619..d6055114bb 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1082,8 +1082,11 @@ impl Chain { // current "head" and "tail" height to our cut-through horizon and // allowing an additional 60 blocks in height before allowing a further compaction. if let (Ok(tail), Ok(head)) = (self.tail(), self.head()) { - let horizon = global::cut_through_horizon() as u64; - let threshold = horizon.saturating_add(60); + let threshold = if global::is_production_mode() { + global::cut_through_horizon().saturating_add(60) + } else { + global::cut_through_horizon() + }; let next_compact = tail.height.saturating_add(threshold); if next_compact > head.height { debug!( @@ -1099,6 +1102,11 @@ impl Chain { let mut txhashset = self.txhashset.write(); let batch = self.store.batch()?; + // Remove historical blocks from the db unless we are running in archive mode. + if !self.archive_mode { + self.remove_historical_blocks(&header_pmmr, &mut batch)?; + } + // Compact the txhashset itself (rewriting the pruned backend files). { let head_header = batch.head_header()?; @@ -1111,11 +1119,6 @@ impl Chain { txhashset.compact(&horizon_header, &batch)?; } - // If we are not in archival mode remove historical blocks from the db. - if !self.archive_mode { - self.remove_historical_blocks(&header_pmmr, &batch)?; - } - // Make sure our output_pos index is consistent with the UTXO set. txhashset.init_output_pos_index(&header_pmmr, &batch)?; @@ -1145,6 +1148,12 @@ impl Chain { Ok(self.txhashset.read().get_output_pos(commit)?) } + /// Get the position of the kernel if it exists in the kernel_pos index. + /// The index is limited to 7 days of recent kernels. + pub fn get_kernel_pos(&self, excess: Commitment) -> Result { + Ok(self.txhashset.read().get_kernel_pos(excess)?) + } + /// outputs by insertion index pub fn unspent_outputs_by_pmmr_index( &self, diff --git a/chain/src/store.rs b/chain/src/store.rs index cf70a792cb..5299f39a65 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -131,6 +131,15 @@ impl ChainStore { .get_ser(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec())) } + /// Get kernel_pos and block height from index. + pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result<(u64, u64), Error> { + option_to_not_found( + self.db + .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())), + || format!("Kernel pos for excess commit: {:?}", excess), + ) + } + /// Builds a new batch to be used with this store. 
pub fn batch(&self) -> Result, Error> { Ok(Batch { diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index fd2165fcb8..2b5ec669a3 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -346,6 +346,16 @@ impl TxHashSet { Ok(self.commit_index.get_output_pos(&commit)?) } + /// Get the position of the kernel if it exists in the kernel_pos index. + /// The index is limited to 7 days of recent kernels. + pub fn get_kernel_pos(&self, excess: Commitment) -> Result { + let pos = self + .commit_index + .get_kernel_pos_height(&excess) + .map(|(pos, _)| pos)?; + Ok(pos) + } + /// build a new merkle proof for the given position. pub fn merkle_proof(&mut self, commit: Commitment) -> Result { let pos = self.commit_index.get_output_pos(&commit)?; diff --git a/chain/tests/mine_simple_chain.rs b/chain/tests/mine_simple_chain.rs index 567f110cd4..2e93043325 100644 --- a/chain/tests/mine_simple_chain.rs +++ b/chain/tests/mine_simple_chain.rs @@ -732,12 +732,42 @@ fn spend_in_fork_and_compact() { } chain.validate(false).unwrap(); - if let Err(e) = chain.compact() { - panic!("Error compacting chain: {:?}", e); - } - if let Err(e) = chain.validate(false) { - panic!("Validation error after compacting chain: {:?}", e); + + { + let head = chain.head().unwrap(); + let header_at_horizon = chain + .get_header_by_height( + head.height + .saturating_sub(global::cut_through_horizon() as u64), + ) + .unwrap(); + let block_at_horizon = chain.get_block(&header_at_horizon.hash()).unwrap(); + let block_pre_horizon = chain.get_block(&header_at_horizon.prev_hash).unwrap(); + + // Chain compaction will remove all blocks earlier than the horizon. + chain.compact().expect("chain compaction error"); + + // Check the full block at the horizon is in the db but the previous block + // has been removed from the db as expected. + chain + .get_block(&block_at_horizon.hash()) + .expect("block at horizon in db"); + chain + .get_block(&block_pre_horizon.hash()) + .expect_err("block pre horizon not in db"); + + // Check the kernels for the block still in the db are in the kernel_pos index. + let kernel = block_at_horizon.kernels().first().unwrap(); + chain.get_kernel_pos(kernel.excess).unwrap(); + + // Check the kernels for the removed block are no longer in the kernel_pos index. + let kernel = block_pre_horizon.kernels().first().unwrap(); + chain + .get_kernel_pos(kernel.excess) + .expect_err("kernel_pos should be compacted"); } + + chain.validate(false).expect("chain validation error"); } // Cleanup chain directory clean_output_dir(".grin6"); From ef8e2c5f2d35751882313eed5a93bd1b5eaaa6ba Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Wed, 12 Feb 2020 17:07:07 +0000 Subject: [PATCH 03/11] kernel pos index, add tests, init the index during sync --- chain/src/chain.rs | 5 +- chain/src/txhashset/txhashset.rs | 48 ++++++++++++- chain/tests/mine_simple_chain.rs | 36 ---------- chain/tests/test_kernel_index.rs | 115 +++++++++++++++++++++++++++++++ core/src/consensus.rs | 5 ++ core/src/global.rs | 21 +++++- 6 files changed, 188 insertions(+), 42 deletions(-) create mode 100644 chain/tests/test_kernel_index.rs diff --git a/chain/src/chain.rs b/chain/src/chain.rs index d6055114bb..87eb6b8c36 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -980,9 +980,12 @@ impl Chain { batch.save_body_tail(&tip)?; } - // Rebuild our output_pos index in the db based on fresh UTXO set. 
+ // Initialize output_pos index in the db based on current UTXO set. txhashset.init_output_pos_index(&header_pmmr, &batch)?; + // Initialize the kernel_pos index for recent kernel history. + txhashset.init_kernel_pos_index(&header_pmmr, &batch)?; + // Commit all the changes to the db. batch.commit()?; diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 2b5ec669a3..ef77320928 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -20,6 +20,7 @@ use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::merkle_proof::MerkleProof; use crate::core::core::pmmr::{self, Backend, ReadonlyPMMR, RewindablePMMR, PMMR}; use crate::core::core::{Block, BlockHeader, Input, Output, OutputIdentifier, TxKernel}; +use crate::core::global; use crate::core::ser::{PMMRable, ProtocolVersion}; use crate::error::{Error, ErrorKind}; use crate::store::{Batch, ChainStore}; @@ -482,13 +483,54 @@ impl TxHashSet { Ok(()) } - // TODO - Does this belong here? + pub fn init_kernel_pos_index( + &self, + header_pmmr: &PMMRHandle, + batch: &Batch<'_>, + ) -> Result<(), Error> { + let now = Instant::now(); + let head_header = batch.head_header()?; + let cutoff_height = head_header + .height + .saturating_sub(global::kernel_index_horizon().into()); + let cutoff_hash = header_pmmr.get_header_hash_by_height(cutoff_height)?; + let cutoff_header = batch.get_block_header(&cutoff_hash)?; + let cutoff_pos = if cutoff_header.height == 0 { + 1 + } else { + batch.get_previous_header(&cutoff_header)?.kernel_mmr_size + 1 + }; + let mut kernel_count = 0; + let mut current_header = cutoff_header; + for pos in cutoff_pos..=self.kernel_pmmr_h.last_pos { + while pos > current_header.kernel_mmr_size { + let next_hash = header_pmmr.get_header_hash_by_height(current_header.height + 1)?; + current_header = batch.get_block_header(&next_hash)?; + } + if pmmr::is_leaf(pos) { + if let Some(kernel) = self.kernel_pmmr_h.backend.get_data(pos) { + batch.save_kernel_pos_height(&kernel.excess, pos, current_header.height)?; + kernel_count += 1; + } + } + } + debug!( + "init_kernel_pos_index: {} kernels, took {}s", + kernel_count, + now.elapsed().as_secs() + ); + Ok(()) + } + fn compact_kernel_pos_index(&self, batch: &Batch<'_>) -> Result<(), Error> { let now = Instant::now(); - let tail = batch.tail()?; + let head_header = batch.head_header()?; + let cutoff_height = head_header + .height + .saturating_sub(global::kernel_index_horizon().into()); let deleted = batch .kernel_pos_iter()? - .filter(|(_, (_, height))| *height < tail.height) + .filter(|(_, (_, height))| *height < cutoff_height) .map(|(key, _)| batch.delete(&key)) .count(); debug!( diff --git a/chain/tests/mine_simple_chain.rs b/chain/tests/mine_simple_chain.rs index 2e93043325..d96c73afdb 100644 --- a/chain/tests/mine_simple_chain.rs +++ b/chain/tests/mine_simple_chain.rs @@ -730,43 +730,7 @@ fn spend_in_fork_and_compact() { prev = next.header.clone(); chain.process_block(next, chain::Options::SKIP_POW).unwrap(); } - chain.validate(false).unwrap(); - - { - let head = chain.head().unwrap(); - let header_at_horizon = chain - .get_header_by_height( - head.height - .saturating_sub(global::cut_through_horizon() as u64), - ) - .unwrap(); - let block_at_horizon = chain.get_block(&header_at_horizon.hash()).unwrap(); - let block_pre_horizon = chain.get_block(&header_at_horizon.prev_hash).unwrap(); - - // Chain compaction will remove all blocks earlier than the horizon. 
- chain.compact().expect("chain compaction error"); - - // Check the full block at the horizon is in the db but the previous block - // has been removed from the db as expected. - chain - .get_block(&block_at_horizon.hash()) - .expect("block at horizon in db"); - chain - .get_block(&block_pre_horizon.hash()) - .expect_err("block pre horizon not in db"); - - // Check the kernels for the block still in the db are in the kernel_pos index. - let kernel = block_at_horizon.kernels().first().unwrap(); - chain.get_kernel_pos(kernel.excess).unwrap(); - - // Check the kernels for the removed block are no longer in the kernel_pos index. - let kernel = block_pre_horizon.kernels().first().unwrap(); - chain - .get_kernel_pos(kernel.excess) - .expect_err("kernel_pos should be compacted"); - } - chain.validate(false).expect("chain validation error"); } // Cleanup chain directory diff --git a/chain/tests/test_kernel_index.rs b/chain/tests/test_kernel_index.rs new file mode 100644 index 0000000000..eca0c984c6 --- /dev/null +++ b/chain/tests/test_kernel_index.rs @@ -0,0 +1,115 @@ +// Copyright 2020 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use self::chain::Chain; +use self::core::core::hash::Hashed; +use self::core::core::{Block, BlockHeader, Transaction}; +use self::core::global::ChainTypes; +use self::core::libtx; +use self::core::pow::Difficulty; +use self::core::{global, pow}; +use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain}; +use chrono::Duration; +use grin_chain as chain; +use grin_core as core; +use grin_keychain as keychain; +use grin_util as util; + +mod chain_test_helper; + +use self::chain_test_helper::{clean_output_dir, init_chain}; + +#[test] +fn kernel_index_after_compaction() { + global::set_mining_mode(ChainTypes::AutomatedTesting); + util::init_test_logger(); + // Cleanup chain directory + let chain_dir = ".grin_kernel_idx"; + clean_output_dir(chain_dir); + + let chain = init_chain(chain_dir, pow::mine_genesis_block().unwrap()); + let mut prev = chain.head_header().unwrap(); + let kc = ExtKeychain::from_random_seed(false).unwrap(); + + // mine some blocks + for n in 0..30 { + let next = prepare_block(&kc, &prev, &chain, 10 + n); + prev = next.header.clone(); + chain.process_block(next, chain::Options::SKIP_POW).unwrap(); + } + + chain.validate(false).unwrap(); + + { + let head = chain.head().unwrap(); + let header_at_horizon = chain + .get_header_by_height( + head.height + .saturating_sub(global::kernel_index_horizon() as u64), + ) + .unwrap(); + let block_at_horizon = chain.get_block(&header_at_horizon.hash()).unwrap(); + let block_pre_horizon = chain.get_block(&header_at_horizon.prev_hash).unwrap(); + + // Chain compaction will remove all blocks earlier than the horizon. + chain.compact().expect("chain compaction error"); + + // Kernels up to and including the horizon must be in the kernel index. 
+ let kernel = block_at_horizon.kernels().first().unwrap(); + chain.get_kernel_pos(kernel.excess).unwrap(); + + // Kernels beyond the horizon are no longer in the kernel index. + let kernel = block_pre_horizon.kernels().first().unwrap(); + chain + .get_kernel_pos(kernel.excess) + .expect_err("kernel_pos should be compacted"); + } + + // Cleanup chain directory + clean_output_dir(chain_dir); +} + +fn prepare_block(kc: &K, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block +where + K: Keychain, +{ + let mut b = prepare_block_nosum(kc, prev, diff, vec![]); + chain.set_txhashset_roots(&mut b).unwrap(); + b +} + +fn prepare_block_nosum(kc: &K, prev: &BlockHeader, diff: u64, txs: Vec<&Transaction>) -> Block +where + K: Keychain, +{ + let proof_size = global::proofsize(); + let key_id = ExtKeychainPath::new(1, diff as u32, 0, 0, 0).to_identifier(); + + let fees = txs.iter().map(|tx| tx.fee()).sum(); + let reward = + libtx::reward::output(kc, &libtx::ProofBuilder::new(kc), &key_id, fees, false).unwrap(); + let mut b = match core::core::Block::new( + prev, + txs.into_iter().cloned().collect(), + Difficulty::from_num(diff), + reward, + ) { + Err(e) => panic!("{:?}", e), + Ok(b) => b, + }; + b.header.timestamp = prev.timestamp + Duration::seconds(60); + b.header.pow.total_difficulty = prev.total_difficulty() + Difficulty::from_num(diff); + b.header.pow.proof = pow::Proof::random(proof_size); + b +} diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 94218e4af1..4da85c5324 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -93,6 +93,11 @@ pub const BASE_EDGE_BITS: u8 = 24; /// easier to reason about. pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32; +/// A relative kernel lock is only applicable within a limited number of recent blocks. +/// This is consensus critical as the lock condition will be met once this number of blocks +/// has been exceeded and the referenced kernel "ages out". +pub const KERNEL_RELATIVE_HEIGHT_LIMIT: u32 = WEEK_HEIGHT as u32; + /// Default number of blocks in the past to determine the height where we request /// a txhashset (and full blocks from). Needs to be long enough to not overlap with /// a long reorg. diff --git a/core/src/global.rs b/core/src/global.rs index 339c3f7f40..b6fc408596 100644 --- a/core/src/global.rs +++ b/core/src/global.rs @@ -19,8 +19,8 @@ use crate::consensus::{ graph_weight, valid_header_version, HeaderInfo, BASE_EDGE_BITS, BLOCK_TIME_SEC, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS, - DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, MAX_BLOCK_WEIGHT, PROOFSIZE, - SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD, + DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, KERNEL_RELATIVE_HEIGHT_LIMIT, MAX_BLOCK_WEIGHT, + PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD, }; use crate::core::block::HeaderVersion; use crate::pow::{ @@ -67,6 +67,12 @@ pub const AUTOMATED_TESTING_CUT_THROUGH_HORIZON: u32 = 20; /// Testing cut through horizon in blocks pub const USER_TESTING_CUT_THROUGH_HORIZON: u32 = 70; +/// Kernel index horizon for automated tests +pub const AUTOMATED_TESTING_KERNEL_INDEX_HORIZON: u32 = 25; + +/// Kernel index horizon for user testing +pub const USER_TESTING_KERNEL_INDEX_HORIZON: u32 = 2 * USER_TESTING_CUT_THROUGH_HORIZON; + /// Testing state sync threshold in blocks pub const TESTING_STATE_SYNC_THRESHOLD: u32 = 20; @@ -283,6 +289,17 @@ pub fn cut_through_horizon() -> u32 { } } +/// We maintain an index of "recent" kernels back to this horizon. 
+/// This must be sufficient for validating an NSKR lock under rewind scenario. +pub fn kernel_index_horizon() -> u32 { + let param_ref = CHAIN_TYPE.read(); + match *param_ref { + ChainTypes::AutomatedTesting => AUTOMATED_TESTING_KERNEL_INDEX_HORIZON, + ChainTypes::UserTesting => USER_TESTING_KERNEL_INDEX_HORIZON, + _ => KERNEL_RELATIVE_HEIGHT_LIMIT + CUT_THROUGH_HORIZON, + } +} + /// Threshold at which we can request a txhashset (and full blocks from) pub fn state_sync_threshold() -> u32 { let param_ref = CHAIN_TYPE.read(); From 81ff36801ae14497383d324bea2012c18c8c2f35 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Thu, 13 Feb 2020 11:00:02 +0000 Subject: [PATCH 04/11] comments --- chain/src/txhashset/txhashset.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index ef77320928..e4193bf1ea 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -483,6 +483,9 @@ impl TxHashSet { Ok(()) } + /// Initialize the kernel_pos index. + /// Find the block header at the kernel index horizon (14 days of history). + /// Then iterate over all kernels since then and add to the index. pub fn init_kernel_pos_index( &self, header_pmmr: &PMMRHandle, @@ -522,6 +525,8 @@ impl TxHashSet { Ok(()) } + /// Compact the kernel_pos index by removing any entries older than the kernel index horizon. + /// This will ensure we always have 14 days of kernel history in the index. fn compact_kernel_pos_index(&self, batch: &Batch<'_>) -> Result<(), Error> { let now = Instant::now(); let head_header = batch.head_header()?; From fee483460db24ba375b92fa75a331aaac6a2f9cb Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 15 Feb 2020 16:08:25 +0000 Subject: [PATCH 05/11] wip --- chain/src/chain.rs | 4 ++-- chain/src/store.rs | 32 ++++++++++++++++++-------------- chain/src/txhashset/txhashset.rs | 19 +++++++++++++------ 3 files changed, 33 insertions(+), 22 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 87eb6b8c36..ca33634c13 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1090,7 +1090,7 @@ impl Chain { } else { global::cut_through_horizon() }; - let next_compact = tail.height.saturating_add(threshold); + let next_compact = tail.height.saturating_add(threshold.into()); if next_compact > head.height { debug!( "compact: skipping startup compaction (next at {})", @@ -1152,7 +1152,7 @@ impl Chain { } /// Get the position of the kernel if it exists in the kernel_pos index. - /// The index is limited to 7 days of recent kernels. + /// The index is limited to 14 days of recent kernels. pub fn get_kernel_pos(&self, excess: Commitment) -> Result { Ok(self.txhashset.read().get_kernel_pos(excess)?) } diff --git a/chain/src/store.rs b/chain/src/store.rs index 5299f39a65..22c284a69b 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -132,12 +132,12 @@ impl ChainStore { } /// Get kernel_pos and block height from index. - pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result<(u64, u64), Error> { - option_to_not_found( - self.db - .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())), - || format!("Kernel pos for excess commit: {:?}", excess), - ) + /// Returns a vec of possible (pos, height) entries in the MMR. + /// Returns an empty vec if no entries found. 
+ pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result, Error> { + self.db + .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())) + .map(|x| x.unwrap_or(vec![])) } /// Builds a new batch to be used with this store. @@ -292,25 +292,29 @@ impl<'a> Batch<'a> { pos: u64, height: u64, ) -> Result<(), Error> { + let new_value = (pos, height); + let mut entry = self.get_kernel_pos_height(excess)?; + let idx = entry.binary_search(&new_value).unwrap_or_else(|x| x); + entry.insert(idx, new_value); self.db.put_ser( &to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..], - &(pos, height), + &entry, ) } /// Iterator over the kernel_pos index. - pub fn kernel_pos_iter(&self) -> Result, Error> { + pub fn kernel_pos_iter(&self) -> Result>, Error> { let key = to_key(KERNEL_POS_PREFIX, &mut "".to_string().into_bytes()); self.db.iter(&key) } /// Get kernel_pos and block height from index. - pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result<(u64, u64), Error> { - option_to_not_found( - self.db - .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())), - || format!("Kernel pos for excess commit: {:?}", excess), - ) + /// Returns a vec of possible (pos, height) entries in the MMR. + /// Returns an empty vec if no entries found. + pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result, Error> { + self.db + .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())) + .map(|x| x.unwrap_or(vec![])) } /// Get output_pos from index. diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index e4193bf1ea..035b96b4f4 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -300,6 +300,7 @@ impl TxHashSet { .elements_from_pmmr_index(start_index, max_count, max_index) } + /// TODO - Leverage the kernel_pos as a quick way of achieving this for recent kernels. /// Find a kernel with a given excess. Work backwards from `max_index` to `min_index` pub fn find_kernel( &self, @@ -347,13 +348,19 @@ impl TxHashSet { Ok(self.commit_index.get_output_pos(&commit)?) } + /// TODO - This needs to be a multi-step process. + /// TODO - Rename this. + /// First we need to look the entry up in the index. + /// This entry will be a vec of (pos, height). + /// The entry may be an empty vec. + /// We then need to filter this vec by height (discounting anything beyond current head) + /// and then actually look the kernels up, filtering out any that do not align with the index. + /// /// Get the position of the kernel if it exists in the kernel_pos index. - /// The index is limited to 7 days of recent kernels. - pub fn get_kernel_pos(&self, excess: Commitment) -> Result { - let pos = self - .commit_index - .get_kernel_pos_height(&excess) - .map(|(pos, _)| pos)?; + /// The index is limited to 14 days of recent kernels. 
+ pub fn get_kernel_pos(&self, excess: Commitment) -> Result, Error> { + let pos = self.commit_index.get_kernel_pos_height(&excess); + // .map(|entry| entry)?; Ok(pos) } From 02f8accf577f6ef6d582b61340c8427e1680972f Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Tue, 3 Mar 2020 21:14:53 +0000 Subject: [PATCH 06/11] wip --- chain/src/store.rs | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index 22c284a69b..4d8ede4f6c 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -134,10 +134,12 @@ impl ChainStore { /// Get kernel_pos and block height from index. /// Returns a vec of possible (pos, height) entries in the MMR. /// Returns an empty vec if no entries found. - pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result, Error> { - self.db - .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())) - .map(|x| x.unwrap_or(vec![])) + pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result<(u64, u64), Error> { + option_to_not_found( + self.db + .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())), + || format!("Kernel position for: {:?}", excess), + ) } /// Builds a new batch to be used with this store. @@ -292,29 +294,25 @@ impl<'a> Batch<'a> { pos: u64, height: u64, ) -> Result<(), Error> { - let new_value = (pos, height); - let mut entry = self.get_kernel_pos_height(excess)?; - let idx = entry.binary_search(&new_value).unwrap_or_else(|x| x); - entry.insert(idx, new_value); self.db.put_ser( &to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..], - &entry, + &(pos, height), ) } /// Iterator over the kernel_pos index. - pub fn kernel_pos_iter(&self) -> Result>, Error> { + pub fn kernel_pos_iter(&self) -> Result, Error> { let key = to_key(KERNEL_POS_PREFIX, &mut "".to_string().into_bytes()); self.db.iter(&key) } /// Get kernel_pos and block height from index. - /// Returns a vec of possible (pos, height) entries in the MMR. - /// Returns an empty vec if no entries found. - pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result, Error> { - self.db - .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())) - .map(|x| x.unwrap_or(vec![])) + pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result<(u64, u64), Error> { + option_to_not_found( + self.db + .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())), + || format!("Kernel pos for excess: {:?}", excess), + ) } /// Get output_pos from index. From c57fbfd8a7ba91b4b6a1267da1e27f299f88c328 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Wed, 4 Mar 2020 16:20:13 +0000 Subject: [PATCH 07/11] simplify kernel_pos index, need undo list per block to handle rewind/undo this is conceptually similar to output_pos but we support duplicates rather than spending to remove --- chain/src/chain.rs | 4 ++-- chain/src/store.rs | 11 +++++++++++ chain/src/txhashset/txhashset.rs | 17 +++++------------ 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index ca33634c13..ff9bcd2d62 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1107,7 +1107,7 @@ impl Chain { // Remove historical blocks from the db unless we are running in archive mode. 
if !self.archive_mode { - self.remove_historical_blocks(&header_pmmr, &mut batch)?; + self.remove_historical_blocks(&header_pmmr, &batch)?; } // Compact the txhashset itself (rewriting the pruned backend files). @@ -1154,7 +1154,7 @@ impl Chain { /// Get the position of the kernel if it exists in the kernel_pos index. /// The index is limited to 14 days of recent kernels. pub fn get_kernel_pos(&self, excess: Commitment) -> Result { - Ok(self.txhashset.read().get_kernel_pos(excess)?) + self.txhashset.read().get_kernel_pos(excess) } /// outputs by insertion index diff --git a/chain/src/store.rs b/chain/src/store.rs index 4d8ede4f6c..fccfb28f02 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -38,6 +38,7 @@ const KERNEL_POS_PREFIX: u8 = b'k'; const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B'; const BLOCK_SUMS_PREFIX: u8 = b'M'; const BLOCK_SPENT_PREFIX: u8 = b'S'; +const BLOCK_KERNEL_UNDO_PREFIX: u8 = b'U'; /// All chain-related database operations pub struct ChainStore { @@ -211,6 +212,16 @@ impl<'a> Batch<'a> { Ok(()) } + /// We maintain an "undo" index for each full block to allow the kernel_pos + /// to be easily reverted during rewind. + /// We allow duplicate kernels so we need to know what to revert the kernel_pos + /// index to if we "undo" a kernel when rewinding a block. + pub fn save_kernel_undo_index(&self, h: &Hash, pos: &Vec) -> Result<(), Error> { + self.db + .put_ser(&to_key(BLOCK_KERNEL_UNDO_PREFIX, &mut h.to_vec())[..], pos)?; + Ok(()) + } + /// Migrate a block stored in the db by serializing it using the provided protocol version. /// Block may have been read using a previous protocol version but we do not actually care. pub fn migrate_block(&self, b: &Block, version: ProtocolVersion) -> Result<(), Error> { diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 035b96b4f4..b89fc2fb48 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -348,20 +348,13 @@ impl TxHashSet { Ok(self.commit_index.get_output_pos(&commit)?) } - /// TODO - This needs to be a multi-step process. - /// TODO - Rename this. - /// First we need to look the entry up in the index. - /// This entry will be a vec of (pos, height). - /// The entry may be an empty vec. - /// We then need to filter this vec by height (discounting anything beyond current head) - /// and then actually look the kernels up, filtering out any that do not align with the index. - /// /// Get the position of the kernel if it exists in the kernel_pos index. /// The index is limited to 14 days of recent kernels. - pub fn get_kernel_pos(&self, excess: Commitment) -> Result, Error> { - let pos = self.commit_index.get_kernel_pos_height(&excess); - // .map(|entry| entry)?; - Ok(pos) + pub fn get_kernel_pos(&self, excess: Commitment) -> Result { + Ok(self + .commit_index + .get_kernel_pos_height(&excess) + .map(|x| x.1)?) } /// build a new merkle proof for the given position. 
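Note on the patch above: at this point the kernel_pos index maps each kernel excess to a single (pos, height) entry, so a duplicate kernel excess simply overwrites the entry for the earlier instance. The later patches in this series (save_kernel_undo_list / get_kernel_undo_list and rewind_single_block_kernels) handle that by storing a per-block "undo list" so a rewind can restore whatever entry a block's kernels replaced. The sketch below illustrates that apply/rewind symmetry with a plain in-memory HashMap standing in for the lmdb-backed index; the type and function names (KernelIndex, apply_block, rewind_block) are illustrative only and not part of the actual chain API.

use std::collections::HashMap;

/// Toy stand-in for the kernel_pos index: excess -> (mmr_pos, height).
#[derive(Default)]
struct KernelIndex {
    entries: HashMap<&'static str, (u64, u64)>,
    /// Per-block "undo list": entries that were replaced by this block's kernels.
    undo: HashMap<&'static str, Vec<(&'static str, (u64, u64))>>,
}

impl KernelIndex {
    /// Apply a block's kernels, remembering any entry we are about to overwrite.
    fn apply_block(&mut self, block_hash: &'static str, kernels: &[(&'static str, (u64, u64))]) {
        let mut undo_list = vec![];
        for (excess, pos) in kernels {
            if let Some(prev) = self.entries.get(excess) {
                undo_list.push((*excess, *prev));
            }
            self.entries.insert(*excess, *pos);
        }
        self.undo.insert(block_hash, undo_list);
    }

    /// Rewind a block: drop its kernels from the index, then restore replaced entries.
    fn rewind_block(&mut self, block_hash: &'static str, kernels: &[(&'static str, (u64, u64))]) {
        for (excess, _) in kernels {
            self.entries.remove(excess);
        }
        if let Some(undo_list) = self.undo.remove(block_hash) {
            for (excess, prev) in undo_list {
                self.entries.insert(excess, prev);
            }
        }
    }
}

fn main() {
    let mut index = KernelIndex::default();
    // A kernel with excess "k1" first appears in block "b1" at pos 10, height 100.
    index.apply_block("b1", &[("k1", (10, 100))]);
    // The same excess is legitimately reused in block "b2" at pos 25, height 200.
    index.apply_block("b2", &[("k1", (25, 200))]);
    assert_eq!(index.entries["k1"], (25, 200));
    // Rewinding "b2" restores the earlier entry rather than leaving a gap.
    index.rewind_block("b2", &[("k1", (25, 200))]);
    assert_eq!(index.entries["k1"], (10, 100));
}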
From 2d0e2bb15bc25af9c17aa6a1f31bbc47b728f8a8 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Wed, 4 Mar 2020 16:27:09 +0000 Subject: [PATCH 08/11] cleanup --- chain/src/txhashset/txhashset.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index b89fc2fb48..5f848e6519 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -505,7 +505,7 @@ impl TxHashSet { }; let mut kernel_count = 0; let mut current_header = cutoff_header; - for pos in cutoff_pos..=self.kernel_pmmr_h.last_pos { + for pos in cutoff_pos..(self.kernel_pmmr_h.last_pos + 1) { while pos > current_header.kernel_mmr_size { let next_hash = header_pmmr.get_header_hash_by_height(current_header.height + 1)?; current_header = batch.get_block_header(&next_hash)?; From 498fea52fdf9b42ca95dcd1de05988aab75539e0 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Thu, 5 Mar 2020 17:41:16 +0000 Subject: [PATCH 09/11] store and delete the kernel undo list per block store the kernel_pos index when we call apply_kernel --- chain/src/store.rs | 50 ++++++++++++++------------------ chain/src/txhashset/txhashset.rs | 45 +++++++++++++++------------- chain/src/types.rs | 2 +- 3 files changed, 48 insertions(+), 49 deletions(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index fccfb28f02..58796b9dd9 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -118,7 +118,7 @@ impl ChainStore { /// Get PMMR pos for the given output commitment. pub fn get_output_pos(&self, commit: &Commitment) -> Result { match self.get_output_pos_height(commit)? { - Some((pos, _)) => Ok(pos), + Some(pos) => Ok(pos.pos), None => Err(Error::NotFoundErr(format!( "Output position for: {:?}", commit @@ -127,15 +127,14 @@ impl ChainStore { } /// Get PMMR pos and block height for the given output commitment. - pub fn get_output_pos_height(&self, commit: &Commitment) -> Result, Error> { - self.db - .get_ser(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec())) + pub fn get_output_pos_height(&self, commit: &Commitment) -> Result, Error> { + self.db.get_ser(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec())) } /// Get kernel_pos and block height from index. /// Returns a vec of possible (pos, height) entries in the MMR. /// Returns an empty vec if no entries found. - pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result<(u64, u64), Error> { + pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result { option_to_not_found( self.db .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())), @@ -249,6 +248,7 @@ impl<'a> Batch<'a> { { let _ = self.delete_block_sums(bh); let _ = self.delete_spent_index(bh); + let _ = self.delete_kernel_undo_index(bh); } Ok(()) @@ -266,15 +266,10 @@ impl<'a> Batch<'a> { } /// Save output_pos and block height to index. - pub fn save_output_pos_height( - &self, - commit: &Commitment, - pos: u64, - height: u64, - ) -> Result<(), Error> { + pub fn save_output_pos_height(&self, commit: &Commitment, pos: CommitPos) -> Result<(), Error> { self.db.put_ser( &to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec())[..], - &(pos, height), + &pos, ) } @@ -293,32 +288,27 @@ impl<'a> Batch<'a> { } /// Iterator over the output_pos index. 
- pub fn output_pos_iter(&self) -> Result, Error> { + pub fn output_pos_iter(&self) -> Result, Error> { let key = to_key(OUTPUT_POS_PREFIX, &mut "".to_string().into_bytes()); self.db.iter(&key) } /// Save kernel_pos and block height to index. - pub fn save_kernel_pos_height( - &self, - excess: &Commitment, - pos: u64, - height: u64, - ) -> Result<(), Error> { + pub fn save_kernel_pos_height(&self, excess: &Commitment, pos: CommitPos) -> Result<(), Error> { self.db.put_ser( &to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..], - &(pos, height), + &pos, ) } /// Iterator over the kernel_pos index. - pub fn kernel_pos_iter(&self) -> Result, Error> { + pub fn kernel_pos_iter(&self) -> Result, Error> { let key = to_key(KERNEL_POS_PREFIX, &mut "".to_string().into_bytes()); self.db.iter(&key) } /// Get kernel_pos and block height from index. - pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result<(u64, u64), Error> { + pub fn get_kernel_pos_height(&self, excess: &Commitment) -> Result { option_to_not_found( self.db .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())), @@ -326,10 +316,10 @@ impl<'a> Batch<'a> { ) } - /// Get output_pos from index. + /// Get PMMR pos for the given output commitment. pub fn get_output_pos(&self, commit: &Commitment) -> Result { match self.get_output_pos_height(commit)? { - Some((pos, _)) => Ok(pos), + Some(pos) => Ok(pos.pos), None => Err(Error::NotFoundErr(format!( "Output position for: {:?}", commit @@ -337,10 +327,9 @@ impl<'a> Batch<'a> { } } - /// Get output_pos and block height from index. - pub fn get_output_pos_height(&self, commit: &Commitment) -> Result, Error> { - self.db - .get_ser(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec())) + /// Get PMMR pos and block height for the given output commitment. + pub fn get_output_pos_height(&self, commit: &Commitment) -> Result, Error> { + self.db.get_ser(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec())) } /// Get the previous header. @@ -368,6 +357,11 @@ impl<'a> Batch<'a> { .delete(&to_key(BLOCK_SPENT_PREFIX, &mut bh.to_vec())) } + fn delete_kernel_undo_index(&self, bh: &Hash) -> Result<(), Error> { + self.db + .delete(&to_key(BLOCK_KERNEL_UNDO_PREFIX, &mut bh.to_vec())) + } + /// Save block_sums for the block. pub fn save_block_sums(&self, h: &Hash, sums: BlockSums) -> Result<(), Error> { self.db diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 5f848e6519..a6bec05ad5 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -227,10 +227,10 @@ impl TxHashSet { pub fn get_unspent(&self, output_id: &OutputIdentifier) -> Result, Error> { let commit = output_id.commit; match self.commit_index.get_output_pos_height(&commit) { - Ok(Some((pos, height))) => { + Ok(Some(pos)) => { let output_pmmr: ReadonlyPMMR<'_, Output, _> = ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); - if let Some(out) = output_pmmr.get_data(pos) { + if let Some(out) = output_pmmr.get_data(pos.pos) { if OutputIdentifier::from(out) == *output_id { Ok(Some(CommitPos { pos, height })) } else { @@ -354,7 +354,7 @@ impl TxHashSet { Ok(self .commit_index .get_kernel_pos_height(&excess) - .map(|x| x.1)?) + .map(|x| x.pos)?) } /// build a new merkle proof for the given position. @@ -414,9 +414,9 @@ impl TxHashSet { // Iterate over the current output_pos index, removing any entries that // do not point to to the expected output. let mut removed_count = 0; - for (key, (pos, _)) in batch.output_pos_iter()? 
{ - if let Some(out) = output_pmmr.get_data(pos) { - if let Ok(pos_via_mmr) = batch.get_output_pos(&out.commitment()) { + for (key, pos) in batch.output_pos_iter()? { + if let Some(out) = output_pmmr.get_data(pos.pos) { + if let Ok(pos_via_mmr) = batch.get_output_pos_height(&out.commitment())? { // If the pos matches and the index key matches the commitment // then keep the entry, other we want to clean it up. if pos == pos_via_mmr && batch.is_match_output_pos_key(&key, &out.commitment()) @@ -471,7 +471,8 @@ impl TxHashSet { // Note: MMR position is 1-based and not 0-based, so here must be '>' instead of '>=' break; } - batch.save_output_pos_height(&commit, pos, h.height)?; + let height = h.height; + batch.save_output_pos_height(&commit, CommitPos { pos, height })?; i += 1; } } @@ -512,7 +513,8 @@ impl TxHashSet { } if pmmr::is_leaf(pos) { if let Some(kernel) = self.kernel_pmmr_h.backend.get_data(pos) { - batch.save_kernel_pos_height(&kernel.excess, pos, current_header.height)?; + let height = current_header.height; + batch.save_kernel_pos_height(&kernel.excess, CommitPos { pos, height })?; kernel_count += 1; } } @@ -535,7 +537,7 @@ impl TxHashSet { .saturating_sub(global::kernel_index_horizon().into()); let deleted = batch .kernel_pos_iter()? - .filter(|(_, (_, height))| *height < cutoff_height) + .filter(|(_, pos)| pos.height < cutoff_height) .map(|(key, _)| batch.delete(&key)) .count(); debug!( @@ -1011,8 +1013,6 @@ impl<'a> Extension<'a> { } /// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs). - /// Returns a vec of commit_pos representing the pos and height of the outputs spent - /// by this block. pub fn apply_block(&mut self, b: &Block, batch: &Batch<'_>) -> Result<(), Error> { let mut affected_pos = vec![]; @@ -1022,7 +1022,7 @@ impl<'a> Extension<'a> { for out in b.outputs() { let pos = self.apply_output(out, batch)?; affected_pos.push(pos); - batch.save_output_pos_height(&out.commitment(), pos, b.header.height)?; + batch.save_output_pos_height(&out.commitment(), CommitPos { pos, height })?; } // Remove the output from the output and rangeproof MMRs. @@ -1037,10 +1037,15 @@ impl<'a> Extension<'a> { } batch.save_spent_index(&b.hash(), &spent)?; + let mut kernel_undo_list = vec![]; for kernel in b.kernels() { + if let Ok(prev_pos) = batch.get_kernel_pos_height(&kernel.excess) { + kernel_undo_list.push(prev_pos); + } let pos = self.apply_kernel(kernel)?; - batch.save_kernel_pos_height(&kernel.excess(), pos, b.header.height)?; + batch.save_kernel_pos_height(&kernel.excess(), CommitPos { pos, height })?; } + batch.save_kernel_undo_index(&b.hash(), &kernel_undo_list)?; // Update our BitmapAccumulator based on affected outputs (both spent and created). self.apply_to_bitmap_accumulator(&affected_pos)?; @@ -1069,9 +1074,9 @@ impl<'a> Extension<'a> { fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result { let commit = input.commitment(); - if let Some((pos, height)) = batch.get_output_pos_height(&commit)? { + if let Some(pos) = batch.get_output_pos_height(&commit)? { // First check this input corresponds to an existing entry in the output MMR. - if let Some(out) = self.output_pmmr.get_data(pos) { + if let Some(out) = self.output_pmmr.get_data(pos.pos) { if OutputIdentifier::from(input) != out { return Err(ErrorKind::TxHashSetErr("output pmmr mismatch".to_string()).into()); } @@ -1080,12 +1085,12 @@ impl<'a> Extension<'a> { // Now prune the output_pmmr, rproof_pmmr and their storage. 
// Input is not valid if we cannot prune successfully (to spend an unspent // output). - match self.output_pmmr.prune(pos) { + match self.output_pmmr.prune(pos.pos) { Ok(true) => { self.rproof_pmmr - .prune(pos) + .prune(pos.pos) .map_err(ErrorKind::TxHashSetErr)?; - Ok(CommitPos { pos, height }) + Ok(pos) } Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()), Err(e) => Err(ErrorKind::TxHashSetErr(e).into()), @@ -1280,8 +1285,8 @@ impl<'a> Extension<'a> { // reused output commitment. For example an output at pos 1, spent, reused at pos 2. // The output_pos index should be updated to reflect the old pos 1 when unspent. if let Ok(spent) = spent { - for (x, y) in block.inputs().into_iter().zip(spent) { - batch.save_output_pos_height(&x.commitment(), y.pos, y.height)?; + for (input, pos) in block.inputs().into_iter().zip(spent) { + batch.save_output_pos_height(&input.commitment(), pos)?; } } diff --git a/chain/src/types.rs b/chain/src/types.rs index b07e100fdb..e19a4286e6 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -259,7 +259,7 @@ impl OutputRoots { } /// Minimal struct representing a known MMR position and associated block height. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub struct CommitPos { /// MMR position pub pos: u64, From 62bbf913dccb329487bdfa20bb58fddf066d4a72 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 6 Mar 2020 16:28:46 +0000 Subject: [PATCH 10/11] handle kernel_pos during block rewind --- chain/src/store.rs | 29 ++++++++++++++++++--- chain/src/txhashset/txhashset.rs | 44 ++++++++++++++++++++------------ 2 files changed, 52 insertions(+), 21 deletions(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index 58796b9dd9..55b339b3a2 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -215,7 +215,11 @@ impl<'a> Batch<'a> { /// to be easily reverted during rewind. /// We allow duplicate kernels so we need to know what to revert the kernel_pos /// index to if we "undo" a kernel when rewinding a block. - pub fn save_kernel_undo_index(&self, h: &Hash, pos: &Vec) -> Result<(), Error> { + pub fn save_kernel_undo_list( + &self, + h: &Hash, + pos: &Vec<(Commitment, CommitPos)>, + ) -> Result<(), Error> { self.db .put_ser(&to_key(BLOCK_KERNEL_UNDO_PREFIX, &mut h.to_vec())[..], pos)?; Ok(()) @@ -248,7 +252,7 @@ impl<'a> Batch<'a> { { let _ = self.delete_block_sums(bh); let _ = self.delete_spent_index(bh); - let _ = self.delete_kernel_undo_index(bh); + let _ = self.delete_kernel_undo_list(bh); } Ok(()) @@ -273,12 +277,18 @@ impl<'a> Batch<'a> { ) } - /// Delete the output_pos index entry for a spent output. + /// Delete a output_pos index entry. pub fn delete_output_pos_height(&self, commit: &Commitment) -> Result<(), Error> { self.db .delete(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec())) } + /// Delete a kernel_pos index entry + pub fn delete_kernel_pos_height(&self, excess: &Commitment) -> Result<(), Error> { + self.db + .delete(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())) + } + /// When using the output_pos iterator we have access to the index keys but not the /// original commitment that the key is constructed from. So we need a way of comparing /// a key with another commitment without reconstructing the commitment from the key bytes. 
@@ -357,7 +367,7 @@ impl<'a> Batch<'a> { .delete(&to_key(BLOCK_SPENT_PREFIX, &mut bh.to_vec())) } - fn delete_kernel_undo_index(&self, bh: &Hash) -> Result<(), Error> { + fn delete_kernel_undo_list(&self, bh: &Hash) -> Result<(), Error> { self.db .delete(&to_key(BLOCK_KERNEL_UNDO_PREFIX, &mut bh.to_vec())) } @@ -416,6 +426,17 @@ impl<'a> Batch<'a> { ) } + /// Get the kernel "undo list" from the db for the specified block. + /// If we need to rewind a block then we use this to revert the index to previous kernel pos + /// in the case of duplicates. + pub fn get_kernel_undo_list(&self, bh: &Hash) -> Result, Error> { + option_to_not_found( + self.db + .get_ser(&to_key(BLOCK_KERNEL_UNDO_PREFIX, &mut bh.to_vec())), + || format!("kernel undo list: {}", bh), + ) + } + /// Commits this batch. If it's a child batch, it will be merged with the /// parent, otherwise the batch is written to db. pub fn commit(self) -> Result<(), Error> { diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index a6bec05ad5..9128f3eb35 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -1040,12 +1040,12 @@ impl<'a> Extension<'a> { let mut kernel_undo_list = vec![]; for kernel in b.kernels() { if let Ok(prev_pos) = batch.get_kernel_pos_height(&kernel.excess) { - kernel_undo_list.push(prev_pos); + kernel_undo_list.push((kernel.excess, prev_pos)); } let pos = self.apply_kernel(kernel)?; batch.save_kernel_pos_height(&kernel.excess(), CommitPos { pos, height })?; } - batch.save_kernel_undo_index(&b.hash(), &kernel_undo_list)?; + batch.save_kernel_undo_list(&b.hash(), &kernel_undo_list)?; // Update our BitmapAccumulator based on affected outputs (both spent and created). self.apply_to_bitmap_accumulator(&affected_pos)?; @@ -1235,6 +1235,9 @@ impl<'a> Extension<'a> { header: &BlockHeader, batch: &Batch<'_>, ) -> Result, Error> { + // Look the full block up in the db. We can only rewind full blocks. + let block = batch.get_block(&header.hash())?; + // The spent index allows us to conveniently "unspend" everything in a block. let spent = batch.get_spent_index(&header.hash()); @@ -1263,25 +1266,13 @@ impl<'a> Extension<'a> { let mut affected_pos = spent_pos.clone(); affected_pos.push(self.output_pmmr.last_pos); - // Remove any entries from the output_pos created by the block being rewound. - let block = batch.get_block(&header.hash())?; - let mut missing_count = 0; + // Remove any output_pos entries created by the block being rewound. for out in block.outputs() { - if batch.delete_output_pos_height(&out.commitment()).is_err() { - missing_count += 1; - } - } - if missing_count > 0 { - warn!( - "rewind_single_block: {} output_pos entries missing for: {} at {}", - missing_count, - header.hash(), - header.height, - ); + let _ = batch.delete_output_pos_height(&out.commitment()); } // Update output_pos based on "unspending" all spent pos from this block. - // This is necessary to ensure the output_pos index correclty reflects a + // This is necessary to ensure the output_pos index correctly reflects a // reused output commitment. For example an output at pos 1, spent, reused at pos 2. // The output_pos index should be updated to reflect the old pos 1 when unspent. if let Ok(spent) = spent { @@ -1290,9 +1281,28 @@ impl<'a> Extension<'a> { } } + // Update the kernel_pos index for the block being rewound. 
+ self.rewind_single_block_kernels(&block, batch)?; + Ok(affected_pos) } + fn rewind_single_block_kernels(&self, block: &Block, batch: &Batch<'_>) -> Result<(), Error> { + // Remove any kernel_pos entries created by the block being rewound. + for kern in block.kernels() { + let _ = batch.delete_kernel_pos_height(&kern.excess()); + } + + // Add back any previous kernel instances replaced by the block being rewound. + if let Ok(undo_list) = batch.get_kernel_undo_list(&block.hash()) { + for (excess, pos) in undo_list { + batch.save_kernel_pos_height(&excess, pos)?; + } + } + + Ok(()) + } + /// Rewinds the MMRs to the provided positions, given the output and /// kernel pos we want to rewind to. fn rewind_mmrs_to_pos( From eec3137a1b5c7876d1a2ef3bbe8b91e240dee480 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 21 Mar 2020 20:58:34 +0000 Subject: [PATCH 11/11] commit --- chain/src/txhashset/txhashset.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 9128f3eb35..1c9db5452b 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -232,7 +232,7 @@ impl TxHashSet { ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); if let Some(out) = output_pmmr.get_data(pos.pos) { if OutputIdentifier::from(out) == *output_id { - Ok(Some(CommitPos { pos, height })) + Ok(Some(pos)) } else { Ok(None) } @@ -416,7 +416,7 @@ impl TxHashSet { let mut removed_count = 0; for (key, pos) in batch.output_pos_iter()? { if let Some(out) = output_pmmr.get_data(pos.pos) { - if let Ok(pos_via_mmr) = batch.get_output_pos_height(&out.commitment())? { + if let Some(pos_via_mmr) = batch.get_output_pos_height(&out.commitment())? { // If the pos matches and the index key matches the commitment // then keep the entry, other we want to clean it up. if pos == pos_via_mmr && batch.is_match_output_pos_key(&key, &out.commitment()) @@ -1014,6 +1014,7 @@ impl<'a> Extension<'a> { /// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs). pub fn apply_block(&mut self, b: &Block, batch: &Batch<'_>) -> Result<(), Error> { + let height = b.header.height; let mut affected_pos = vec![]; // Apply the output to the output and rangeproof MMRs. @@ -1037,6 +1038,9 @@ impl<'a> Extension<'a> { } batch.save_spent_index(&b.hash(), &spent)?; + // Apply each kernel. + // Update the kernel_pos index to reflect the new kernels. + // Build the kernel "undo list" for this block. let mut kernel_undo_list = vec![]; for kernel in b.kernels() { if let Ok(prev_pos) = batch.get_kernel_pos_height(&kernel.excess) {
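Closing note: the net effect of the series is that the kernel_pos index retains roughly two weeks of recent kernel history. On mainnet, kernel_index_horizon() is KERNEL_RELATIVE_HEIGHT_LIMIT + CUT_THROUGH_HORIZON (one week of blocks each, per consensus.rs above), and compact_kernel_pos_index() drops any entry whose height falls below head height minus that horizon. Below is a minimal sketch of that cutoff arithmetic, assuming Grin's 60-second block time (so one week is 10_080 blocks); the constants are copied here for illustration and are not imports from the real crates.

// Constants mirroring consensus.rs, assuming 60s blocks (1440 per day).
const WEEK_HEIGHT: u64 = 7 * 24 * 60;
const CUT_THROUGH_HORIZON: u64 = WEEK_HEIGHT;
const KERNEL_RELATIVE_HEIGHT_LIMIT: u64 = WEEK_HEIGHT;

/// Mainnet kernel index horizon: the NSKR height limit plus the cut-through horizon.
fn kernel_index_horizon() -> u64 {
    KERNEL_RELATIVE_HEIGHT_LIMIT + CUT_THROUGH_HORIZON
}

/// compact_kernel_pos_index() removes index entries whose height is below this cutoff.
fn kernel_index_cutoff(head_height: u64) -> u64 {
    head_height.saturating_sub(kernel_index_horizon())
}

fn main() {
    // With the chain head at height 1_000_000 the index keeps the most recent
    // 20_160 blocks (~14 days) of kernels.
    assert_eq!(kernel_index_cutoff(1_000_000), 1_000_000 - 20_160);
    // Early in the chain's life the cutoff saturates at the genesis height.
    assert_eq!(kernel_index_cutoff(100), 0);
}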