Fix kernel history verification #1218

Merged on Jul 5, 2018 (2 commits)
10 changes: 5 additions & 5 deletions chain/src/chain.rs
@@ -432,7 +432,7 @@ impl Chain {
// ensure the view is consistent.
txhashset::extending_readonly(&mut txhashset, |extension| {
// TODO - is this rewind guaranteed to be redundant now?
- extension.rewind(&header, &header, true, true, true)?;
+ extension.rewind(&header, &header)?;
extension.validate(&header, skip_rproofs, &NoStatus)?;
Ok(())
})
@@ -502,7 +502,7 @@ impl Chain {
{
let mut txhashset = self.txhashset.write().unwrap();
txhashset::extending_readonly(&mut txhashset, |extension| {
- extension.rewind(&header, &head_header, true, true, true)?;
+ extension.rewind(&header, &head_header)?;
extension.snapshot(&header)?;
Ok(())
})?;
@@ -530,7 +530,7 @@
where
T: TxHashsetWriteStatus,
{
- self.txhashset_lock.lock().unwrap();
+ let _ = self.txhashset_lock.lock().unwrap();
status.on_setup();
let head = self.head().unwrap();
let header_head = self.get_header_head().unwrap();
@@ -550,7 +550,7 @@
txhashset::extending(&mut txhashset, &mut batch, |extension| {
// TODO do we need to rewind here? We have no blocks to rewind
// (and we need them for the pos to unremove)
- extension.rewind(&header, &header, true, true, true)?;
+ extension.rewind(&header, &header)?;
extension.validate(&header, false, status)?;
extension.rebuild_index()?;
Ok(())
@@ -816,7 +816,7 @@ fn setup_head(
let header = store.get_block_header(&head.last_block_h)?;

let res = txhashset::extending(txhashset, &mut batch, |extension| {
- extension.rewind(&header, &head_header, true, true, true)?;
+ extension.rewind(&header, &head_header)?;
extension.validate_roots(&header)?;
debug!(
LOGGER,
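Note on the lock change in chain.rs above: in Rust, a value bound to the `_` pattern is dropped at the end of the statement, exactly like the bare expression statement it replaces, so `let _ = self.txhashset_lock.lock().unwrap();` makes the discarded result explicit but does not hold the lock for the rest of the function; that would require a named binding such as `let _lock = ...`. A minimal standalone sketch of the distinction (the mutex and names here are illustrative, not from the PR):

use std::sync::Mutex;

fn main() {
    let m = Mutex::new(0);

    // `_` does not bind: the guard is dropped at the end of this
    // statement and the mutex is immediately unlocked again.
    let _ = m.lock().unwrap();

    // A named binding keeps the guard (and therefore the lock)
    // alive until the end of the enclosing scope.
    let _guard = m.lock().unwrap();
    println!("still holding the lock here");
} // `_guard` dropped here; the mutex is released.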
2 changes: 1 addition & 1 deletion chain/src/pipe.rs
@@ -500,7 +500,7 @@ pub fn rewind_and_apply_fork(
);

// rewind the sum trees up to the forking block
- ext.rewind(&forked_header, &head_header, true, true, true)?;
+ ext.rewind(&forked_header, &head_header)?;

trace!(
LOGGER,
62 changes: 23 additions & 39 deletions chain/src/txhashset.rs
@@ -65,11 +65,12 @@ where
fn new(
root_dir: String,
file_name: &str,
+ prunable: bool,
header: Option<&BlockHeader>,
) -> Result<PMMRHandle<T>, Error> {
let path = Path::new(&root_dir).join(TXHASHSET_SUBDIR).join(file_name);
fs::create_dir_all(path.clone())?;
- let be = PMMRBackend::new(path.to_str().unwrap().to_string(), header)?;
+ let be = PMMRBackend::new(path.to_str().unwrap().to_string(), prunable, header)?;
let sz = be.unpruned_size()?;
Ok(PMMRHandle {
backend: be,
@@ -120,9 +121,9 @@ impl TxHashSet {
fs::create_dir_all(kernel_file_path.clone())?;

Ok(TxHashSet {
- output_pmmr_h: PMMRHandle::new(root_dir.clone(), OUTPUT_SUBDIR, header)?,
- rproof_pmmr_h: PMMRHandle::new(root_dir.clone(), RANGE_PROOF_SUBDIR, header)?,
- kernel_pmmr_h: PMMRHandle::new(root_dir.clone(), KERNEL_SUBDIR, None)?,
+ output_pmmr_h: PMMRHandle::new(root_dir.clone(), OUTPUT_SUBDIR, true, header)?,
+ rproof_pmmr_h: PMMRHandle::new(root_dir.clone(), RANGE_PROOF_SUBDIR, true, header)?,
+ kernel_pmmr_h: PMMRHandle::new(root_dir.clone(), KERNEL_SUBDIR, false, None)?,
commit_index,
})
}
@@ -461,9 +462,6 @@ impl<'a> Extension<'a> {
kernel_pos,
&rewind_add_pos,
rewind_rm_pos,
- true,
- true,
- true,
)?;
Ok(())
}
@@ -725,7 +723,7 @@ impl<'a> Extension<'a> {

// rewind to the specified block for a consistent view
let head_header = self.commit_index.head_header()?;
- self.rewind(block_header, &head_header, true, true, true)?;
+ self.rewind(block_header, &head_header)?;

// then calculate the Merkle Proof based on the known pos
let pos = self.batch.get_output_pos(&output.commit)?;
@@ -757,9 +755,6 @@ impl<'a> Extension<'a> {
&mut self,
block_header: &BlockHeader,
head_header: &BlockHeader,
- rewind_utxo: bool,
- rewind_kernel: bool,
- rewind_rangeproof: bool,
) -> Result<(), Error> {
trace!(
LOGGER,
@@ -787,12 +782,7 @@ impl<'a> Extension<'a> {
block_header.kernel_mmr_size,
&rewind_add_pos,
&rewind_rm_pos.1,
- rewind_utxo,
- rewind_kernel,
- rewind_rangeproof,
- )?;
-
- Ok(())
+ )
}

/// Rewinds the MMRs to the provided positions, given the output and
@@ -803,9 +793,6 @@ impl<'a> Extension<'a> {
kernel_pos: u64,
rewind_add_pos: &Bitmap,
rewind_rm_pos: &Bitmap,
- rewind_utxo: bool,
- rewind_kernel: bool,
- rewind_rproof: bool,
) -> Result<(), Error> {
trace!(
LOGGER,
@@ -819,22 +806,15 @@
// been sync'd to disk.
self.new_output_commits.retain(|_, &mut v| v <= output_pos);

- if rewind_utxo {
- self.output_pmmr
- .rewind(output_pos, rewind_add_pos, rewind_rm_pos)
- .map_err(&ErrorKind::TxHashSetErr)?;
- }
- if rewind_rproof {
- self.rproof_pmmr
- .rewind(output_pos, rewind_add_pos, rewind_rm_pos)
- .map_err(&ErrorKind::TxHashSetErr)?;
- }
- if rewind_kernel {
- self.kernel_pmmr
- .rewind(kernel_pos, rewind_add_pos, rewind_rm_pos)
- .map_err(&ErrorKind::TxHashSetErr)?;
- }

+ self.output_pmmr
+ .rewind(output_pos, rewind_add_pos, rewind_rm_pos)
+ .map_err(&ErrorKind::TxHashSetErr)?;
+ self.rproof_pmmr
+ .rewind(output_pos, rewind_add_pos, rewind_rm_pos)
+ .map_err(&ErrorKind::TxHashSetErr)?;
+ self.kernel_pmmr
+ .rewind(kernel_pos, rewind_add_pos, rewind_rm_pos)
+ .map_err(&ErrorKind::TxHashSetErr)?;
Ok(())
}

@@ -1068,14 +1048,17 @@ impl<'a> Extension<'a> {
// fast sync where a reorg past the horizon could allow a whole rewrite of
// the kernel set.
let mut current = header.clone();
+ let empty_bitmap = Bitmap::create();
loop {
current = self.commit_index.get_block_header(&current.previous)?;
if current.height == 0 {
break;
}
let head_header = self.commit_index.head_header()?;
- // rewinding further and further back
- self.rewind(&current, &head_header, false, true, false)?;
+ // rewinding kernels only further and further back
+ self.kernel_pmmr
+ .rewind(current.kernel_mmr_size, &empty_bitmap, &empty_bitmap)
+ .map_err(&ErrorKind::TxHashSetErr)?;

if self.kernel_pmmr.root() != current.kernel_root {
return Err(ErrorKind::InvalidTxHashSet(format!(
"Kernel root at {} does not match",
@@ -1085,6 +1068,7 @@
}
Ok(())
}

}

/// Packages the txhashset data files into a zip and returns a Read to the
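The txhashset.rs changes above carry the actual fix: validate_kernel_history no longer calls the general `rewind` with per-MMR boolean flags; it rewinds the kernel MMR alone. Since the kernel MMR is now opened as non-prunable, none of its leaves are ever removed, so the rewind can pass empty add/remove bitmaps. A condensed sketch of the resulting verification loop, assembled from the hunks above (error construction abbreviated where the diff is truncated):

// Walk the header chain backwards, rewinding only the kernel MMR to
// each header's recorded size and checking the kernel root it commits to.
let empty_bitmap = Bitmap::create();
let mut current = header.clone();
loop {
    current = self.commit_index.get_block_header(&current.previous)?;
    if current.height == 0 {
        break;
    }
    // Kernels are never pruned, so the rewind needs no leaf-set deltas.
    self.kernel_pmmr
        .rewind(current.kernel_mmr_size, &empty_bitmap, &empty_bitmap)
        .map_err(&ErrorKind::TxHashSetErr)?;
    if self.kernel_pmmr.root() != current.kernel_root {
        // Abbreviated: the code above returns ErrorKind::InvalidTxHashSet
        // with a "Kernel root at {height} does not match" message.
        return Err(ErrorKind::InvalidTxHashSet(format!(
            "Kernel root at {} does not match",
            current.height
        )).into());
    }
}
Ok(())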
92 changes: 24 additions & 68 deletions store/src/pmmr.rs
@@ -57,6 +57,7 @@ where
T: PMMRable,
{
data_dir: String,
+ prunable: bool,
hash_file: AppendOnlyFile,
data_file: AppendOnlyFile,
leaf_set: LeafSet,
@@ -76,8 +77,10 @@ where
if let Some(elem) = d.1 {
self.data_file.append(&mut ser::ser_vec(&elem).unwrap());

- // Add the new position to our leaf_set.
- self.leaf_set.add(position);
+ if self.prunable {
+ // Add the new position to our leaf_set.
+ self.leaf_set.add(position);
+ }
}
}
Ok(())
@@ -137,7 +140,7 @@ where
/// Return None if pos is a leaf and it has been removed (or pruned or
/// compacted).
fn get_hash(&self, pos: u64) -> Option<(Hash)> {
- if pmmr::is_leaf(pos) && !self.leaf_set.includes(pos) {
+ if self.prunable && pmmr::is_leaf(pos) && !self.leaf_set.includes(pos) {
return None;
}
self.get_from_file(pos)
@@ -149,7 +152,7 @@
if !pmmr::is_leaf(pos) {
return None;
}
- if !self.leaf_set.includes(pos) {
+ if self.prunable && !self.leaf_set.includes(pos) {
return None;
}
self.get_data_from_file(pos)
@@ -163,7 +166,9 @@ where
rewind_rm_pos: &Bitmap,
) -> Result<(), String> {
// First rewind the leaf_set with the necessary added and removed positions.
- self.leaf_set.rewind(rewind_add_pos, rewind_rm_pos);
+ if self.prunable {
+ self.leaf_set.rewind(rewind_add_pos, rewind_rm_pos);
+ }

// Rewind the hash file accounting for pruned/compacted pos
let shift = self.prune_list.get_shift(position);
@@ -183,6 +188,7 @@ where

/// Remove by insertion position.
fn remove(&mut self, pos: u64) -> Result<(), String> {
+ assert!(self.prunable, "Remove on non-prunable MMR");
self.leaf_set.remove(pos);
Ok(())
}
@@ -218,60 +224,30 @@ where
{
/// Instantiates a new PMMR backend.
/// Use the provided dir to store its files.
- pub fn new(data_dir: String, header: Option<&BlockHeader>) -> io::Result<PMMRBackend<T>> {
+ pub fn new(
+ data_dir: String,
+ prunable: bool,
+ header: Option<&BlockHeader>
+ ) -> io::Result<PMMRBackend<T>> {

let hash_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE))?;
let data_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE))?;

let leaf_set_path = format!("{}/{}", data_dir, PMMR_LEAF_FILE);

- // If we received a rewound "snapshot" leaf_set file
- // move it into place so we use it.
+ // If we received a rewound "snapshot" leaf_set file move it into
+ // place so we use it.
if let Some(header) = header {
let leaf_snapshot_path = format!("{}/{}.{}", data_dir, PMMR_LEAF_FILE, header.hash());
LeafSet::copy_snapshot(leaf_set_path.clone(), leaf_snapshot_path.clone())?;
}

- // If we need to migrate legacy prune_list do it here before we start.
- // Do *not* migrate if we already have a non-empty prune_list.
- let mut prune_list = PruneList::open(format!("{}/{}", data_dir, PMMR_PRUN_FILE))?;
- let legacy_prune_list_path = format!("{}/{}", data_dir, LEGACY_PRUNED_FILE);
- if prune_list.is_empty() && Path::new(&legacy_prune_list_path).exists() {
- debug!(LOGGER, "pmmr: migrating prune_list -> bitmap prune_list");
- let legacy_prune_pos = read_ordered_vec(legacy_prune_list_path, 8)?;
- for x in legacy_prune_pos {
- prune_list.add(x);
- }
- prune_list.flush()?;
- }

- // If we need to migrate legacy rm_log to a new leaf_set do it here before we
- // start. Do *not* migrate if we already have a non-empty leaf_set.
- let mut leaf_set = LeafSet::open(leaf_set_path.clone())?;
- let legacy_rm_log_path = format!("{}/{}", data_dir, LEGACY_RM_LOG_FILE);
- if leaf_set.is_empty() && Path::new(&legacy_rm_log_path).exists() {
- debug!(LOGGER, "pmmr: migrating rm_log -> leaf_set");
- let mut rm_log = RemoveLog::open(legacy_rm_log_path)?;
- if let Some(header) = header {
- // Rewind the rm_log back to the height of the header we care about.
- debug!(
- LOGGER,
- "pmmr: first rewinding rm_log to height {}", header.height
- );
- rm_log.rewind(header.height as u32)?;
- }

- let last_pos = {
- let total_shift = prune_list.get_total_shift();
- let record_len = 32;
- let sz = hash_file.size()?;
- sz / record_len + total_shift
- };

- migrate_rm_log(&mut leaf_set, &rm_log, &prune_list, last_pos)?;
- }
+ let prune_list = PruneList::open(format!("{}/{}", data_dir, PMMR_PRUN_FILE))?;
+ let leaf_set = LeafSet::open(leaf_set_path.clone())?;

Ok(PMMRBackend {
data_dir,
+ prunable,
hash_file,
data_file,
leaf_set,
@@ -369,6 +345,8 @@
where
P: Fn(&[u8]),
{
+ assert!(self.prunable, "Trying to compact a non-prunable PMMR");

// Paths for tmp hash and data files.
let tmp_prune_file_hash = format!("{}/{}.hashprune", self.data_dir, PMMR_HASH_FILE);
let tmp_prune_file_data = format!("{}/{}.dataprune", self.data_dir, PMMR_DATA_FILE);
@@ -500,25 +478,3 @@ fn removed_excl_roots(removed: Bitmap) -> Bitmap {
.collect()
}

- fn migrate_rm_log(
- leaf_set: &mut LeafSet,
- rm_log: &RemoveLog,
- prune_list: &PruneList,
- last_pos: u64,
- ) -> io::Result<()> {
- info!(
- LOGGER,
- "Migrating rm_log -> leaf_set. Might take a little while... {} pos", last_pos
- );

- // check every leaf
- // if not pruned and not removed, add it to the leaf_set
- for x in 1..=last_pos {
- if pmmr::is_leaf(x) && !prune_list.is_pruned(x) && !rm_log.includes(x) {
- leaf_set.add(x);
- }
- }

- leaf_set.flush()?;
- Ok(())
- }
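Taken together, the store/src/pmmr.rs changes make pruning an explicit property of the backend: only a prunable backend maintains a leaf_set on append and rewind, `remove` and compaction assert on non-prunable backends, and the legacy rm_log/prune_list migration paths are deleted outright. A sketch of the new constructor in use, mirroring the `TxHashSet::open` hunk earlier in the PR; the element types and directory paths here are illustrative assumptions, not taken from the diff:

// Output and range-proof MMRs are prunable: spent leaves are tracked
// in a leaf_set, and a rewound snapshot header may be supplied on open.
let output_be: PMMRBackend<OutputIdentifier> =
    PMMRBackend::new("txhashset/output".to_string(), true, Some(&header))?;

// The kernel MMR is append-only: prunable is false and no snapshot
// header is needed, since its leaf_set is never consulted.
let kernel_be: PMMRBackend<TxKernel> =
    PMMRBackend::new("txhashset/kernel".to_string(), false, None)?;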