Bootup fixes for 2.05.0.2.0-rc1 #3112

Merged (6 commits) on Apr 25, 2022
39 changes: 30 additions & 9 deletions src/chainstate/burn/db/sortdb.rs
@@ -66,6 +66,7 @@ use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY;
use crate::net::{Error as NetError, Error};
use crate::util_lib::db::tx_begin_immediate;
use crate::util_lib::db::tx_busy_handler;
use crate::util_lib::db::DBTx;
use crate::util_lib::db::Error as db_error;
use crate::util_lib::db::{
db_mkdirs, query_count, query_row, query_row_columns, query_row_panic, query_rows, sql_pragma,
@@ -2477,7 +2478,7 @@ impl SortitionDB {
Ok(version)
}

fn apply_schema_2(tx: &SortitionDBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> {
fn apply_schema_2(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> {
for sql_exec in SORTITION_DB_SCHEMA_2 {
tx.execute_batch(sql_exec)?;
}
@@ -2492,7 +2493,7 @@ impl SortitionDB {
Ok(())
}

fn apply_schema_3(tx: &SortitionDBTx) -> Result<(), db_error> {
fn apply_schema_3(tx: &DBTx) -> Result<(), db_error> {
for sql_exec in SORTITION_DB_SCHEMA_3 {
tx.execute_batch(sql_exec)?;
}
@@ -2510,30 +2511,32 @@ impl SortitionDB {
if version == expected_version {
Ok(())
} else {
Err(db_error::Other(format!(
"The version of the sortition DB {} does not match the expected {} and cannot be updated from SortitionDB::open()",
version, expected_version
)))
let version_u64 = version.parse::<u64>().unwrap();
Err(db_error::OldSchema(version_u64))
}
}
Ok(None) => panic!("The schema version of the sortition DB is not recorded."),
Err(e) => panic!("Error obtaining the version of the sortition DB: {:?}", e),
}
}

fn check_schema_version_and_update(&mut self, epochs: &[StacksEpoch]) -> Result<(), db_error> {
/// Migrate the sortition DB to its latest version, given the set of system epochs
pub fn check_schema_version_and_update(
&mut self,
epochs: &[StacksEpoch],
) -> Result<(), db_error> {
let expected_version = SORTITION_DB_VERSION.to_string();
loop {
match SortitionDB::get_schema_version(self.conn()) {
Ok(Some(version)) => {
if version == "1" {
let tx = self.tx_begin()?;
SortitionDB::apply_schema_2(&tx, epochs)?;
SortitionDB::apply_schema_2(&tx.deref(), epochs)?;
tx.commit()?;
} else if version == "2" {
// add the tables of schema 3, but do not populate them.
let tx = self.tx_begin()?;
SortitionDB::apply_schema_3(&tx)?;
SortitionDB::apply_schema_3(&tx.deref())?;
tx.commit()?;
} else if version == expected_version {
return Ok(());
@@ -2547,6 +2550,24 @@ impl SortitionDB {
}
}

/// Open and migrate the sortition DB if it exists.
pub fn migrate_if_exists(path: &str, epochs: &[StacksEpoch]) -> Result<(), db_error> {
if let Err(db_error::OldSchema(_)) = SortitionDB::open(path, false) {
let index_path = db_mkdirs(path)?;
let marf = SortitionDB::open_index(&index_path)?;
let mut db = SortitionDB {
marf,
readwrite: true,
// not used by migration logic
first_block_height: 0,
first_burn_header_hash: BurnchainHeaderHash([0xff; 32]),
};
db.check_schema_version_and_update(epochs)
} else {
Ok(())
}
}

fn add_indexes(&mut self) -> Result<(), db_error> {
// do we need to instantiate indexes?
// only do a transaction if we need to, since this gets called each time the sortition DB
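The net effect of the sortdb.rs changes is that schema migration no longer requires a successful SortitionDB::open(): an out-of-date DB now surfaces as db_error::OldSchema, and migrate_if_exists() catches that case and upgrades in place. A minimal sketch of how a caller might drive it, assuming the StacksEpoch import path and that `epochs` comes from the burnchain configuration (the helper below is illustrative, not part of the PR):

use crate::chainstate::burn::db::sortdb::SortitionDB;
use crate::core::StacksEpoch; // assumed import path for StacksEpoch
use crate::util_lib::db::Error as db_error;

/// Illustrative helper: upgrade an existing sortition DB in place.
fn upgrade_sortition_db(sortdb_path: &str, epochs: &[StacksEpoch]) -> Result<(), db_error> {
    if std::fs::metadata(sortdb_path).is_ok() {
        // Only acts when SortitionDB::open() reports db_error::OldSchema; the
        // migration re-opens the MARF index directly and applies schema 2 and
        // schema 3 through check_schema_version_and_update().
        SortitionDB::migrate_if_exists(sortdb_path, epochs)?;
    }
    Ok(())
}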
34 changes: 34 additions & 0 deletions src/chainstate/coordinator/mod.rs
@@ -59,6 +59,8 @@ use crate::types::chainstate::{
};
use clarity::vm::database::BurnStateDB;

use crate::chainstate::stacks::index::marf::MARFOpenOpts;

pub use self::comm::CoordinatorCommunication;

pub mod comm;
Expand Down Expand Up @@ -935,3 +937,35 @@ pub fn check_chainstate_db_versions(

Ok(true)
}

/// Migrate all databases to their latest schemas.
/// Verifies first that doing so is possible under the given epochs.
pub fn migrate_chainstate_dbs(
epochs: &[StacksEpoch],
sortdb_path: &str,
chainstate_path: &str,
chainstate_marf_opts: Option<MARFOpenOpts>,
) -> Result<(), Error> {
if !check_chainstate_db_versions(epochs, sortdb_path, chainstate_path)? {
warn!("Unable to migrate chainstate DBs to the latest schemas in the current epoch");
return Err(DBError::TooOldForEpoch.into());
}

if fs::metadata(&sortdb_path).is_ok() {
info!("Migrating sortition DB to the latest schema version");
SortitionDB::migrate_if_exists(&sortdb_path, epochs)?;
}
if fs::metadata(&chainstate_path).is_ok() {
info!("Migrating chainstate DB to the latest schema version");
let db_config = StacksChainState::get_db_config_from_path(&chainstate_path)?;

// this does the migration internally
let _ = StacksChainState::open(
db_config.mainnet,
db_config.chain_id,
chainstate_path,
chainstate_marf_opts,
)?;
}
Ok(())
}
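migrate_chainstate_dbs() is the single entry point the node uses at boot (see the run-loop change further down). A minimal sketch of a caller that distinguishes a fatal epoch mismatch from other failures; the StacksEpoch import path is assumed, the paths are supplied by the caller, and None selects the default MARF open options:

use crate::chainstate::coordinator::{migrate_chainstate_dbs, Error as coord_error};
use crate::core::StacksEpoch; // assumed import path for StacksEpoch
use crate::util_lib::db::Error as db_error;

/// Illustrative wrapper around the new coordinator helper.
fn upgrade_all_dbs(
    epochs: &[StacksEpoch],
    sortdb_path: &str,
    chainstate_path: &str,
) -> Result<(), coord_error> {
    match migrate_chainstate_dbs(epochs, sortdb_path, chainstate_path, None) {
        Ok(()) => Ok(()),
        // The DBs exist but cannot serve the configured epochs; no migration will help.
        Err(coord_error::DBError(db_error::TooOldForEpoch)) => {
            panic!("chainstate database(s) are not compatible with the current system epoch")
        }
        Err(e) => Err(e),
    }
}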
2 changes: 1 addition & 1 deletion src/net/p2p.rs
@@ -1927,7 +1927,7 @@ impl PeerNetwork {
}

for (event_id, convo) in self.peers.iter() {
if convo.is_authenticated() {
if convo.is_authenticated() && convo.stats.last_contact_time > 0 {
// have handshaked with this remote peer
if convo.stats.last_contact_time
+ (convo.peer_heartbeat as u64)
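The p2p.rs change tightens what counts as a handshaked peer: being authenticated is no longer enough; the conversation must also have a nonzero last_contact_time before it enters the heartbeat bookkeeping. A tiny self-contained sketch of the predicate, with PeerConvo and its fields as stand-ins for the real conversation type:

// Stand-in types; the real check lives on the p2p conversation in src/net.
struct PeerStats {
    last_contact_time: u64,
}

struct PeerConvo {
    authenticated: bool,
    stats: PeerStats,
}

impl PeerConvo {
    fn is_authenticated(&self) -> bool {
        self.authenticated
    }

    /// A peer is treated as fully handshaked only if it is authenticated
    /// and we have actually heard from it at least once.
    fn is_handshaked(&self) -> bool {
        self.is_authenticated() && self.stats.last_contact_time > 0
    }
}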
10 changes: 10 additions & 0 deletions src/util_lib/db.rs
@@ -106,6 +106,10 @@ pub enum Error {
IOError(IOError),
/// MARF index error
IndexError(MARFError),
/// Old schema error
OldSchema(u64),
/// Database is too old for epoch
TooOldForEpoch,
/// Other error
Other(String),
}
@@ -127,6 +131,10 @@ impl fmt::Display for Error {
Error::IOError(ref e) => fmt::Display::fmt(e, f),
Error::SqliteError(ref e) => fmt::Display::fmt(e, f),
Error::IndexError(ref e) => fmt::Display::fmt(e, f),
Error::OldSchema(ref s) => write!(f, "Old database schema: {}", s),
Error::TooOldForEpoch => {
write!(f, "Database is not compatible with current system epoch")
}
Error::Other(ref s) => fmt::Display::fmt(s, f),
}
}
@@ -149,6 +157,8 @@ impl error::Error for Error {
Error::SqliteError(ref e) => Some(e),
Error::IOError(ref e) => Some(e),
Error::IndexError(ref e) => Some(e),
Error::OldSchema(ref _s) => None,
Error::TooOldForEpoch => None,
Error::Other(ref _s) => None,
}
}
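The two new variants let callers react to schema problems structurally instead of string-matching an Other(String): OldSchema(n) means the DB reports version n and needs the migration path, while TooOldForEpoch means no migration can make it serve the configured epochs. A small illustrative match over the new variants (not part of the PR):

use crate::util_lib::db::Error as db_error;

/// Illustrative only: turn the new error variants into a caller-facing decision.
fn describe_db_error(e: &db_error) -> String {
    match e {
        // The DB reports an older schema version; the migration path
        // (e.g. SortitionDB::migrate_if_exists) can bring it up to date.
        db_error::OldSchema(version) => {
            format!("schema version {} is out of date; run the migration", version)
        }
        // No schema migration can make this DB usable with the configured epochs.
        db_error::TooOldForEpoch => {
            "database is not compatible with the current system epoch".to_string()
        }
        other => format!("database error: {}", other),
    }
}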
2 changes: 2 additions & 0 deletions testnet/stacks-node/src/neon_node.rs
@@ -1319,6 +1319,8 @@ impl StacksNode {
// bootstrap nodes *always* allowed
let mut tx = peerdb.tx_begin().unwrap();
for initial_neighbor in initial_neighbors.iter() {
// update peer in case public key changed
PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap();
PeerDB::set_allow_peer(
&mut tx,
initial_neighbor.addr.network_id,
40 changes: 24 additions & 16 deletions testnet/stacks-node/src/run_loop/neon.rs
@@ -21,10 +21,12 @@ use stacks::burnchains::{Address, Burnchain};
use stacks::chainstate::burn::db::sortdb::SortitionDB;
use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers};
use stacks::chainstate::coordinator::{
check_chainstate_db_versions, BlockEventDispatcher, ChainsCoordinator, CoordinatorCommunication,
migrate_chainstate_dbs, BlockEventDispatcher, ChainsCoordinator, CoordinatorCommunication,
Error as coord_error,
};
use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState};
use stacks::net::atlas::{AtlasConfig, Attachment, AttachmentInstance, ATTACHMENTS_CHANNEL_SIZE};
use stacks::util_lib::db::Error as db_error;
use stx_genesis::GenesisData;

use crate::monitoring::start_serving_monitoring_metrics;
@@ -323,29 +325,29 @@ impl RunLoop {
Some(self.should_keep_running.clone()),
);

// Invoke connect() to perform any db instantiation and migration early
if let Err(e) = burnchain_controller.connect_dbs() {
error!("Failed to connect to burnchain databases: {}", e);
panic!();
};

let burnchain_config = burnchain_controller.get_burnchain();
// Upgrade chainstate databases if they exist already
let epochs = burnchain_controller.get_stacks_epochs();
if !check_chainstate_db_versions(
match migrate_chainstate_dbs(
&epochs,
&self.config.get_burn_db_file_path(),
&self.config.get_chainstate_path_str(),
)
.expect("FATAL: unable to query filesystem or databases for version information")
{
error!(
"FATAL: chainstate database(s) are not compatible with the current system epoch"
);
panic!();
Some(self.config.node.get_marf_opts()),
) {
Ok(_) => {}
Err(coord_error::DBError(db_error::TooOldForEpoch)) => {
error!(
"FATAL: chainstate database(s) are not compatible with the current system epoch"
);
panic!();
}
Err(e) => {
panic!("FATAL: unable to query filesystem or databases: {:?}", &e);
}
}

info!("Start syncing Bitcoin headers, feel free to grab a cup of coffee, this can take a while");

let burnchain_config = burnchain_controller.get_burnchain();
let target_burnchain_block_height = match burnchain_config
.get_highest_burnchain_block()
.expect("FATAL: failed to access burnchain database")
@@ -372,6 +374,12 @@
}
};

// if the chainstate DBs don't exist, this will instantiate them
if let Err(e) = burnchain_controller.connect_dbs() {
error!("Failed to connect to burnchain databases: {}", e);
panic!();
};

// TODO (hack) instantiate the sortdb in the burnchain
let _ = burnchain_controller.sortdb_mut();
burnchain_controller