diff --git a/.github/actions/docsgen/Dockerfile.docsgen b/.github/actions/docsgen/Dockerfile.docsgen index 9bed9ff462..61c95fb70a 100644 --- a/.github/actions/docsgen/Dockerfile.docsgen +++ b/.github/actions/docsgen/Dockerfile.docsgen @@ -10,8 +10,8 @@ RUN cargo build RUN mkdir /out -RUN /src/target/debug/blockstack-core docgen | jq . > /out/clarity-reference.json -RUN /src/target/debug/blockstack-core docgen_boot | jq . > /out/boot-contracts-reference.json +RUN /src/target/debug/stacks-inspect docgen | jq . > /out/clarity-reference.json +RUN /src/target/debug/stacks-inspect docgen_boot | jq . > /out/boot-contracts-reference.json FROM scratch AS export-stage COPY --from=build /out/clarity-reference.json / diff --git a/.vscode/launch.json b/.vscode/launch.json index 59cd020acf..225e311dea 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -8,11 +8,11 @@ "cargo": { "args": [ "build", - "--bin=blockstack-core", + "--bin=stacks-inspect", "--package=blockstack-core" ], "filter": { - "name": "blockstack-core", + "name": "stacks-inspect", "kind": "bin" } }, @@ -118,11 +118,11 @@ "args": [ "test", "--no-run", - "--bin=blockstack-core", + "--bin=stacks-inspect", "--package=blockstack-core" ], "filter": { - "name": "blockstack-core", + "name": "stacks-inspect", "kind": "bin" } }, diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f6cc7e761..63b918d51a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,25 +13,27 @@ the node to spend up to 30 minutes migrating the data to a new schema. ### Changed - The MARF implementation will now defer calculating the root hash of a new trie until the moment the trie is committed to disk. This avoids gratuitous hash -calculations, and yields a performance improvement of anywhere between 10x and -200x (#3041). + calculations, and yields a performance improvement of anywhere between 10x and + 200x (#3041). 
- The MARF implementation will now store tries to an external file for instances where the tries are expected to exceed the SQLite page size (namely, the -Clarity database). This improves read performance by a factor of 10x to 14x -(#3059). + Clarity database). This improves read performance by a factor of 10x to 14x + (#3059). - The MARF implementation may now cache trie nodes in RAM if directed to do so by an environment variable (#3042). - Sortition processing performance has been improved by about an order of magnitude, by avoiding a slew of expensive database reads (#3045). WARNING: -applying this change to an existing chainstate directory will take a few -minutes when the node starts up. + applying this change to an existing chainstate directory will take a few + minutes when the node starts up. - Updated chains coordinator so that before a Stacks block or a burn block is processed, an event is sent through the event dispatcher. This fixes #3015. - Expose a node's public key and public key hash160 (i.e. what appears in /v2/neighbors) via the /v2/info API endpoint (#3046) - Reduced the default subsequent block attempt timeout from 180 seconds to 30 seconds, based on benchmarking the new MARF performance data during a period -of network congestion (#3098) + of network congestion (#3098) +- The `blockstack-core` binary has been renamed to `stacks-inspect`. + This binary provides CLI tools for chain and mempool inspection. 
## [2.05.0.1.0] diff --git a/Cargo.toml b/Cargo.toml index 0f2ec8507b..b4ecf0fc57 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ name = "blockstack_lib" path = "src/lib.rs" [[bin]] -name = "blockstack-core" +name = "stacks-inspect" path = "src/main.rs" [[bin]] diff --git a/build-scripts/Dockerfile.linux-arm64 b/build-scripts/Dockerfile.linux-arm64 index 66a61bf5f5..7acc30f6bf 100644 --- a/build-scripts/Dockerfile.linux-arm64 +++ b/build-scripts/Dockerfile.linux-arm64 @@ -20,4 +20,4 @@ RUN CC=aarch64-linux-gnu-gcc \ RUN mkdir /out && cp -R /src/target/aarch64-unknown-linux-gnu/release/. /out FROM scratch AS export-stage -COPY --from=build /out/blockstack-core /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file diff --git a/build-scripts/Dockerfile.linux-armv7 b/build-scripts/Dockerfile.linux-armv7 index b422761193..9fb50d18bc 100644 --- a/build-scripts/Dockerfile.linux-armv7 +++ b/build-scripts/Dockerfile.linux-armv7 @@ -20,4 +20,4 @@ RUN CC=arm-linux-gnueabihf-gcc \ RUN mkdir /out && cp -R /src/target/armv7-unknown-linux-gnueabihf/release/. /out FROM scratch AS export-stage -COPY --from=build /out/blockstack-core /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file diff --git a/build-scripts/Dockerfile.linux-musl-x64 b/build-scripts/Dockerfile.linux-musl-x64 index 6bbbc85fa7..9c6c604341 100644 --- a/build-scripts/Dockerfile.linux-musl-x64 +++ b/build-scripts/Dockerfile.linux-musl-x64 @@ -20,4 +20,4 @@ RUN CC=musl-gcc \ RUN mkdir /out && cp -R /src/target/x86_64-unknown-linux-musl/release/. 
/out FROM scratch AS export-stage -COPY --from=build /out/blockstack-core /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file diff --git a/build-scripts/Dockerfile.linux-x64 b/build-scripts/Dockerfile.linux-x64 index 65cb6dfe83..b4abb08aed 100644 --- a/build-scripts/Dockerfile.linux-x64 +++ b/build-scripts/Dockerfile.linux-x64 @@ -17,4 +17,4 @@ RUN cargo build --release --workspace --target x86_64-unknown-linux-gnu RUN mkdir /out && cp -R /src/target/x86_64-unknown-linux-gnu/release/. /out FROM scratch AS export-stage -COPY --from=build /out/blockstack-core /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file diff --git a/build-scripts/Dockerfile.macos-arm64 b/build-scripts/Dockerfile.macos-arm64 index 3692d41ad1..56cfe684a3 100644 --- a/build-scripts/Dockerfile.macos-arm64 +++ b/build-scripts/Dockerfile.macos-arm64 @@ -21,4 +21,4 @@ RUN . /opt/osxcross/env-macos-aarch64 && \ RUN mkdir /out && cp -R /src/target/aarch64-apple-darwin/release/. /out FROM scratch AS export-stage -COPY --from=build /out/blockstack-core /out/blockstack-cli /out/clarity-cli /out/stacks-node / +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.macos-x64 b/build-scripts/Dockerfile.macos-x64 index 56a37d7a97..29038b6967 100644 --- a/build-scripts/Dockerfile.macos-x64 +++ b/build-scripts/Dockerfile.macos-x64 @@ -21,4 +21,4 @@ RUN . /opt/osxcross/env-macos-x86_64 && \ RUN mkdir /out && cp -R /src/target/x86_64-apple-darwin/release/. 
/out FROM scratch AS export-stage -COPY --from=build /out/blockstack-core /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file diff --git a/build-scripts/Dockerfile.windows-x64 b/build-scripts/Dockerfile.windows-x64 index 82bef0cf0b..58785ccba7 100644 --- a/build-scripts/Dockerfile.windows-x64 +++ b/build-scripts/Dockerfile.windows-x64 @@ -19,4 +19,4 @@ RUN CC_x86_64_pc_windows_gnu=x86_64-w64-mingw32-gcc \ RUN mkdir /out && cp -R /src/target/x86_64-pc-windows-gnu/release/. /out FROM scratch AS export-stage -COPY --from=build /out/blockstack-core.exe /out/blockstack-cli.exe /out/clarity-cli.exe /out/stacks-node.exe / \ No newline at end of file +COPY --from=build /out/stacks-inspect.exe /out/blockstack-cli.exe /out/clarity-cli.exe /out/stacks-node.exe / \ No newline at end of file diff --git a/circle.yml b/circle.yml index 6c8a68878d..131712a04a 100644 --- a/circle.yml +++ b/circle.yml @@ -15,15 +15,15 @@ jobs: cargo build - run: command: | - ./target/debug/blockstack-core local initialize db && - ./target/debug/blockstack-core local check sample-contracts/tokens.clar db && - ./target/debug/blockstack-core local launch S1G2081040G2081040G2081040G208105NK8PE5.tokens sample-contracts/tokens.clar db && - ./target/debug/blockstack-core local check sample-contracts/names.clar db && - ./target/debug/blockstack-core local launch S1G2081040G2081040G2081040G208105NK8PE5.names sample-contracts/names.clar db && - ./target/debug/blockstack-core local execute db S1G2081040G2081040G2081040G208105NK8PE5.tokens mint! 
SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR u100000 + ./target/debug/stacks-inspect local initialize db && + ./target/debug/stacks-inspect local check sample-contracts/tokens.clar db && + ./target/debug/stacks-inspect local launch S1G2081040G2081040G2081040G208105NK8PE5.tokens sample-contracts/tokens.clar db && + ./target/debug/stacks-inspect local check sample-contracts/names.clar db && + ./target/debug/stacks-inspect local launch S1G2081040G2081040G2081040G208105NK8PE5.names sample-contracts/names.clar db && + ./target/debug/stacks-inspect local execute db S1G2081040G2081040G2081040G208105NK8PE5.tokens mint! SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR u100000 - run: command: | - echo "(get-balance 'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR)" | ./target/debug/blockstack-core local eval S1G2081040G2081040G2081040G208105NK8PE5.tokens db + echo "(get-balance 'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR)" | ./target/debug/stacks-inspect local eval S1G2081040G2081040G2081040G208105NK8PE5.tokens db unit_tests_with_cov: machine: true working_directory: ~/blockstack diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 0329e2a779..09f3e0ebbf 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -66,6 +66,7 @@ use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; use crate::net::{Error as NetError, Error}; use crate::util_lib::db::tx_begin_immediate; use crate::util_lib::db::tx_busy_handler; +use crate::util_lib::db::DBTx; use crate::util_lib::db::Error as db_error; use crate::util_lib::db::{ db_mkdirs, query_count, query_row, query_row_columns, query_row_panic, query_rows, sql_pragma, @@ -2477,7 +2478,7 @@ impl SortitionDB { Ok(version) } - fn apply_schema_2(tx: &SortitionDBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + fn apply_schema_2(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { for sql_exec in SORTITION_DB_SCHEMA_2 { tx.execute_batch(sql_exec)?; } @@ -2492,7 +2493,7 @@ impl 
SortitionDB { Ok(()) } - fn apply_schema_3(tx: &SortitionDBTx) -> Result<(), db_error> { + fn apply_schema_3(tx: &DBTx) -> Result<(), db_error> { for sql_exec in SORTITION_DB_SCHEMA_3 { tx.execute_batch(sql_exec)?; } @@ -2510,10 +2511,8 @@ impl SortitionDB { if version == expected_version { Ok(()) } else { - Err(db_error::Other(format!( - "The version of the sortition DB {} does not match the expected {} and cannot be updated from SortitionDB::open()", - version, expected_version - ))) + let version_u64 = version.parse::<u64>().unwrap(); + Err(db_error::OldSchema(version_u64)) } } Ok(None) => panic!("The schema version of the sortition DB is not recorded."), @@ -2521,19 +2520,23 @@ impl SortitionDB { } } - fn check_schema_version_and_update(&mut self, epochs: &[StacksEpoch]) -> Result<(), db_error> { + /// Migrate the sortition DB to its latest version, given the set of system epochs + pub fn check_schema_version_and_update( + &mut self, + epochs: &[StacksEpoch], + ) -> Result<(), db_error> { let expected_version = SORTITION_DB_VERSION.to_string(); loop { match SortitionDB::get_schema_version(self.conn()) { Ok(Some(version)) => { if version == "1" { let tx = self.tx_begin()?; - SortitionDB::apply_schema_2(&tx, epochs)?; + SortitionDB::apply_schema_2(&tx.deref(), epochs)?; tx.commit()?; } else if version == "2" { // add the tables of schema 3, but do not populate them. let tx = self.tx_begin()?; - SortitionDB::apply_schema_3(&tx)?; + SortitionDB::apply_schema_3(&tx.deref())?; tx.commit()?; } else if version == expected_version { return Ok(()); @@ -2547,6 +2550,24 @@ impl SortitionDB { } } + /// Open and migrate the sortition DB if it exists. 
+ pub fn migrate_if_exists(path: &str, epochs: &[StacksEpoch]) -> Result<(), db_error> { + if let Err(db_error::OldSchema(_)) = SortitionDB::open(path, false) { + let index_path = db_mkdirs(path)?; + let marf = SortitionDB::open_index(&index_path)?; + let mut db = SortitionDB { + marf, + readwrite: true, + // not used by migration logic + first_block_height: 0, + first_burn_header_hash: BurnchainHeaderHash([0xff; 32]), + }; + db.check_schema_version_and_update(epochs) + } else { + Ok(()) + } + } + fn add_indexes(&mut self) -> Result<(), db_error> { // do we need to instantiate indexes? // only do a transaction if we need to, since this gets called each time the sortition DB diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 0e6d13039f..aa9eae266b 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -59,6 +59,8 @@ use crate::types::chainstate::{ }; use clarity::vm::database::BurnStateDB; +use crate::chainstate::stacks::index::marf::MARFOpenOpts; + pub use self::comm::CoordinatorCommunication; pub mod comm; @@ -935,3 +937,35 @@ pub fn check_chainstate_db_versions( Ok(true) } + +/// Migrate all databases to their latest schemas. +/// Verifies that this is possible as well +pub fn migrate_chainstate_dbs( + epochs: &[StacksEpoch], + sortdb_path: &str, + chainstate_path: &str, + chainstate_marf_opts: Option<MARFOpenOpts>, +) -> Result<(), Error> { + if !check_chainstate_db_versions(epochs, sortdb_path, chainstate_path)? 
{ + warn!("Unable to migrate chainstate DBs to the latest schemas in the current epoch"); + return Err(DBError::TooOldForEpoch.into()); + } + + if fs::metadata(&sortdb_path).is_ok() { + info!("Migrating sortition DB to the latest schema version"); + SortitionDB::migrate_if_exists(&sortdb_path, epochs)?; + } + if fs::metadata(&chainstate_path).is_ok() { + info!("Migrating chainstate DB to the latest schema version"); + let db_config = StacksChainState::get_db_config_from_path(&chainstate_path)?; + + // this does the migration internally + let _ = StacksChainState::open( + db_config.mainnet, + db_config.chain_id, + chainstate_path, + chainstate_marf_opts, + )?; + } + Ok(()) +} diff --git a/src/net/p2p.rs b/src/net/p2p.rs index d02dfe4c15..772a89f3ba 100644 --- a/src/net/p2p.rs +++ b/src/net/p2p.rs @@ -1927,7 +1927,7 @@ impl PeerNetwork { } for (event_id, convo) in self.peers.iter() { - if convo.is_authenticated() { + if convo.is_authenticated() && convo.stats.last_contact_time > 0 { // have handshaked with this remote peer if convo.stats.last_contact_time + (convo.peer_heartbeat as u64) diff --git a/src/util_lib/db.rs b/src/util_lib/db.rs index 597bd95930..4ae33e7dd3 100644 --- a/src/util_lib/db.rs +++ b/src/util_lib/db.rs @@ -106,6 +106,10 @@ pub enum Error { IOError(IOError), /// MARF index error IndexError(MARFError), + /// Old schema error + OldSchema(u64), + /// Database is too old for epoch + TooOldForEpoch, /// Other error Other(String), } @@ -127,6 +131,10 @@ impl fmt::Display for Error { Error::IOError(ref e) => fmt::Display::fmt(e, f), Error::SqliteError(ref e) => fmt::Display::fmt(e, f), Error::IndexError(ref e) => fmt::Display::fmt(e, f), + Error::OldSchema(ref s) => write!(f, "Old database schema: {}", s), + Error::TooOldForEpoch => { + write!(f, "Database is not compatible with current system epoch") + } Error::Other(ref s) => fmt::Display::fmt(s, f), } } @@ -149,6 +157,8 @@ impl error::Error for Error { Error::SqliteError(ref e) => Some(e), 
Error::IOError(ref e) => Some(e), Error::IndexError(ref e) => Some(e), + Error::OldSchema(ref _s) => None, + Error::TooOldForEpoch => None, Error::Other(ref _s) => None, } } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 128e68c1ad..9cf3bd2cb7 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1319,6 +1319,8 @@ impl StacksNode { // bootstrap nodes *always* allowed let mut tx = peerdb.tx_begin().unwrap(); for initial_neighbor in initial_neighbors.iter() { + // update peer in case public key changed + PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap(); PeerDB::set_allow_peer( &mut tx, initial_neighbor.addr.network_id, diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index fe1a3f5052..df21018288 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -21,10 +21,12 @@ use stacks::burnchains::{Address, Burnchain}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers}; use stacks::chainstate::coordinator::{ - check_chainstate_db_versions, BlockEventDispatcher, ChainsCoordinator, CoordinatorCommunication, + migrate_chainstate_dbs, BlockEventDispatcher, ChainsCoordinator, CoordinatorCommunication, + Error as coord_error, }; use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::net::atlas::{AtlasConfig, Attachment, AttachmentInstance, ATTACHMENTS_CHANNEL_SIZE}; +use stacks::util_lib::db::Error as db_error; use stx_genesis::GenesisData; use crate::monitoring::start_serving_monitoring_metrics; @@ -323,29 +325,29 @@ impl RunLoop { Some(self.should_keep_running.clone()), ); - // Invoke connect() to perform any db instantiation and migration early - if let Err(e) = burnchain_controller.connect_dbs() { - error!("Failed to connect to burnchain databases: {}", e); - 
panic!(); - }; - - let burnchain_config = burnchain_controller.get_burnchain(); + // Upgrade chainstate databases if they exist already let epochs = burnchain_controller.get_stacks_epochs(); - if !check_chainstate_db_versions( + match migrate_chainstate_dbs( &epochs, &self.config.get_burn_db_file_path(), &self.config.get_chainstate_path_str(), - ) - .expect("FATAL: unable to query filesystem or databases for version information") - { - error!( - "FATAL: chainstate database(s) are not compatible with the current system epoch" - ); - panic!(); + Some(self.config.node.get_marf_opts()), + ) { + Ok(_) => {} + Err(coord_error::DBError(db_error::TooOldForEpoch)) => { + error!( + "FATAL: chainstate database(s) are not compatible with the current system epoch" + ); + panic!(); + } + Err(e) => { + panic!("FATAL: unable to query filesystem or databases: {:?}", &e); + } } info!("Start syncing Bitcoin headers, feel free to grab a cup of coffee, this can take a while"); + let burnchain_config = burnchain_controller.get_burnchain(); let target_burnchain_block_height = match burnchain_config .get_highest_burnchain_block() .expect("FATAL: failed to access burnchain database") @@ -372,6 +374,12 @@ impl RunLoop { } }; + // if the chainstate DBs don't exist, this will instantiate them + if let Err(e) = burnchain_controller.connect_dbs() { + error!("Failed to connect to burnchain databases: {}", e); + panic!(); + }; + // TODO (hack) instantiate the sortdb in the burnchain let _ = burnchain_controller.sortdb_mut(); burnchain_controller