From ec1a8c739d9f63021ddff0c9d2087c930433331d Mon Sep 17 00:00:00 2001 From: katelyn martin Date: Wed, 20 Mar 2024 13:14:22 -0400 Subject: [PATCH] =?UTF-8?q?tests(app):=20=F0=9F=8E=A4=20add=20validator=20?= =?UTF-8?q?uptime=20mock=20consensus=20tests?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fixup, break into distinct commits --- crates/core/app/src/server/consensus.rs | 1 + ...me_for_genesis_validator_missing_blocks.rs | 95 ++++ ..._uptime_for_validators_only_once_active.rs | 456 ++++++++++++++++++ .../app/tests/common/test_node_builder_ext.rs | 16 - crates/core/component/stake/src/uptime.rs | 5 + crates/test/mock-consensus/src/block.rs | 7 +- crates/test/mock-consensus/src/lib.rs | 3 + 7 files changed, 566 insertions(+), 17 deletions(-) create mode 100644 crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs create mode 100644 crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs diff --git a/crates/core/app/src/server/consensus.rs b/crates/core/app/src/server/consensus.rs index 37fa3a35ef..0ad2998cfa 100644 --- a/crates/core/app/src/server/consensus.rs +++ b/crates/core/app/src/server/consensus.rs @@ -34,6 +34,7 @@ fn trace_events(events: &[Event]) { impl Consensus { const QUEUE_SIZE: usize = 10; + // TODO(kate): make this accept an `AsRef`. pub fn new(storage: Storage) -> ConsensusService { tower_actor::Actor::new(Self::QUEUE_SIZE, |queue: _| { let storage = storage.clone(); diff --git a/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs b/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs new file mode 100644 index 0000000000..de3dbe43eb --- /dev/null +++ b/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs @@ -0,0 +1,95 @@ +mod common; + +use { + self::common::BuilderExt, + anyhow::Context, + cnidarium::TempStorage, + decaf377_rdsa::{SigningKey, SpendAuth}, + penumbra_app::server::consensus::Consensus, + penumbra_genesis::AppState, + penumbra_keys::test_keys, + penumbra_mock_client::MockClient, + penumbra_mock_consensus::TestNode, + penumbra_proto::DomainType, + penumbra_stake::{ + component::validator_handler::validator_store::ValidatorDataRead, validator::Validator, + FundingStreams, GovernanceKey, IdentityKey, Uptime, + }, + rand_core::OsRng, + tap::Tap, + tracing::{error_span, info, Instrument}, +}; + +#[tokio::test] +// ci: ignore this test and allow some specific warnings, until this test is written. +#[allow(unused_variables, unused_mut, unreachable_code)] // `todo!` fools some lints. +async fn app_tracks_uptime_for_genesis_validator_missing_blocks() -> anyhow::Result<()> { + // Install a test logger, acquire some temporary storage, and start the test node. + let guard = common::set_tracing_subscriber(); + let storage = TempStorage::new().await?; + + // Start the test node. + let mut node = { + let app_state = AppState::default(); + let consensus = Consensus::new(storage.as_ref().clone()); + TestNode::builder() + .single_validator() + .with_penumbra_auto_app_state(app_state)? + .init_chain(consensus) + .await + }?; + + // Create a mock client. + let client = MockClient::new(test_keys::SPEND_KEY.clone()); + + // Retrieve the validator definition from the latest snapshot. + let Validator { identity_key, .. } = match storage + .latest_snapshot() + .validator_definitions() + .tap(|_| info!("getting validator definitions")) + .await? 
+        .as_slice()
+    {
+        [v] => v.clone(),
+        unexpected => panic!("there should be one validator, got: {unexpected:?}"),
+    };
+    let get_uptime = || async {
+        storage
+            .latest_snapshot()
+            .get_validator_uptime(&identity_key)
+            .await
+            .expect("should be able to get a validator uptime")
+            .expect("validator uptime should exist")
+    };
+
+    // Show that the uptime starts with no missed blocks.
+    assert_eq!(
+        get_uptime().await.num_missed_blocks(),
+        0,
+        "no blocks have been missed at genesis"
+    );
+
+    // Jump ahead a few blocks.
+    let mut height = 4;
+    node.fast_forward(height)
+        .instrument(error_span!("fast forwarding test node", %height))
+        .await
+        .with_context(|| format!("fast forwarding {height} blocks"))?;
+
+    // Check the validator's uptime once more. We should have uptime data up to the fourth block,
+    // and the validator should have missed all of the blocks between genesis and now.
+    {
+        let uptime = get_uptime().await;
+        assert_eq!(uptime.as_of_height(), height);
+        assert_eq!(
+            uptime.num_missed_blocks(),
+            /* NB: this is off-by-one */ (height - 1) as usize,
+            "validator should have missed all but one of the {height} blocks"
+        );
+    }
+
+    Ok(())
+        .tap(|_| drop(node))
+        .tap(|_| drop(storage))
+        .tap(|_| drop(guard))
+}
diff --git a/crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs b/crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs
new file mode 100644
index 0000000000..e28665b156
--- /dev/null
+++ b/crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs
@@ -0,0 +1,456 @@
+mod common;
+
+use {
+    self::common::BuilderExt,
+    cnidarium::TempStorage,
+    decaf377_rdsa::{SigningKey, SpendAuth},
+    penumbra_app::server::consensus::Consensus,
+    penumbra_genesis::AppState,
+    penumbra_keys::test_keys,
+    penumbra_mock_client::MockClient,
+    penumbra_mock_consensus::TestNode,
+    penumbra_proto::DomainType,
+    penumbra_sct::component::clock::EpochRead,
+    penumbra_stake::{
+        component::validator_handler::validator_store::ValidatorDataRead, validator::Validator,
+        FundingStreams, GovernanceKey, IdentityKey, Uptime,
+    },
+    rand_core::OsRng,
+    tap::Tap,
+    tracing::{error_span, info, Instrument},
+};
+
+#[tokio::test]
+// ci: ignore this test and allow some specific warnings, until this test is written.
+#[allow(unused_variables, unused_mut, unreachable_code)] // `todo!` fools some lints.
+async fn app_tracks_uptime_for_validators_only_once_active() -> anyhow::Result<()> {
+    /// The length of the [`penumbra_sct`] epoch.
+    ///
+    /// This test relies on many epochs turning over, so we will work with a shorter epoch duration.
+    const EPOCH_DURATION: u64 = 8;
+
+    // Install a test logger, acquire some temporary storage, and start the test node.
+    let guard = common::set_tracing_subscriber();
+    let storage = TempStorage::new().await?;
+
+    // Configure an AppState with slightly shorter epochs than usual.
+    let app_state = AppState::Content(penumbra_genesis::Content {
+        sct_content: penumbra_sct::genesis::Content {
+            sct_params: penumbra_sct::params::SctParameters {
+                epoch_duration: EPOCH_DURATION,
+            },
+        },
+        ..Default::default()
+    });
+
+    // Start the test node.
+    let mut node = {
+        let consensus = Consensus::new(storage.as_ref().clone());
+        TestNode::builder()
+            .single_validator()
+            .with_penumbra_auto_app_state(app_state)?
+            .init_chain(consensus)
+            .await
+    }?;
+
+    // Create a mock client.
+    let mut client = MockClient::new(test_keys::SPEND_KEY.clone());
+
+    /// Helper function to retrieve a validator's [`Uptime`].
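+    ///
+    /// This returns `None` if no uptime data is recorded for the given identity key, e.g. for a
+    /// validator that has not yet entered the consensus set.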
+    async fn get_uptime(storage: &TempStorage, id: IdentityKey) -> Option<Uptime> {
+        storage
+            .latest_snapshot()
+            .get_validator_uptime(&id)
+            .await
+            .expect("should be able to get a validator uptime")
+    }
+
+    // Helper function to count the validators in the current consensus set.
+    let get_latest_consensus_set = || async {
+        use penumbra_stake::component::ConsensusIndexRead;
+        storage
+            .latest_snapshot()
+            .get_consensus_set()
+            .await
+            .expect("latest snapshot should have a valid consensus set")
+    };
+
+    // Get the identity key of the genesis validator before we go further, by retrieving the
+    // validator definition from the latest snapshot.
+    let existing_validator_id = {
+        use penumbra_stake::component::validator_handler::validator_store::ValidatorDataRead;
+        let validators = &storage
+            .latest_snapshot()
+            .validator_definitions()
+            .tap(|_| info!("getting validator definitions"))
+            .await?;
+        match validators.as_slice() {
+            [Validator { identity_key, .. }] => *identity_key,
+            unexpected => panic!("there should be one validator, got: {unexpected:?}"),
+        }
+    };
+
+    // To define a validator, we need to define two keypairs: an identity key
+    // for the Penumbra application and a consensus key for cometbft.
+    let new_validator_id_sk = SigningKey::<SpendAuth>::new(OsRng);
+    let new_validator_id = IdentityKey(new_validator_id_sk.into());
+    let new_validator_consensus_sk = ed25519_consensus::SigningKey::new(OsRng);
+    let new_validator_consensus = new_validator_consensus_sk.verification_key();
+
+    // Now define the validator's configuration data.
+    let new_validator = Validator {
+        identity_key: new_validator_id.clone(),
+        // TODO: when https://github.com/informalsystems/tendermint-rs/pull/1401 is released,
+        // replace this with a direct `Into::into()` call. At the time of writing, v0.35.0 is the
+        // latest version. Check for new releases at https://crates.io/crates/tendermint/versions.
+        consensus_key: tendermint::PublicKey::from_raw_ed25519(&new_validator_consensus.to_bytes())
+            .expect("consensus key is valid"),
+        governance_key: GovernanceKey(new_validator_id_sk.into()),
+        enabled: true,
+        sequence_number: 0,
+        name: "test validator".to_string(),
+        website: String::default(),
+        description: String::default(),
+        funding_streams: FundingStreams::default(),
+    };
+    let new_validator_id = new_validator.identity_key;
+
+    // Helper functions to retrieve each validator's [`Uptime`].
+    let existing_validator_uptime = || get_uptime(&storage, existing_validator_id);
+    let new_validator_uptime = || get_uptime(&storage, new_validator_id);
+
+    // Make a transaction that defines the new validator.
+    let plan = {
+        use {
+            penumbra_stake::validator,
+            penumbra_transaction::{ActionPlan, TransactionParameters, TransactionPlan},
+            rand_core::OsRng,
+        };
+        let bytes = new_validator.encode_to_vec();
+        let auth_sig = new_validator_id_sk.sign(OsRng, &bytes);
+        let action = ActionPlan::ValidatorDefinition(validator::Definition {
+            validator: new_validator.clone(),
+            auth_sig,
+        });
+        let mut plan = TransactionPlan {
+            actions: vec![action.into()],
+            // Now fill out the remaining parts of the transaction needed for verification:
+            memo: None,
+            detection_data: None, // We'll set this automatically below
+            transaction_parameters: TransactionParameters {
+                chain_id: TestNode::<()>::CHAIN_ID.to_string(),
+                ..Default::default()
+            },
+        };
+        plan.populate_detection_data(rand_core::OsRng, 0);
+        plan
+    };
+
+    // Execute the transaction, applying it to the chain state.
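+    // NB: `node.block()` returns a block builder; calling `execute()` delivers the assembled
+    // block to the `Consensus` service this test node was initialized with.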
+ node.block() + .add_tx(client.witness_auth_build(&plan).await?.encode_to_vec()) + .execute() + .instrument(error_span!( + "executing block with validator definition transaction" + )) + .await?; + + // Show that we do not yet have uptime data for the new validator. It is defined, but its + // uptime is not being tracked yet because it is not part of the consensus set. + { + assert_eq!( + get_latest_consensus_set().await.len(), + 1, + "there should only be one validator in the consensus set" + ); + assert!(new_validator_uptime().await.is_none()); + } + + // Now, create a transaction that delegates to the new validator. + let plan = { + use { + penumbra_asset::STAKING_TOKEN_ASSET_ID, + penumbra_sct::component::clock::EpochRead, + penumbra_shielded_pool::{OutputPlan, SpendPlan}, + penumbra_transaction::{ + memo::MemoPlaintext, plan::MemoPlan, TransactionParameters, TransactionPlan, + }, + }; + let snapshot = storage.latest_snapshot(); + client.sync_to_latest(snapshot.clone()).await?; + let rate = snapshot + .get_validator_rate(&new_validator_id) + .await? + .ok_or(anyhow::anyhow!("new validator has a rate"))? + .tap(|rate| tracing::info!(?rate, "got new validator rate")); + let note = client + .notes + .values() + .filter(|n| n.asset_id() == *STAKING_TOKEN_ASSET_ID) + .cloned() + .next() + .expect("the test account should have one staking token note"); + let spend = SpendPlan::new( + &mut rand_core::OsRng, + note.clone(), + client + .position(note.commit()) + .expect("note should be in mock client's tree"), + ); + let delegate = rate.build_delegate( + storage.latest_snapshot().get_current_epoch().await?, + note.amount(), + ); + let output = OutputPlan::new( + &mut rand_core::OsRng, + delegate.delegation_value(), + *test_keys::ADDRESS_1, + ); + let mut plan = TransactionPlan { + actions: vec![spend.into(), output.into(), delegate.into()], + // Now fill out the remaining parts of the transaction needed for verification: + memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) + .map(Some)?, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: TestNode::<()>::CHAIN_ID.to_string(), + ..Default::default() + }, + }; + plan.populate_detection_data(rand_core::OsRng, 0); + plan + }; + let tx = client.witness_auth_build(&plan).await?; + + // Execute the delegation transaction, applying it to the chain state. + node.block() + .add_tx(tx.encode_to_vec()) + .execute() + .instrument(error_span!("executing block with delegation transaction")) + .await?; + let post_delegate_snapshot = storage.latest_snapshot(); + + // The new validator uptime should not yet be in effect. + { + assert_eq!( + get_latest_consensus_set().await.len(), + 1, + "there should only be one validator in the consensus set" + ); + assert!(new_validator_uptime().await.is_none()); + } + + // Fast forward to the next epoch. + { + let get_epoch = || async { storage.latest_snapshot().get_current_epoch().await }; + let start = get_epoch() + .await? 
+            .tap(|start| tracing::info!(?start, "fast forwarding to next epoch"));
+        let next = loop {
+            node.block().execute().await?;
+            let current = get_epoch().await?;
+            if current != start {
+                break current;
+            }
+        };
+        tracing::info!(?start, ?next, "finished fast forwarding to next epoch");
+        assert_eq!(
+            get_latest_consensus_set().await.len(),
+            2,
+            "the delegated validator should now be participating in consensus"
+        );
+    }
+
+    // Show that the uptime trackers look correct for the genesis validator and for our newly
+    // active validator.
+    {
+        let new = new_validator_uptime().await.expect("uptime should exist");
+        let existing = existing_validator_uptime()
+            .await
+            .expect("uptime should exist");
+        assert_eq!(
+            new.num_missed_blocks(),
+            // FIXME: promoted validators always miss one block.
+            // > "tracking is done at the beginning of block execution, and based on the previous
+            // > block commit h-1 so if a validator is promoted into the active set at h it will
+            // > always have 1 missed block - not sure this is worth fixing"
+            // - @erwanor
+            1,
+            "newly active validator has missed exactly one block (see FIXME above)"
+        );
+        assert_eq!(
+            new.as_of_height(),
+            storage.latest_snapshot().get_block_height().await?,
+            "validators' uptime trackers are up-to-date with latest height"
+        );
+        assert_eq!(
+            new.as_of_height(),
+            existing.as_of_height(),
+            "both validators' uptime trackers are equally recent"
+        );
+        assert_eq!(
+            existing.num_missed_blocks(),
+            (EPOCH_DURATION - 1) as usize,
+            "genesis validator has missed all blocks in the previous epoch"
+        );
+    }
+
+    // Build a transaction that will now undelegate from the validator.
+    let plan = {
+        use {
+            penumbra_sct::component::clock::EpochRead,
+            penumbra_shielded_pool::{OutputPlan, SpendPlan},
+            penumbra_stake::DelegationToken,
+            penumbra_transaction::{
+                memo::MemoPlaintext, plan::MemoPlan, TransactionParameters, TransactionPlan,
+            },
+        };
+        let snapshot = storage.latest_snapshot();
+        client.sync_to_latest(snapshot.clone()).await?;
+        let rate = snapshot
+            .get_validator_rate(&new_validator_id)
+            .await?
+            .ok_or(anyhow::anyhow!("new validator has a rate"))?
+            .tap(|rate| tracing::info!(?rate, "got new validator rate"));
+
+        let undelegation_id = DelegationToken::new(new_validator_id).id();
+        let note = client
+            .notes
+            .values()
+            .filter(|n| n.asset_id() == undelegation_id)
+            .cloned()
+            .next()
+            .expect("the test account should have one delegation token note");
+        let spend = SpendPlan::new(
+            &mut rand_core::OsRng,
+            note.clone(),
+            client
+                .position(note.commit())
+                .expect("note should be in mock client's tree"),
+        );
+        let undelegate = rate.build_undelegate(
+            storage.latest_snapshot().get_current_epoch().await?,
+            note.amount(),
+        );
+        let output = OutputPlan::new(
+            &mut rand_core::OsRng,
+            undelegate.unbonded_value(),
+            *test_keys::ADDRESS_1,
+        );
+
+        let mut plan = TransactionPlan {
+            actions: vec![spend.into(), output.into(), undelegate.into()],
+            // Now fill out the remaining parts of the transaction needed for verification:
+            memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0))
+                .map(Some)?,
+            detection_data: None, // We'll set this automatically below
+            transaction_parameters: TransactionParameters {
+                chain_id: TestNode::<()>::CHAIN_ID.to_string(),
+                ..Default::default()
+            },
+        };
+        plan.populate_detection_data(rand_core::OsRng, 0);
+        plan
+    };
+    let tx = client.witness_auth_build(&plan).await?;
+
+    // Execute the undelegation transaction, applying it to the chain state.
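+    // NB: as with the delegation above, this lands in a block of its own; the validator is only
+    // removed from the consensus set at the next epoch boundary.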
+    node.block()
+        .add_tx(tx.encode_to_vec())
+        .execute()
+        .instrument(error_span!("executing block with undelegation transaction"))
+        .await?;
+
+    // We should not yet see the uptime tracker disappear.
+    assert!(
+        new_validator_uptime()
+            .await
+            .is_some_and(|u| u.num_missed_blocks() == 2),
+        "new validator uptime should still be tracked after undelegation"
+    );
+
+    // Fast forward to the next epoch.
+    {
+        let get_epoch = || async { storage.latest_snapshot().get_current_epoch().await };
+        let start = get_epoch()
+            .await?
+            .tap(|start| tracing::info!(?start, "fast forwarding to next epoch"));
+        let next = loop {
+            node.block().execute().await?;
+            let current = get_epoch().await?;
+            if current != start {
+                break current;
+            }
+        };
+        tracing::info!(?start, ?next, "finished fast forwarding to next epoch");
+        assert_eq!(
+            get_latest_consensus_set().await.len(),
+            1,
+            "the undelegated validator should no longer be participating in consensus"
+        );
+    }
+
+    // Now that the validator is no longer part of the consensus set, its uptime tracker is no
+    // longer updated, though the previously recorded data remains.
+    {
+        let new = new_validator_uptime().await.expect("uptime should exist");
+        let existing = existing_validator_uptime()
+            .await
+            .expect("uptime should exist");
+        assert_eq!(
+            new.as_of_height(),
+            EPOCH_DURATION * 2 - 1,
+            "new validator uptime is not updated after leaving consensus"
+        );
+        assert_eq!(
+            existing.as_of_height(),
+            EPOCH_DURATION * 2,
+            "active validators' uptime is still tracked"
+        );
+    }
+
+    // Fast forward to the next epoch.
+    {
+        let get_epoch = || async { storage.latest_snapshot().get_current_epoch().await };
+        let start = get_epoch()
+            .await?
+            .tap(|start| tracing::info!(?start, "fast forwarding to next epoch"));
+        let next = loop {
+            node.block().execute().await?;
+            let current = get_epoch().await?;
+            if current != start {
+                break current;
+            }
+        };
+        tracing::info!(?start, ?next, "finished fast forwarding to next epoch");
+        assert_eq!(
+            get_latest_consensus_set().await.len(),
+            1,
+            "the undelegated validator should no longer be participating in consensus"
+        );
+    }
+
+    // Even as we continue into subsequent epochs, only the active validators' uptimes are
+    // updated.
+    {
+        let new = new_validator_uptime().await.expect("uptime should exist");
+        let existing = existing_validator_uptime()
+            .await
+            .expect("uptime should exist");
+        assert_eq!(
+            new.as_of_height(),
+            EPOCH_DURATION * 2 - 1,
+            "new validator uptime is still not updated after leaving consensus"
+        );
+        assert_eq!(
+            existing.as_of_height(),
+            EPOCH_DURATION * 3,
+            "active validators' uptime will continue to be tracked"
+        );
+    }
+
+    Ok(())
+        .tap(|_| drop(node))
+        .tap(|_| drop(storage))
+        .tap(|_| drop(guard))
+}
diff --git a/crates/core/app/tests/common/test_node_builder_ext.rs b/crates/core/app/tests/common/test_node_builder_ext.rs
index 2433f812ca..dfb4ca0e74 100644
--- a/crates/core/app/tests/common/test_node_builder_ext.rs
+++ b/crates/core/app/tests/common/test_node_builder_ext.rs
@@ -102,19 +102,3 @@ fn generate_penumbra_validator(
 
     (v, allocation)
 }
-
-fn log_validator(
-    PenumbraValidator {
-        name,
-        enabled,
-        sequence_number,
-        ..
-    }: &PenumbraValidator,
-) {
-    tracing::trace!(
-        %name,
-        %enabled,
-        %sequence_number,
-        "injecting validator into app state"
-    )
-}
diff --git a/crates/core/component/stake/src/uptime.rs b/crates/core/component/stake/src/uptime.rs
index c20063b647..e44ab52f62 100644
--- a/crates/core/component/stake/src/uptime.rs
+++ b/crates/core/component/stake/src/uptime.rs
@@ -77,6 +77,11 @@ impl Uptime {
     pub fn num_missed_blocks(&self) -> usize {
         self.signatures.iter_zeros().len()
     }
+
+    /// Returns the block height up to which this tracker has recorded.
+    pub fn as_of_height(&self) -> u64 {
+        self.as_of_block_height
+    }
 }
 
 impl DomainType for Uptime {
diff --git a/crates/test/mock-consensus/src/block.rs b/crates/test/mock-consensus/src/block.rs
index c013927060..a94a214f97 100644
--- a/crates/test/mock-consensus/src/block.rs
+++ b/crates/test/mock-consensus/src/block.rs
@@ -28,6 +28,9 @@ pub struct Builder<'e, C> {
 
     /// Evidence of malfeasance.
     evidence: evidence::List,
+
+    /// The list of signatures.
+    signatures: Vec<CommitSig>,
 }
 
 impl<C> TestNode<C> {
@@ -37,6 +40,7 @@ impl<C> TestNode<C> {
             test_node: self,
             data: Default::default(),
             evidence: Default::default(),
+            signatures: Default::default(),
         }
     }
 }
@@ -117,6 +121,7 @@ where
             data,
             evidence,
             test_node,
+            signatures,
         } = self;
 
         let height = {
@@ -135,7 +140,7 @@
                 height,
                 round: Round::default(),
                 block_id,
-                signatures: Vec::default(),
+                signatures,
             })
         } else {
             None // The first block has no previous commit to speak of.
diff --git a/crates/test/mock-consensus/src/lib.rs b/crates/test/mock-consensus/src/lib.rs
index 13ae230924..8dbb503cd7 100644
--- a/crates/test/mock-consensus/src/lib.rs
+++ b/crates/test/mock-consensus/src/lib.rs
@@ -74,6 +74,9 @@ where
     C::Error: Sized,
 {
     /// Fast forwards a number of blocks.
+    ///
+    /// Use [`fast_forward_to_next_epoch()`][Self::fast_forward_to_next_epoch] if you would like
+    /// to fast forward to the start of the next epoch.
     #[tracing::instrument(
         skip(self),
         fields(fast_forward.blocks = %blocks)