diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 76ca85b646..2680a3194f 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -81,13 +81,17 @@ jobs: - tests::nakamoto_integrations::correct_burn_outs - tests::nakamoto_integrations::vote_for_aggregate_key_burn_op - tests::nakamoto_integrations::follower_bootup + - tests::nakamoto_integrations::forked_tenure_is_ignored - tests::signer::stackerdb_dkg - - tests::signer::stackerdb_sign + - tests::signer::stackerdb_sign_request_rejected - tests::signer::stackerdb_block_proposal - tests::signer::stackerdb_filter_bad_transactions - - tests::signer::stackerdb_mine_2_nakamoto_reward_cycles - - tests::signer::stackerdb_sign_after_signer_reboot + # TODO: enable these once v1 signer is fixed + # - tests::signer::stackerdb_mine_2_nakamoto_reward_cycles + # - tests::signer::stackerdb_sign_after_signer_reboot - tests::nakamoto_integrations::stack_stx_burn_op_integration_test + - tests::signer::stackerdb_delayed_dkg + - tests::nakamoto_integrations::check_block_heights # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 39048dc01b..a566eb9e41 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -48,17 +48,6 @@ jobs: name: Rust Format runs-on: ubuntu-latest steps: - - name: Checkout the latest code - id: git_checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Setup Rust Toolchain - id: setup_rust_toolchain - uses: actions-rust-lang/setup-rust-toolchain@f3c84ee10bf5a86e7a5d607d487bf17d57670965 # v1.5.0 - with: - components: rustfmt - cache: false - - name: Rustfmt id: rustfmt uses: stacks-network/actions/rustfmt@main diff --git a/.github/workflows/clarity-js-sdk-pr.yml 
b/.github/workflows/clarity-js-sdk-pr.yml index 4523808410..6bcd555ca9 100644 --- a/.github/workflows/clarity-js-sdk-pr.yml +++ b/.github/workflows/clarity-js-sdk-pr.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout latest clarity js sdk id: git_checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 with: token: ${{ secrets.GH_TOKEN }} repository: ${{ env.CLARITY_JS_SDK_REPOSITORY }} @@ -46,7 +46,7 @@ jobs: - name: Create Pull Request id: create_pr - uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2 + uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e # v6.0.5 with: token: ${{ secrets.GH_TOKEN }} commit-message: "chore: update clarity-native-bin tag" diff --git a/.github/workflows/docs-pr.yml b/.github/workflows/docs-pr.yml index 7543bdd750..8b005e0402 100644 --- a/.github/workflows/docs-pr.yml +++ b/.github/workflows/docs-pr.yml @@ -36,7 +36,7 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 - name: Build docs id: build_docs @@ -46,7 +46,7 @@ jobs: - name: Checkout latest docs id: git_checkout_docs - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 with: token: ${{ secrets.DOCS_GITHUB_TOKEN }} repository: ${{ env.TARGET_REPOSITORY }} @@ -77,7 +77,7 @@ jobs: - name: Open PR id: open_pr if: ${{ steps.push.outputs.open_pr == '1' }} - uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410 # v6.4.1 + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 with: github-token: ${{ secrets.DOCS_GITHUB_TOKEN }} script: | diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml 
index 02243c4cbf..0c8c19176a 100644 --- a/.github/workflows/github-release.yml +++ b/.github/workflows/github-release.yml @@ -48,20 +48,23 @@ jobs: ## Downloads the artifacts built in `create-source-binary.yml` - name: Download Artifacts id: download_artifacts - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 with: - name: artifact + pattern: ${{ inputs.tag }}-binary-build-* path: release + merge-multiple: true ## Generate a checksums file to be added to the release page - name: Generate Checksums id: generate_checksum uses: stacks-network/actions/generate-checksum@main + with: + artifact_download_pattern: "${{ inputs.tag }}-binary-build-*" ## Upload the release archives with the checksums file - name: Upload Release id: upload_release - uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 #v0.1.15 + uses: softprops/action-gh-release@69320dbe05506a9a39fc8ae11030b214ec2d1f87 #v2.0.5 env: GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} with: diff --git a/.github/workflows/image-build-binary.yml b/.github/workflows/image-build-binary.yml index 74415e7f16..23e75892fe 100644 --- a/.github/workflows/image-build-binary.yml +++ b/.github/workflows/image-build-binary.yml @@ -62,7 +62,7 @@ jobs: ## ex. 
debian will have this tag: `type=ref,event=tag,enable=${{ matrix.dist == 'debian' }}` - name: Docker Metadata ( ${{matrix.dist}} ) id: docker_metadata - uses: docker/metadata-action@96383f45573cb7f253c731d3b3ab81c87ef81934 #v5.0.0 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 #v5.5.1 with: ## tag images with current repo name `stacks-core` as well as legacy `stacks-blockchain` images: | @@ -79,7 +79,7 @@ jobs: ## Build docker image for release - name: Build and Push ( ${{matrix.dist}} ) id: docker_build - uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 + uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 with: file: ./.github/actions/dockerfiles/Dockerfile.${{ matrix.dist }}-binary platforms: ${{ env.docker_platforms }} diff --git a/.github/workflows/image-build-source.yml b/.github/workflows/image-build-source.yml index ebb9afc679..e45455f05b 100644 --- a/.github/workflows/image-build-source.yml +++ b/.github/workflows/image-build-source.yml @@ -49,7 +49,7 @@ jobs: ## Set docker metatdata - name: Docker Metadata ( ${{matrix.dist}} ) id: docker_metadata - uses: docker/metadata-action@96383f45573cb7f253c731d3b3ab81c87ef81934 #v5.0.0 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 #v5.5.1 with: images: | ${{env.docker-org}}/${{ github.event.repository.name }} @@ -61,7 +61,7 @@ jobs: ## Build docker image - name: Build and Push ( ${{matrix.dist}} ) id: docker_build - uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 + uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 with: file: ./.github/actions/dockerfiles/Dockerfile.${{matrix.dist}}-source platforms: ${{ env.docker_platforms }} diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 3195f279fc..70ef457ce7 100644 --- a/.github/workflows/stacks-core-tests.yml +++ 
b/.github/workflows/stacks-core-tests.yml @@ -127,7 +127,7 @@ jobs: ## checkout the code - name: Checkout the latest code id: git_checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 - name: Run network relay tests id: nettest @@ -145,10 +145,10 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@v3 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 - name: Execute core contract unit tests with clarinet-sdk id: clarinet_unit_test - uses: actions/setup-node@v3 + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: node-version: 18.x cache: "npm" @@ -174,7 +174,7 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@v3 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 - name: Execute core contract unit tests in Clarinet id: clarinet_unit_test_v1 uses: docker://hirosystems/clarinet:1.7.1 diff --git a/CHANGELOG.md b/CHANGELOG.md index c318efebc8..8ff66f9c52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,37 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
+## [2.5.0.0.4] + +### Added + +- Adds the solo stacking scenarios to the stateful property-based testing strategy for PoX-4 (#4725) +- Add signer-key to synthetic stack-aggregation-increase event (#4728) +- Implement the assumed total commit with carry-over (ATC-C) strategy for denying opportunistic Bitcoin miners from mining Stacks at a discount (#4733) +- Adding support for stacks-block-height and tenure-height in Clarity 3 (#4745) +- Preserve PeerNetwork struct when transitioning to 3.0 (#4767) +- Implement signer monitor server error (#4773) +- Pull current stacks signer out into v1 implementation and create placeholder v0 mod (#4778) +- Create new block signature message type for v0 signer (#4787) +- Isolate the rusqlite dependency in stacks-common and clarity behind a cargo feature (#4791) +- Add next_initiative_delay config option to control how frequently the miner checks if a new burnchain block has been processed (#4795) +- Various performance improvements and cleanup + +### Changed + +- Downgraded log messages about transactions from warning to info (#4697) +- Fix race condition between the signer binary and the /v2/pox endpoint (#4738) +- Make node config mock_miner item hot-swappable (#4743) +- Mandates that a burnchain block header be resolved by a BurnchainHeaderReader, which will resolve a block height to at most one burnchain header (#4748) +- Optional config option to resolve DNS of bootstrap nodes (#4749) +- Limit inventory syncs with new peers (#4750) +- Update /v2/fees/transfer to report the median transaction fee estimate for a STX-transfer of 180 bytes (#4754) +- Reduce connection spamming in stackerdb (#4759) +- Remove deprecated signer cli commands (#4772) +- Extra pair of signer slots got introduced at the epoch 2.5 boundary (#4845, #4868, #4891) +- Never consider Stacks chain tips that are not on the canonical burn chain #4886 (#4893) + + ## [2.5.0.0.3] This release fixes a regression in `2.5.0.0.0` from `2.4.0.1.0` caused by git merge
diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000000..f410f142e1 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,20 @@ +# These owners will be the default owners for everything in +# the repo. Unless a later match takes precedence, +# @stacks-network/blockchain-team-codeowners will be requested for +# review when someone opens a pull request. +* @stacks-network/blockchain-team-codeowners + +# Generic file extensions that shouldn't require much scrutiny. Anyone with write access to the repo may approve a PR +*.md @stacks-network/blockchain-team +*.yml @stacks-network/blockchain-team +*.yaml @stacks-network/blockchain-team +*.txt @stacks-network/blockchain-team +*.toml @stacks-network/blockchain-team + +# Signer code +libsigner/**/*.rs @stacks-network/blockchain-team-signer +stacks-signer/**/*.rs @stacks-network/blockchain-team-signer + +# CI workflows +.github/workflows/ @stacks-network/blockchain-team-ci +.github/actions/ @stacks-network/blockchain-team-ci \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 9cfd1ad9a1..fa89992f8e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1921,8 +1921,11 @@ version = "0.0.1" dependencies = [ "clarity", "hashbrown 0.14.3", + "lazy_static", "libc", "libstackerdb", + "mutants", + "prometheus", "rand 0.8.5", "rand_core 0.6.4", "secp256k1", @@ -3450,10 +3453,12 @@ dependencies = [ "clap 4.5.0", "clarity", "hashbrown 0.14.3", + "lazy_static", "libsigner", "libstackerdb", "num-traits", "polynomial", + "prometheus", "rand 0.8.5", "rand_core 0.6.4", "reqwest", @@ -3469,6 +3474,7 @@ dependencies = [ "stacks-common", "stackslib", "thiserror", + "tiny_http", "toml 0.5.11", "tracing", "tracing-subscriber", diff --git a/Dockerfile b/Dockerfile index 055cc3df76..5cfacc8ab0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ RUN apk add --no-cache musl-dev RUN mkdir /out -RUN cd testnet/stacks-node && cargo build --features monitoring_prom,slog_json --release +RUN cargo build --features
monitoring_prom,slog_json --release RUN cp target/release/stacks-node /out diff --git a/Dockerfile.debian b/Dockerfile.debian index 8b6759527e..ce21964039 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -10,7 +10,7 @@ COPY . . RUN mkdir /out -RUN cd testnet/stacks-node && cargo build --features monitoring_prom,slog_json --release +RUN cargo build --features monitoring_prom,slog_json --release RUN cp target/release/stacks-node /out diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 70cbcec585..8420934af1 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -27,7 +27,7 @@ regex = "1" lazy_static = "1.4.0" integer-sqrt = "0.1.3" slog = { version = "2.5.2", features = [ "max_level_trace" ] } -stacks_common = { package = "stacks-common", path = "../stacks-common" } +stacks_common = { package = "stacks-common", path = "../stacks-common", optional = true, default-features = false } rstest = "0.17.0" rstest_reuse = "0.5.0" hashbrown = { workspace = true } @@ -39,6 +39,7 @@ features = ["arbitrary_precision", "unbounded_depth"] [dependencies.rusqlite] version = "=0.24.2" +optional = true features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] [dependencies.time] @@ -52,8 +53,9 @@ assert-json-diff = "1.0.0" # criterion = "0.3" [features] -default = [] -developer-mode = [] +default = ["canonical"] +canonical = ["rusqlite", "stacks_common/canonical"] +developer-mode = ["stacks_common/developer-mode"] slog_json = ["stacks_common/slog_json"] -testing = [] +testing = ["canonical"] devtools = [] diff --git a/clarity/src/vm/analysis/arithmetic_checker/mod.rs b/clarity/src/vm/analysis/arithmetic_checker/mod.rs index 5595905a48..d4c5bafe96 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/mod.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/mod.rs @@ -148,7 +148,7 @@ impl<'a> ArithmeticOnlyChecker<'a> { { match native_var { ContractCaller | TxSender | TotalLiquidMicroSTX | BlockHeight | BurnBlockHeight - | Regtest | TxSponsor | Mainnet | 
ChainId => { + | Regtest | TxSponsor | Mainnet | ChainId | StacksBlockHeight | TenureHeight => { Err(Error::VariableForbidden(native_var)) } NativeNone | NativeTrue | NativeFalse => Ok(()), diff --git a/clarity/src/vm/analysis/arithmetic_checker/tests.rs b/clarity/src/vm/analysis/arithmetic_checker/tests.rs index 4ad02c08d5..0e7d520cb3 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/tests.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/tests.rs @@ -22,12 +22,13 @@ use stacks_common::types::StacksEpochId; use crate::vm::analysis::arithmetic_checker::Error::*; use crate::vm::analysis::arithmetic_checker::{ArithmeticOnlyChecker, Error}; -use crate::vm::analysis::{mem_type_check, ContractAnalysis}; +use crate::vm::analysis::ContractAnalysis; use crate::vm::ast::parse; use crate::vm::costs::LimitedCostTracker; use crate::vm::functions::define::DefineFunctions; use crate::vm::functions::NativeFunctions; use crate::vm::tests::test_clarity_versions; +use crate::vm::tooling::mem_type_check; use crate::vm::types::QualifiedContractIdentifier; use crate::vm::variables::NativeVariables; use crate::vm::ClarityVersion; diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index 71fefb6457..257d2e5bbe 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -136,6 +136,7 @@ pub enum CheckErrors { GetBurnBlockInfoExpectPropertyName, NameAlreadyUsed(String), + ReservedWord(String), // expect a function, or applying a function to a list NonFunctionApplication, @@ -408,6 +409,7 @@ impl DiagnosableError for CheckErrors { CheckErrors::GetBlockInfoExpectPropertyName => "missing property name for block info introspection".into(), CheckErrors::GetBurnBlockInfoExpectPropertyName => "missing property name for burn block info introspection".into(), CheckErrors::NameAlreadyUsed(name) => format!("defining '{}' conflicts with previous value", name), + CheckErrors::ReservedWord(name) => format!("{name} is a reserved word"), 
CheckErrors::NonFunctionApplication => "expecting expression of type function".into(), CheckErrors::ExpectedListApplication => "expecting expression of type list".into(), CheckErrors::ExpectedSequence(found_type) => format!("expecting expression of type 'list', 'buff', 'string-ascii' or 'string-utf8' - found '{}'", found_type), diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 4da10f88bf..6a8f64f1b2 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -37,12 +37,15 @@ use self::type_checker::v2_1::TypeChecker as TypeChecker2_1; pub use self::types::{AnalysisPass, ContractAnalysis}; use crate::vm::ast::{build_ast_with_rules, ASTRules}; use crate::vm::costs::LimitedCostTracker; -use crate::vm::database::{MemoryBackingStore, STORE_CONTRACT_SRC_INTERFACE}; +#[cfg(feature = "canonical")] +use crate::vm::database::MemoryBackingStore; +use crate::vm::database::STORE_CONTRACT_SRC_INTERFACE; use crate::vm::representations::SymbolicExpression; use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; use crate::vm::ClarityVersion; /// Used by CLI tools like the docs generator. 
Not used in production +#[cfg(feature = "canonical")] pub fn mem_type_check( snippet: &str, version: ClarityVersion, diff --git a/clarity/src/vm/analysis/trait_checker/tests.rs b/clarity/src/vm/analysis/trait_checker/tests.rs index bc3f996284..b1d9bdb222 100644 --- a/clarity/src/vm/analysis/trait_checker/tests.rs +++ b/clarity/src/vm/analysis/trait_checker/tests.rs @@ -1463,7 +1463,7 @@ fn test_dynamic_dispatch_pass_bound_principal_as_trait_in_user_defined_functions _ => panic!("{:?}", err), }; } - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), _ => panic!("got {:?}", result), } } diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index c8185dde74..800347d0f0 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -84,3 +84,21 @@ impl FunctionType { } } } + +fn is_reserved_word_v3(word: &str) -> bool { + match word { + "block-height" => true, + _ => false, + } +} + +/// Is this a reserved word that should trigger an analysis error for the given +/// Clarity version? Note that most of the reserved words do not trigger an +/// analysis error, but will trigger an error at runtime. This should likely be +/// changed in a future Clarity version. 
+pub fn is_reserved_word(word: &str, version: ClarityVersion) -> bool { + match version { + ClarityVersion::Clarity1 | ClarityVersion::Clarity2 => false, + ClarityVersion::Clarity3 => is_reserved_word_v3(word), + } +} diff --git a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs index d66cad5d4e..2b913a3ac9 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs @@ -323,9 +323,9 @@ fn type_reserved_variable(variable_name: &str) -> CheckResult TypeSignature::BoolType, TotalLiquidMicroSTX => TypeSignature::UIntType, Regtest => TypeSignature::BoolType, - TxSponsor | Mainnet | ChainId => { + TxSponsor | Mainnet | ChainId | StacksBlockHeight | TenureHeight => { return Err(CheckErrors::Expects( - "tx-sponsor, mainnet, and chain-id should not reach here in 2.05".into(), + "tx-sponsor, mainnet, chain-id, stacks-block-height, and tenure-height should not reach here in 2.05".into(), ) .into()) } diff --git a/clarity/src/vm/analysis/type_checker/v2_05/tests/assets.rs b/clarity/src/vm/analysis/type_checker/v2_05/tests/assets.rs index df9c35ed0e..5cfc9ab992 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/tests/assets.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/tests/assets.rs @@ -17,9 +17,10 @@ use stacks_common::types::StacksEpochId; use crate::vm::analysis::errors::CheckErrors; -use crate::vm::analysis::{mem_type_check, AnalysisDatabase}; +use crate::vm::analysis::AnalysisDatabase; use crate::vm::ast::parse; use crate::vm::database::MemoryBackingStore; +use crate::vm::tooling::mem_type_check; use crate::vm::types::{ QualifiedContractIdentifier, SequenceSubtype, StringSubtype, TypeSignature, }; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs b/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs index 8cbed1a416..d210194ea4 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs +++ 
b/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs @@ -19,6 +19,7 @@ use std::collections::BTreeMap; use hashbrown::{HashMap, HashSet}; use crate::vm::analysis::errors::{CheckError, CheckErrors, CheckResult}; +use crate::vm::analysis::type_checker::is_reserved_word; use crate::vm::analysis::types::ContractAnalysis; use crate::vm::contexts::MAX_CONTEXT_DEPTH; use crate::vm::representations::{ClarityName, SymbolicExpression}; @@ -42,7 +43,7 @@ impl TraitContext { pub fn new(clarity_version: ClarityVersion) -> TraitContext { match clarity_version { ClarityVersion::Clarity1 => Self::Clarity1(HashMap::new()), - ClarityVersion::Clarity2 => Self::Clarity2 { + ClarityVersion::Clarity2 | ClarityVersion::Clarity3 => Self::Clarity2 { defined: HashSet::new(), all: HashMap::new(), }, @@ -128,6 +129,7 @@ impl TraitContext { } pub struct ContractContext { + clarity_version: ClarityVersion, contract_identifier: QualifiedContractIdentifier, map_types: HashMap, variable_types: HashMap, @@ -147,6 +149,7 @@ impl ContractContext { clarity_version: ClarityVersion, ) -> ContractContext { ContractContext { + clarity_version, contract_identifier, variable_types: HashMap::new(), private_function_types: HashMap::new(), @@ -168,6 +171,10 @@ impl ContractContext { } pub fn check_name_used(&self, name: &str) -> CheckResult<()> { + if is_reserved_word(name, self.clarity_version) { + return Err(CheckError::new(CheckErrors::ReservedWord(name.to_string()))); + } + if self.variable_types.contains_key(name) || self.persisted_variable_types.contains_key(name) || self.private_function_types.contains_key(name) @@ -279,6 +286,10 @@ impl ContractContext { trait_name: ClarityName, trait_signature: BTreeMap, ) -> CheckResult<()> { + if self.clarity_version >= ClarityVersion::Clarity3 { + self.check_name_used(&trait_name)?; + } + self.traits.add_defined_trait( self.contract_identifier.clone(), trait_name, @@ -292,6 +303,10 @@ impl ContractContext { trait_id: TraitIdentifier, trait_signature: BTreeMap, ) 
-> CheckResult<()> { + if self.clarity_version >= ClarityVersion::Clarity3 { + self.check_name_used(&alias)?; + } + self.traits.add_used_trait(alias, trait_id, trait_signature) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index b61d3bb6e2..7caf775c19 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -37,6 +37,7 @@ use crate::vm::costs::{ analysis_typecheck_cost, cost_functions, runtime_cost, ClarityCostFunctionReference, CostErrors, CostOverflowingMath, CostTracker, ExecutionCost, LimitedCostTracker, }; +use crate::vm::diagnostic::Diagnostic; use crate::vm::functions::define::DefineFunctionsParsed; use crate::vm::functions::NativeFunctions; use crate::vm::representations::SymbolicExpressionType::{ @@ -151,7 +152,130 @@ impl TypeChecker<'_, '_> { pub type TypeResult = CheckResult; +pub fn compute_typecheck_cost( + track: &mut T, + t1: &TypeSignature, + t2: &TypeSignature, +) -> Result { + let t1_size = t1.type_size().map_err(|_| CostErrors::CostOverflow)?; + let t2_size = t2.type_size().map_err(|_| CostErrors::CostOverflow)?; + track.compute_cost( + ClarityCostFunction::AnalysisTypeCheck, + &[std::cmp::max(t1_size, t2_size).into()], + ) +} + +pub fn check_argument_len(expected: usize, args_len: usize) -> Result<(), CheckErrors> { + if args_len != expected { + Err(CheckErrors::IncorrectArgumentCount(expected, args_len)) + } else { + Ok(()) + } +} + impl FunctionType { + pub fn check_args_visitor_2_1( + &self, + accounting: &mut T, + arg_type: &TypeSignature, + arg_index: usize, + accumulated_type: Option<&TypeSignature>, + ) -> ( + Option>, + CheckResult>, + ) { + match self { + // variadic stops checking cost at the first error... 
+ FunctionType::Variadic(expected_type, _) => { + let cost = Some(compute_typecheck_cost(accounting, expected_type, arg_type)); + let admitted = match expected_type.admits_type(&StacksEpochId::Epoch21, arg_type) { + Ok(admitted) => admitted, + Err(e) => return (cost, Err(e.into())), + }; + if !admitted { + return ( + cost, + Err(CheckErrors::TypeError(expected_type.clone(), arg_type.clone()).into()), + ); + } + (cost, Ok(None)) + } + FunctionType::ArithmeticVariadic => { + let cost = Some(compute_typecheck_cost( + accounting, + &TypeSignature::IntType, + arg_type, + )); + if arg_index == 0 { + let return_type = match arg_type { + TypeSignature::IntType => Ok(Some(TypeSignature::IntType)), + TypeSignature::UIntType => Ok(Some(TypeSignature::UIntType)), + _ => Err(CheckErrors::UnionTypeError( + vec![TypeSignature::IntType, TypeSignature::UIntType], + arg_type.clone(), + ) + .into()), + }; + (cost, return_type) + } else { + let return_type = accumulated_type + .ok_or_else(|| CheckErrors::Expects("Failed to set accumulated type for arg indices >= 1 in variadic arithmetic".into()).into()); + let check_result = return_type.and_then(|return_type| { + if arg_type != return_type { + Err( + CheckErrors::TypeError(return_type.clone(), arg_type.clone()) + .into(), + ) + } else { + Ok(None) + } + }); + (cost, check_result) + } + } + // For the fixed function types, the visitor will just + // tell the processor that any results greater than the args len + // do not need to be stored, because an error will occur before + // further checking anyways + FunctionType::Fixed(FixedFunction { + args: arg_types, .. + }) => { + if arg_index >= arg_types.len() { + // note: argument count will be wrong? 
+ return ( + None, + Err(CheckErrors::IncorrectArgumentCount(arg_types.len(), arg_index).into()), + ); + } + return (None, Ok(None)); + } + // For the following function types, the visitor will just + // tell the processor that any results greater than len 1 or 2 + // do not need to be stored, because an error will occur before + // further checking anyways + FunctionType::ArithmeticUnary | FunctionType::UnionArgs(..) => { + if arg_index >= 1 { + return ( + None, + Err(CheckErrors::IncorrectArgumentCount(1, arg_index).into()), + ); + } + return (None, Ok(None)); + } + FunctionType::ArithmeticBinary + | FunctionType::ArithmeticComparison + | FunctionType::Binary(..) => { + if arg_index >= 2 { + return ( + None, + Err(CheckErrors::IncorrectArgumentCount(2, arg_index).into()), + ); + } + return (None, Ok(None)); + } + } + } + pub fn check_args_2_1( &self, accounting: &mut T, @@ -858,6 +982,8 @@ fn type_reserved_variable( .map_err(|_| CheckErrors::Expects("Bad construction".into()))?, ContractCaller => TypeSignature::PrincipalType, BlockHeight => TypeSignature::UIntType, + StacksBlockHeight => TypeSignature::UIntType, + TenureHeight => TypeSignature::UIntType, BurnBlockHeight => TypeSignature::UIntType, NativeNone => TypeSignature::new_option(no_type()) .map_err(|_| CheckErrors::Expects("Bad construction".into()))?, @@ -1015,17 +1141,23 @@ impl<'a, 'b> TypeChecker<'a, 'b> { args: &[SymbolicExpression], context: &TypingContext, ) -> TypeResult { - let mut types_returned = self.type_check_all(args, context)?; - - let last_return = types_returned - .pop() - .ok_or(CheckError::new(CheckErrors::CheckerImplementationFailure))?; - - for type_return in types_returned.iter() { - if type_return.is_response_type() { - return Err(CheckErrors::UncheckedIntermediaryResponses.into()); + let mut last_return = None; + let mut return_failure = Ok(()); + for ix in 0..args.len() { + let type_return = self.type_check(&args[ix], context)?; + if ix + 1 < args.len() { + if 
type_return.is_response_type() { + return_failure = Err(CheckErrors::UncheckedIntermediaryResponses); + } + } else { + last_return = Some(type_return); } } + + let last_return = last_return + .ok_or_else(|| CheckError::new(CheckErrors::CheckerImplementationFailure))?; + return_failure?; + Ok(last_return) } @@ -1050,8 +1182,56 @@ impl<'a, 'b> TypeChecker<'a, 'b> { epoch: StacksEpochId, clarity_version: ClarityVersion, ) -> TypeResult { - let typed_args = self.type_check_all(args, context)?; - func_type.check_args(self, &typed_args, epoch, clarity_version) + if epoch <= StacksEpochId::Epoch2_05 { + let typed_args = self.type_check_all(args, context)?; + return func_type.check_args(self, &typed_args, epoch, clarity_version); + } + // use func_type visitor pattern + let mut accumulated_type = None; + let mut total_costs = vec![]; + let mut check_result = Ok(()); + let mut accumulated_types = Vec::new(); + for (arg_ix, arg_expr) in args.iter().enumerate() { + let arg_type = self.type_check(arg_expr, context)?; + if check_result.is_ok() { + let (costs, result) = func_type.check_args_visitor_2_1( + self, + &arg_type, + arg_ix, + accumulated_type.as_ref(), + ); + // add the accumulated type and total cost *before* + // checking for an error: we want the subsequent error handling + // to account for this cost + accumulated_types.push(arg_type); + total_costs.extend(costs); + + match result { + Ok(Some(returned_type)) => { + accumulated_type = Some(returned_type); + } + Ok(None) => {} + Err(e) => { + check_result = Err(e); + } + }; + } + } + if let Err(mut check_error) = check_result { + if let CheckErrors::IncorrectArgumentCount(expected, _actual) = check_error.err { + check_error.err = CheckErrors::IncorrectArgumentCount(expected, args.len()); + check_error.diagnostic = Diagnostic::err(&check_error.err) + } + // accumulate the checking costs + // the reason we do this now (instead of within the loop) is for backwards compatibility + for cost in total_costs.into_iter() { + 
self.add_cost(cost?)?; + } + + return Err(check_error); + } + // otherwise, just invoke the normal checking routine + func_type.check_args(self, &accumulated_types, epoch, clarity_version) } fn get_function_type(&self, function_name: &str) -> Option { diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index c5aefb65ed..e5fc32c67f 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -17,8 +17,8 @@ use stacks_common::types::StacksEpochId; use super::{ - check_argument_count, check_arguments_at_least, check_arguments_at_most, no_type, TypeChecker, - TypeResult, TypingContext, + check_argument_count, check_arguments_at_least, check_arguments_at_most, + compute_typecheck_cost, no_type, TypeChecker, TypeResult, TypingContext, }; use crate::vm::analysis::errors::{CheckError, CheckErrors, CheckResult}; use crate::vm::costs::cost_functions::ClarityCostFunction; @@ -61,14 +61,43 @@ fn check_special_list_cons( args: &[SymbolicExpression], context: &TypingContext, ) -> TypeResult { - let typed_args = checker.type_check_all(args, context)?; - for type_arg in typed_args.iter() { - runtime_cost( - ClarityCostFunction::AnalysisListItemsCheck, - checker, - type_arg.type_size()?, - )?; + let mut result = Vec::with_capacity(args.len()); + let mut entries_size: Option = Some(0); + let mut costs = Vec::with_capacity(args.len()); + + for arg in args.iter() { + // don't use map here, since type_check has side-effects. 
+ let checked = checker.type_check(arg, context)?; + let cost = checked.type_size().and_then(|ty_size| { + checker + .compute_cost( + ClarityCostFunction::AnalysisListItemsCheck, + &[ty_size.into()], + ) + .map_err(CheckErrors::from) + }); + costs.push(cost); + + if let Some(cur_size) = entries_size.clone() { + entries_size = cur_size.checked_add(checked.size()?); + } + if let Some(cur_size) = entries_size { + if cur_size > MAX_VALUE_SIZE { + entries_size = None; + } + } + if entries_size.is_some() { + result.push(checked); + } + } + + for cost in costs.into_iter() { + checker.add_cost(cost?)?; + } + if entries_size.is_none() { + return Err(CheckErrors::ValueTooLarge.into()); } + let typed_args = result; TypeSignature::parent_list_type(&typed_args) .map_err(|x| x.into()) .map(TypeSignature::from) @@ -202,6 +231,9 @@ pub fn check_special_tuple_cons( args.len(), )?; + let mut type_size = 0u32; + let mut cons_error = Ok(()); + handle_binding_list(args, |var_name, var_sexp| { checker.type_check(var_sexp, context).and_then(|var_type| { runtime_cost( @@ -209,11 +241,21 @@ pub fn check_special_tuple_cons( checker, var_type.type_size()?, )?; - tuple_type_data.push((var_name.clone(), var_type)); + if type_size < MAX_VALUE_SIZE { + type_size = type_size + .saturating_add(var_name.len() as u32) + .saturating_add(var_name.len() as u32) + .saturating_add(var_type.type_size()?) 
+ .saturating_add(var_type.size()?); + tuple_type_data.push((var_name.clone(), var_type)); + } else { + cons_error = Err(CheckErrors::BadTupleConstruction); + } Ok(()) }) })?; + cons_error?; let tuple_signature = TupleTypeSignature::try_from(tuple_type_data) .map_err(|_e| CheckErrors::BadTupleConstruction)?; @@ -338,15 +380,33 @@ fn check_special_equals( ) -> TypeResult { check_arguments_at_least(1, args)?; - let arg_types = checker.type_check_all(args, context)?; + let mut arg_type = None; + let mut costs = Vec::with_capacity(args.len()); - let mut arg_type = arg_types[0].clone(); - for x_type in arg_types.into_iter() { - analysis_typecheck_cost(checker, &x_type, &arg_type)?; - arg_type = TypeSignature::least_supertype(&StacksEpochId::Epoch21, &x_type, &arg_type) - .map_err(|_| CheckErrors::TypeError(x_type, arg_type))?; + for arg in args.iter() { + let x_type = checker.type_check(arg, context)?; + if arg_type.is_none() { + arg_type = Some(Ok(x_type.clone())); + } + if let Some(Ok(cur_type)) = arg_type { + let cost = compute_typecheck_cost(checker, &x_type, &cur_type); + costs.push(cost); + arg_type = Some( + TypeSignature::least_supertype(&StacksEpochId::Epoch21, &x_type, &cur_type) + .map_err(|_| CheckErrors::TypeError(x_type, cur_type)), + ); + } } + for cost in costs.into_iter() { + checker.add_cost(cost?)?; + } + + // check if there was a least supertype failure. 
+ arg_type.ok_or_else(|| { + CheckErrors::Expects("Arg type should be set because arguments checked for >= 1".into()) + })??; + Ok(TypeSignature::BoolType) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs index 090b259a26..c1b3aabb17 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs @@ -22,7 +22,8 @@ use crate::vm::analysis::type_checker::v2_1::{ TypeResult, TypingContext, }; use crate::vm::costs::cost_functions::ClarityCostFunction; -use crate::vm::costs::{analysis_typecheck_cost, cost_functions, runtime_cost}; +use crate::vm::costs::{analysis_typecheck_cost, cost_functions, runtime_cost, CostTracker}; +use crate::vm::diagnostic::Diagnostic; use crate::vm::functions::NativeFunctions; use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType}; pub use crate::vm::types::signatures::{BufferLength, ListTypeData, StringUTF8Length, BUFF_1}; @@ -73,9 +74,15 @@ pub fn check_special_map( )?; let iter = args[1..].iter(); - let mut func_args = Vec::with_capacity(iter.len()); let mut min_args = u32::MAX; - for arg in iter { + + // use func_type visitor pattern + let mut accumulated_type = None; + let mut total_costs = vec![]; + let mut check_result = Ok(()); + let mut accumulated_types = Vec::new(); + + for (arg_ix, arg) in iter.enumerate() { let argument_type = checker.type_check(arg, context)?; let entry_type = match argument_type { TypeSignature::SequenceType(sequence) => { @@ -101,11 +108,52 @@ pub fn check_special_map( return Err(CheckErrors::ExpectedSequence(argument_type).into()); } }; - func_args.push(entry_type); + + if check_result.is_ok() { + let (costs, result) = function_type.check_args_visitor_2_1( + checker, + &entry_type, + arg_ix, + accumulated_type.as_ref(), + ); + // add the accumulated type and total cost *before* + // checking for an error: we want the 
subsequent error handling + // to account for this cost + accumulated_types.push(entry_type); + total_costs.extend(costs); + + match result { + Ok(Some(returned_type)) => { + accumulated_type = Some(returned_type); + } + Ok(None) => {} + Err(e) => { + check_result = Err(e); + } + }; + } } - let mapped_type = - function_type.check_args(checker, &func_args, context.epoch, context.clarity_version)?; + if let Err(mut check_error) = check_result { + if let CheckErrors::IncorrectArgumentCount(expected, _actual) = check_error.err { + check_error.err = + CheckErrors::IncorrectArgumentCount(expected, args.len().saturating_sub(1)); + check_error.diagnostic = Diagnostic::err(&check_error.err) + } + // accumulate the checking costs + for cost in total_costs.into_iter() { + checker.add_cost(cost?)?; + } + + return Err(check_error); + } + + let mapped_type = function_type.check_args( + checker, + &accumulated_types, + context.epoch, + context.clarity_version, + )?; TypeSignature::list_of(mapped_type, min_args) .map_err(|_| CheckErrors::ConstructedListTooLarge.into()) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs index c870fdbab7..ba120575bd 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs @@ -130,7 +130,7 @@ fn test_names_tokens_contracts(#[case] version: ClarityVersion, #[case] epoch: S #[test] fn test_bad_asset_usage() { - use crate::vm::analysis::type_check; + use crate::vm::analysis::mem_type_check as mem_run_analysis; let bad_scripts = [ "(ft-get-balance stackoos tx-sender)", @@ -218,7 +218,12 @@ fn test_bad_asset_usage() { for (script, expected_err) in bad_scripts.iter().zip(expected.iter()) { let tokens_contract = format!("{}\n{}", FIRST_CLASS_TOKENS, script); - let actual_err = mem_type_check(&tokens_contract).unwrap_err(); + let actual_err = mem_run_analysis( + &tokens_contract, + 
ClarityVersion::Clarity2, + StacksEpochId::latest(), + ) + .unwrap_err(); println!("{}", script); assert_eq!(&actual_err.err, expected_err); } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 99942ba42c..b87177062c 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -433,7 +433,7 @@ fn test_names_tokens_contracts_interface() { "fungible_tokens": [], "non_fungible_tokens": [], "epoch": "Epoch21", - "clarity_version": "Clarity2" + "clarity_version": "Clarity3" }"#).unwrap(); eprintln!("{}", test_contract_json_str); @@ -1894,7 +1894,7 @@ fn clarity_trait_experiments_double_trait( // Can we define a trait with two methods with the same name and different types? match db.execute(|db| load_versioned(db, "double-trait", version, epoch)) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -1915,7 +1915,7 @@ fn clarity_trait_experiments_impl_double_trait_both( load_versioned(db, "impl-double-trait-both", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -1938,7 +1938,7 @@ fn clarity_trait_experiments_impl_double_trait_1( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("BadTraitImplementation(\"double-method\", \"foo\")")) } - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", 
res), @@ -1959,7 +1959,7 @@ fn clarity_trait_experiments_impl_double_trait_2( load_versioned(db, "impl-double-trait-2", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -1983,7 +1983,7 @@ fn clarity_trait_experiments_use_double_trait( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError(BoolType, UIntType)")) } - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -2007,7 +2007,7 @@ fn clarity_trait_experiments_use_partial_double_trait_1( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError(BoolType, UIntType)")) } - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -2029,7 +2029,7 @@ fn clarity_trait_experiments_use_partial_double_trait_2( load_versioned(db, "use-partial-double-trait-2", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -2047,7 +2047,7 @@ fn clarity_trait_experiments_identical_double_trait( // Can we define a trait with two methods with the same name and the same type? 
match db.execute(|db| load_versioned(db, "identical-double-trait", version, epoch)) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -2068,7 +2068,7 @@ fn clarity_trait_experiments_impl_identical_double_trait( load_versioned(db, "impl-identical-double-trait", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -2126,7 +2126,7 @@ fn clarity_trait_experiments_use_math_trait_transitive_name( load_versioned(db, "use-math-trait-transitive-name", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("TraitReferenceUnknown(\"math-alias\")")) } res => panic!("got {:?}", res), @@ -2147,7 +2147,7 @@ fn clarity_trait_experiments_use_original_and_define_a_trait( load_versioned(db, "use-original-and-define-a-trait", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TraitMethodUnknown(\"a\", \"do-it\")")) } @@ -2170,7 +2170,7 @@ fn clarity_trait_experiments_use_redefined_and_define_a_trait( load_versioned(db, "use-redefined-and-define-a-trait", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("TraitMethodUnknown(\"a\", \"do-that\")")) } res => panic!("got {:?}", 
res), @@ -2266,7 +2266,7 @@ fn clarity_trait_experiments_call_nested_trait_1( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError")) } - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), res => panic!("got {:?}", res), }; } @@ -2292,7 +2292,7 @@ fn clarity_trait_experiments_call_nested_trait_2( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError")) } - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), res => panic!("got {:?}", res), }; } @@ -2318,7 +2318,7 @@ fn clarity_trait_experiments_call_nested_trait_3_ok( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError")) } - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), res => panic!("got {:?}", res), }; } @@ -2374,7 +2374,7 @@ fn clarity_trait_experiments_call_nested_trait_4( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError")) } - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), res => panic!("got {:?}", res), }; } @@ -2469,7 +2469,7 @@ fn clarity_trait_experiments_call_let_rename_trait( load_versioned(db, "call-let-rename-trait", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TraitReferenceUnknown(\"new-math-contract\")")) } @@ -2637,7 +2637,7 @@ fn clarity_trait_experiments_constant_call( load_versioned(db, "constant-call", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if version == ClarityVersion::Clarity1 => { 
assert!(err.starts_with("TraitReferenceUnknown(\"principal-value\")")) } @@ -2660,7 +2660,7 @@ fn clarity_trait_experiments_constant_to_trait( load_versioned(db, "constant-to-trait", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType")) } @@ -2687,7 +2687,7 @@ fn clarity_trait_experiments_constant_to_constant_call( load_versioned(db, "constant-to-constant-call", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType")) } @@ -2740,7 +2740,9 @@ fn clarity_trait_experiments_downcast_literal_2( }) .unwrap_err(); match version { - ClarityVersion::Clarity2 => assert!(err.starts_with("ExpectedCallableType(PrincipalType)")), + ClarityVersion::Clarity2 | ClarityVersion::Clarity3 => { + assert!(err.starts_with("ExpectedCallableType(PrincipalType)")) + } ClarityVersion::Clarity1 => { assert!(err.starts_with("TraitReferenceUnknown(\"principal-value\")")) } @@ -2874,7 +2876,7 @@ fn clarity_trait_experiments_identical_trait_cast( load_versioned(db, "identical-trait-cast", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier")) } @@ -2900,7 +2902,7 @@ fn clarity_trait_experiments_trait_cast( load_versioned(db, "trait-cast", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier")) } @@ 
-2935,7 +2937,9 @@ fn clarity_trait_experiments_trait_cast_incompatible( assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier")) } } - ClarityVersion::Clarity2 => assert!(err.starts_with("IncompatibleTrait")), + ClarityVersion::Clarity2 | ClarityVersion::Clarity3 => { + assert!(err.starts_with("IncompatibleTrait")) + } } } @@ -3208,7 +3212,7 @@ fn clarity_trait_experiments_call_full_double_trait( }); match result { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -3239,7 +3243,7 @@ fn clarity_trait_experiments_call_partial_double_trait( }); match result { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -3290,7 +3294,7 @@ fn clarity_trait_experiments_principals_list_to_traits_list( load_versioned(db, "list-of-principals", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError(SequenceType(ListType")) } @@ -3333,7 +3337,7 @@ fn clarity_trait_experiments_mixed_list_to_traits_list( load_versioned(db, "mixed-list", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType")) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 85a6b39ea9..0333045c5a 100644 --- 
a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -29,7 +29,6 @@ use crate::vm::analysis::{mem_type_check as mem_run_analysis, AnalysisDatabase}; use crate::vm::ast::errors::ParseErrors; use crate::vm::ast::{build_ast, parse}; use crate::vm::contexts::OwnedEnvironment; -use crate::vm::database::MemoryBackingStore; use crate::vm::representations::SymbolicExpression; use crate::vm::tests::test_clarity_versions; use crate::vm::types::signatures::TypeSignature::OptionalType; diff --git a/clarity/src/vm/ast/definition_sorter/tests.rs b/clarity/src/vm/ast/definition_sorter/tests.rs index d0b24164ae..2c993db266 100644 --- a/clarity/src/vm/ast/definition_sorter/tests.rs +++ b/clarity/src/vm/ast/definition_sorter/tests.rs @@ -25,7 +25,6 @@ use crate::vm::ast::errors::{ParseErrors, ParseResult}; use crate::vm::ast::expression_identifier::ExpressionIdentifier; use crate::vm::ast::parser; use crate::vm::ast::types::{BuildASTPass, ContractAST}; -use crate::vm::database::MemoryBackingStore; use crate::vm::types::QualifiedContractIdentifier; use crate::vm::ClarityVersion; diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 7ad70e4ffb..3f9c5bf4f7 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -74,7 +74,7 @@ pub struct Environment<'a, 'b, 'hooks> { } pub struct OwnedEnvironment<'a, 'hooks> { - context: GlobalContext<'a, 'hooks>, + pub(crate) context: GlobalContext<'a, 'hooks>, call_stack: CallStack, } @@ -973,7 +973,11 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { let contract = self .global_context .database - .get_contract(contract_identifier)?; + .get_contract(contract_identifier) + .or_else(|e| { + self.global_context.roll_back()?; + Err(e) + })?; let result = { let mut nested_env = Environment::new( diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 744b605691..3424610b35 100644 --- a/clarity/src/vm/costs/mod.rs +++ 
b/clarity/src/vm/costs/mod.rs @@ -19,7 +19,6 @@ use std::{cmp, fmt}; use hashbrown::HashMap; use lazy_static::lazy_static; -use rusqlite::types::{FromSql, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use serde::{Deserialize, Serialize}; use stacks_common::types::StacksEpochId; @@ -1166,23 +1165,6 @@ impl fmt::Display for ExecutionCost { } } -impl ToSql for ExecutionCost { - fn to_sql(&self) -> rusqlite::Result { - let val = serde_json::to_string(self) - .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?; - Ok(ToSqlOutput::from(val)) - } -} - -impl FromSql for ExecutionCost { - fn column_result(value: ValueRef) -> FromSqlResult { - let str_val = String::column_result(value)?; - let parsed = serde_json::from_str(&str_val) - .map_err(|e| rusqlite::types::FromSqlError::Other(Box::new(e)))?; - Ok(parsed) - } -} - pub trait CostOverflowingMath { fn cost_overflow_mul(self, other: T) -> Result; fn cost_overflow_add(self, other: T) -> Result; diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index b395f88c6d..7a1aa3e3bc 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -54,6 +54,7 @@ use crate::vm::types::{ }; pub const STORE_CONTRACT_SRC_INTERFACE: bool = true; +const TENURE_HEIGHT_KEY: &str = "_stx-data::tenure_height"; pub type StacksEpoch = GenericStacksEpoch; @@ -855,6 +856,38 @@ impl<'a> ClarityDatabase<'a> { Ok(()) } + /// Returns the tenure height of the current block. + pub fn get_tenure_height(&mut self) -> Result { + if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { + // Before epoch 3.0, the tenure height was not stored in the + // Clarity state. Instead, it was the same as the block height. + return Ok(self.get_current_block_height()); + } + + self.get_data(TENURE_HEIGHT_KEY)? 
+ .ok_or_else(|| { + InterpreterError::Expect("No tenure height in stored Clarity state".into()).into() + }) + .and_then(|x| { + u32::try_into(x).map_err(|_| { + InterpreterError::Expect("Bad tenure height in stored Clarity state".into()) + .into() + }) + }) + } + + /// Set the tenure height of the current block. In the first block of a new + /// tenure, this height must be incremented before evaluating any + /// transactions in the block. + pub fn set_tenure_height(&mut self, height: u32) -> Result<()> { + if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { + return Err(Error::Interpreter(InterpreterError::Expect( + "Setting tenure height in Clarity state is not supported before epoch 3.0".into(), + ))); + } + self.put_data(TENURE_HEIGHT_KEY, &height) + } + pub fn destroy(self) -> RollbackWrapper<'a> { self.store } diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index afe2c550ba..b6a45ee764 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -16,15 +16,18 @@ use std::path::PathBuf; +#[cfg(feature = "canonical")] use rusqlite::Connection; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use crate::vm::analysis::AnalysisDatabase; use crate::vm::contexts::GlobalContext; +#[cfg(feature = "canonical")] +use crate::vm::database::SqliteConnection; use crate::vm::database::{ BurnStateDB, ClarityDatabase, ClarityDeserializable, ClaritySerializable, HeadersDB, - SqliteConnection, NULL_BURN_STATE_DB, NULL_HEADER_DB, + NULL_BURN_STATE_DB, NULL_HEADER_DB, }; use crate::vm::errors::{ CheckErrors, IncomparableError, InterpreterError, InterpreterResult as Result, @@ -83,6 +86,8 @@ pub trait ClarityBackingStore { fn get_open_chain_tip_height(&mut self) -> u32; fn get_open_chain_tip(&mut self) -> StacksBlockId; + + #[cfg(feature = "canonical")] fn 
get_side_store(&mut self) -> &Connection; fn get_cc_special_cases_handler(&self) -> Option { @@ -106,59 +111,27 @@ pub trait ClarityBackingStore { fn get_contract_hash( &mut self, contract: &QualifiedContractIdentifier, - ) -> Result<(StacksBlockId, Sha512Trunc256Sum)> { - let key = make_contract_hash_key(contract); - let contract_commitment = self - .get_data(&key)? - .map(|x| ContractCommitment::deserialize(&x)) - .ok_or_else(|| CheckErrors::NoSuchContract(contract.to_string()))?; - let ContractCommitment { - block_height, - hash: contract_hash, - } = contract_commitment?; - let bhh = self.get_block_at_height(block_height) - .ok_or_else(|| InterpreterError::Expect("Should always be able to map from height to block hash when looking up contract information.".into()))?; - Ok((bhh, contract_hash)) - } + ) -> Result<(StacksBlockId, Sha512Trunc256Sum)>; fn insert_metadata( &mut self, contract: &QualifiedContractIdentifier, key: &str, value: &str, - ) -> Result<()> { - let bhh = self.get_open_chain_tip(); - SqliteConnection::insert_metadata( - self.get_side_store(), - &bhh, - &contract.to_string(), - key, - value, - ) - } + ) -> Result<()>; fn get_metadata( &mut self, contract: &QualifiedContractIdentifier, key: &str, - ) -> Result> { - let (bhh, _) = self.get_contract_hash(contract)?; - SqliteConnection::get_metadata(self.get_side_store(), &bhh, &contract.to_string(), key) - } + ) -> Result>; fn get_metadata_manual( &mut self, at_height: u32, contract: &QualifiedContractIdentifier, key: &str, - ) -> Result> { - let bhh = self.get_block_at_height(at_height) - .ok_or_else(|| { - warn!("Unknown block height when manually querying metadata"; "block_height" => at_height); - RuntimeErrorType::BadBlockHeight(at_height.to_string()) - })?; - SqliteConnection::get_metadata(self.get_side_store(), &bhh, &contract.to_string(), key) - } + ) -> Result>; fn put_all_metadata( &mut self, @@ -240,6 +213,7 @@ impl ClarityBackingStore for NullBackingStore { panic!("NullBackingStore can't 
retrieve data") } + #[cfg(feature = "canonical")] fn get_side_store(&mut self) -> &Connection { panic!("NullBackingStore has no side store") } @@ -263,84 +237,37 @@ impl ClarityBackingStore for NullBackingStore { fn put_all_data(&mut self, mut _items: Vec<(String, String)>) -> Result<()> { panic!("NullBackingStore cannot put") } -} - -pub struct MemoryBackingStore { - side_store: Connection, -} -impl Default for MemoryBackingStore { - fn default() -> Self { - MemoryBackingStore::new() - } -} - -impl MemoryBackingStore { - #[allow(clippy::unwrap_used)] - pub fn new() -> MemoryBackingStore { - let side_store = SqliteConnection::memory().unwrap(); - - let mut memory_marf = MemoryBackingStore { side_store }; - - memory_marf.as_clarity_db().initialize(); - - memory_marf - } - - pub fn as_clarity_db(&mut self) -> ClarityDatabase { - ClarityDatabase::new(self, &NULL_HEADER_DB, &NULL_BURN_STATE_DB) - } - - pub fn as_analysis_db(&mut self) -> AnalysisDatabase { - AnalysisDatabase::new(self) - } -} - -impl ClarityBackingStore for MemoryBackingStore { - fn set_block_hash(&mut self, bhh: StacksBlockId) -> InterpreterResult { - Err(RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash(bhh.0)).into()) - } - - fn get_data(&mut self, key: &str) -> Result> { - SqliteConnection::get(self.get_side_store(), key) - } - - fn get_data_with_proof(&mut self, key: &str) -> Result)>> { - Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) - } - - fn get_side_store(&mut self) -> &Connection { - &self.side_store - } - - fn get_block_at_height(&mut self, height: u32) -> Option { - if height == 0 { - Some(StacksBlockId([255; 32])) - } else { - None - } - } - - fn get_open_chain_tip(&mut self) -> StacksBlockId { - StacksBlockId([255; 32]) - } - - fn get_open_chain_tip_height(&mut self) -> u32 { - 0 + fn get_contract_hash( + &mut self, + _contract: &QualifiedContractIdentifier, + ) -> Result<(StacksBlockId, Sha512Trunc256Sum)> { + panic!("NullBackingStore cannot 
get_contract_hash") } - fn get_current_block_height(&mut self) -> u32 { - 1 + fn insert_metadata( + &mut self, + _contract: &QualifiedContractIdentifier, + _key: &str, + _value: &str, + ) -> Result<()> { + panic!("NullBackingStore cannot insert_metadata") } - fn get_cc_special_cases_handler(&self) -> Option { - None + fn get_metadata( + &mut self, + _contract: &QualifiedContractIdentifier, + _key: &str, + ) -> Result> { + panic!("NullBackingStore cannot get_metadata") } - fn put_all_data(&mut self, items: Vec<(String, String)>) -> Result<()> { - for (key, value) in items.into_iter() { - SqliteConnection::put(self.get_side_store(), &key, &value)?; - } - Ok(()) + fn get_metadata_manual( + &mut self, + _at_height: u32, + _contract: &QualifiedContractIdentifier, + _key: &str, + ) -> Result> { + panic!("NullBackingStore cannot get_metadata_manual") } } diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs index 1092992982..d16d944d55 100644 --- a/clarity/src/vm/database/mod.rs +++ b/clarity/src/vm/database/mod.rs @@ -15,13 +15,16 @@ // along with this program. If not, see . 
use hashbrown::HashMap; +#[cfg(feature = "canonical")] +pub use sqlite::MemoryBackingStore; pub use self::clarity_db::{ BurnStateDB, ClarityDatabase, HeadersDB, StoreType, NULL_BURN_STATE_DB, NULL_HEADER_DB, STORE_CONTRACT_SRC_INTERFACE, }; -pub use self::clarity_store::{ClarityBackingStore, MemoryBackingStore, SpecialCaseHandler}; +pub use self::clarity_store::{ClarityBackingStore, SpecialCaseHandler}; pub use self::key_value_wrapper::{RollbackWrapper, RollbackWrapperPersistedLog}; +#[cfg(feature = "canonical")] pub use self::sqlite::SqliteConnection; pub use self::structures::{ ClarityDeserializable, ClaritySerializable, DataMapMetadata, DataVariableMetadata, @@ -31,5 +34,6 @@ pub use self::structures::{ pub mod clarity_db; pub mod clarity_store; mod key_value_wrapper; -mod sqlite; +#[cfg(feature = "canonical")] +pub mod sqlite; mod structures; diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index 6b2d64afa5..bc8fca3dc3 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -14,18 +14,27 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use rusqlite::types::{FromSql, ToSql}; +use rusqlite::types::{FromSql, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use rusqlite::{ Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OptionalExtension, Row, Savepoint, NO_PARAMS, }; -use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; use stacks_common::util::db_common::tx_busy_handler; +use stacks_common::util::hash::Sha512Trunc256Sum; +use super::clarity_store::{make_contract_hash_key, ContractCommitment}; +use super::{ + ClarityBackingStore, ClarityDatabase, ClarityDeserializable, SpecialCaseHandler, + NULL_BURN_STATE_DB, NULL_HEADER_DB, +}; +use crate::vm::analysis::{AnalysisDatabase, CheckErrors}; use crate::vm::contracts::Contract; +use crate::vm::costs::ExecutionCost; use crate::vm::errors::{ Error, IncomparableError, InterpreterError, InterpreterResult as Result, RuntimeErrorType, }; +use crate::vm::types::QualifiedContractIdentifier; const SQL_FAIL_MESSAGE: &str = "PANIC: SQL Failure in Smart Contract VM."; @@ -73,6 +82,62 @@ fn sqlite_has_entry(conn: &Connection, key: &str) -> Result { Ok(sqlite_get(conn, key)?.is_some()) } +pub fn sqlite_get_contract_hash( + store: &mut dyn ClarityBackingStore, + contract: &QualifiedContractIdentifier, +) -> Result<(StacksBlockId, Sha512Trunc256Sum)> { + let key = make_contract_hash_key(contract); + let contract_commitment = store + .get_data(&key)? 
+ .map(|x| ContractCommitment::deserialize(&x)) + .ok_or_else(|| CheckErrors::NoSuchContract(contract.to_string()))?; + let ContractCommitment { + block_height, + hash: contract_hash, + } = contract_commitment?; + let bhh = store.get_block_at_height(block_height) + .ok_or_else(|| InterpreterError::Expect("Should always be able to map from height to block hash when looking up contract information.".into()))?; + Ok((bhh, contract_hash)) +} + +pub fn sqlite_insert_metadata( + store: &mut dyn ClarityBackingStore, + contract: &QualifiedContractIdentifier, + key: &str, + value: &str, +) -> Result<()> { + let bhh = store.get_open_chain_tip(); + SqliteConnection::insert_metadata( + store.get_side_store(), + &bhh, + &contract.to_string(), + key, + value, + ) +} + +pub fn sqlite_get_metadata( + store: &mut dyn ClarityBackingStore, + contract: &QualifiedContractIdentifier, + key: &str, +) -> Result> { + let (bhh, _) = store.get_contract_hash(contract)?; + SqliteConnection::get_metadata(store.get_side_store(), &bhh, &contract.to_string(), key) +} + +pub fn sqlite_get_metadata_manual( + store: &mut dyn ClarityBackingStore, + at_height: u32, + contract: &QualifiedContractIdentifier, + key: &str, +) -> Result> { + let bhh = store.get_block_at_height(at_height).ok_or_else(|| { + warn!("Unknown block height when manually querying metadata"; "block_height" => at_height); + RuntimeErrorType::BadBlockHeight(at_height.to_string()) + })?; + SqliteConnection::get_metadata(store.get_side_store(), &bhh, &contract.to_string(), key) +} + impl SqliteConnection { pub fn put(conn: &Connection, key: &str, value: &str) -> Result<()> { sqlite_put(conn, key, value) @@ -217,3 +282,133 @@ impl SqliteConnection { Ok(conn) } } + +pub struct MemoryBackingStore { + side_store: Connection, +} + +impl Default for MemoryBackingStore { + fn default() -> Self { + MemoryBackingStore::new() + } +} + +impl MemoryBackingStore { + #[allow(clippy::unwrap_used)] + pub fn new() -> MemoryBackingStore { + let 
side_store = SqliteConnection::memory().unwrap(); + + let mut memory_marf = MemoryBackingStore { side_store }; + + memory_marf.as_clarity_db().initialize(); + + memory_marf + } + + pub fn as_clarity_db(&mut self) -> ClarityDatabase { + ClarityDatabase::new(self, &NULL_HEADER_DB, &NULL_BURN_STATE_DB) + } + + pub fn as_analysis_db(&mut self) -> AnalysisDatabase { + AnalysisDatabase::new(self) + } +} + +impl ClarityBackingStore for MemoryBackingStore { + fn set_block_hash(&mut self, bhh: StacksBlockId) -> Result { + Err(RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash(bhh.0)).into()) + } + + fn get_data(&mut self, key: &str) -> Result> { + SqliteConnection::get(self.get_side_store(), key) + } + + fn get_data_with_proof(&mut self, key: &str) -> Result)>> { + Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) + } + + fn get_side_store(&mut self) -> &Connection { + &self.side_store + } + + fn get_block_at_height(&mut self, height: u32) -> Option { + if height == 0 { + Some(StacksBlockId([255; 32])) + } else { + None + } + } + + fn get_open_chain_tip(&mut self) -> StacksBlockId { + StacksBlockId([255; 32]) + } + + fn get_open_chain_tip_height(&mut self) -> u32 { + 0 + } + + fn get_current_block_height(&mut self) -> u32 { + 1 + } + + fn get_cc_special_cases_handler(&self) -> Option { + None + } + + fn put_all_data(&mut self, items: Vec<(String, String)>) -> Result<()> { + for (key, value) in items.into_iter() { + SqliteConnection::put(self.get_side_store(), &key, &value)?; + } + Ok(()) + } + + fn get_contract_hash( + &mut self, + contract: &QualifiedContractIdentifier, + ) -> Result<(StacksBlockId, Sha512Trunc256Sum)> { + sqlite_get_contract_hash(self, contract) + } + + fn insert_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + value: &str, + ) -> Result<()> { + sqlite_insert_metadata(self, contract, key, value) + } + + fn get_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> 
Result> { + sqlite_get_metadata(self, contract, key) + } + + fn get_metadata_manual( + &mut self, + at_height: u32, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> Result> { + sqlite_get_metadata_manual(self, at_height, contract, key) + } +} + +impl ToSql for ExecutionCost { + fn to_sql(&self) -> rusqlite::Result { + let val = serde_json::to_string(self) + .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?; + Ok(ToSqlOutput::from(val)) + } +} + +impl FromSql for ExecutionCost { + fn column_result(value: ValueRef) -> FromSqlResult { + let str_val = String::column_result(value)?; + let parsed = serde_json::from_str(&str_val) + .map_err(|e| rusqlite::types::FromSqlError::Other(Box::new(e)))?; + Ok(parsed) + } +} diff --git a/clarity/src/vm/docs/contracts.rs b/clarity/src/vm/docs/contracts.rs index 7426be7966..138203db71 100644 --- a/clarity/src/vm/docs/contracts.rs +++ b/clarity/src/vm/docs/contracts.rs @@ -4,10 +4,13 @@ use hashbrown::{HashMap, HashSet}; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::StacksEpochId; -use crate::vm::analysis::{mem_type_check, ContractAnalysis}; +#[cfg(feature = "canonical")] +use crate::vm::analysis::mem_type_check; +use crate::vm::analysis::ContractAnalysis; use crate::vm::ast::{build_ast_with_rules, ASTRules}; use crate::vm::contexts::GlobalContext; use crate::vm::costs::LimitedCostTracker; +#[cfg(feature = "canonical")] use crate::vm::database::MemoryBackingStore; use crate::vm::docs::{get_input_type_string, get_output_type_string, get_signature}; use crate::vm::types::{FunctionType, QualifiedContractIdentifier, Value}; @@ -60,6 +63,7 @@ fn make_func_ref(func_name: &str, func_type: &FunctionType, description: &str) - } } +#[cfg(feature = "canonical")] #[allow(clippy::expect_used)] fn get_constant_value(var_name: &str, contract_content: &str) -> Value { let to_eval = format!("{}\n{}", contract_content, var_name); @@ -68,6 +72,7 @@ fn get_constant_value(var_name: &str, 
contract_content: &str) -> Value { .expect("BUG: failed to return constant value") } +#[cfg(feature = "canonical")] fn doc_execute(program: &str) -> Result, vm::Error> { let contract_id = QualifiedContractIdentifier::transient(); let mut contract_context = ContractContext::new(contract_id.clone(), ClarityVersion::Clarity2); @@ -94,11 +99,15 @@ fn doc_execute(program: &str) -> Result, vm::Error> { }) } +#[cfg(feature = "canonical")] #[allow(clippy::expect_used)] -pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractRef { - let (_, contract_analysis) = - mem_type_check(content, ClarityVersion::latest(), StacksEpochId::latest()) - .expect("BUG: failed to type check boot contract"); +pub fn make_docs( + content: &str, + support_docs: &ContractSupportDocs, + version: ClarityVersion, +) -> ContractRef { + let (_, contract_analysis) = mem_type_check(content, version, StacksEpochId::latest()) + .expect("BUG: failed to type check boot contract"); let ContractAnalysis { public_function_types, @@ -176,15 +185,17 @@ pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractR /// Produce a set of documents for multiple contracts, supplied as a list of `(contract_name, contract_content)` pairs, /// and a map from `contract_name` to corresponding `ContractSupportDocs` +#[cfg(feature = "canonical")] pub fn produce_docs_refs, B: AsRef>( contracts: &[(A, B)], support_docs: &HashMap<&str, ContractSupportDocs>, + version: ClarityVersion, ) -> BTreeMap { let mut docs = BTreeMap::new(); for (contract_name, content) in contracts.iter() { if let Some(contract_support) = support_docs.get(contract_name.as_ref()) { - let contract_ref = make_docs(content.as_ref(), contract_support); + let contract_ref = make_docs(content.as_ref(), contract_support, version); docs.insert(contract_name.as_ref().to_string(), contract_ref); } diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index e0b78403b9..940b2f2f6a 100644 --- 
a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -41,7 +41,9 @@ pub struct KeywordAPI { pub description: &'static str, pub example: &'static str, /// The version where this keyword was first introduced. - pub version: ClarityVersion, + pub min_version: ClarityVersion, + /// The version where this keyword was disabled. + pub max_version: Option, } #[derive(Serialize, Clone)] @@ -63,7 +65,9 @@ pub struct FunctionAPI { pub description: String, pub example: String, /// The version where this keyword was first introduced. - pub version: ClarityVersion, + pub min_version: ClarityVersion, + /// The version where this keyword was disabled. + pub max_version: Option, } pub struct SimpleFunctionAPI { @@ -96,17 +100,19 @@ const BLOCK_HEIGHT: SimpleKeywordAPI = SimpleKeywordAPI { name: "block-height", snippet: "block-height", output_type: "uint", - description: "Returns the current block height of the Stacks blockchain as an uint", + description: "Returns the current block height of the Stacks blockchain in Clarity 1 and 2. +Upon activation of epoch 3.0, `block-height` will return the same value as `tenure-height`. 
+In Clarity 3, `block-height` is removed and has been replaced with `stacks-block-height`.", example: - "(> block-height 1000) ;; returns true if the current block-height has passed 1000 blocks.", + "(> block-height u1000) ;; returns true if the current block-height has passed 1000 blocks.", }; const BURN_BLOCK_HEIGHT: SimpleKeywordAPI = SimpleKeywordAPI { name: "burn-block-height", snippet: "burn-block-height", output_type: "uint", - description: "Returns the current block height of the underlying burn blockchain as a uint", - example: "(> burn-block-height 1000) ;; returns true if the current height of the underlying burn blockchain has passed 1000 blocks.", + description: "Returns the current block height of the underlying burn blockchain.", + example: "(> burn-block-height u832000) ;; returns true if the current height of the underlying burn blockchain has passed 832,000 blocks.", }; const CONTRACT_CALLER_KEYWORD: SimpleKeywordAPI = SimpleKeywordAPI { @@ -120,6 +126,25 @@ to the same contract principal.", example: "(print contract-caller) ;; Will print out a Stacks address of the transaction sender", }; +const STACKS_BLOCK_HEIGHT_KEYWORD: SimpleKeywordAPI = SimpleKeywordAPI { + name: "stacks-block-height", + snippet: "stacks-block-height", + output_type: "uint", + description: "Returns the current block height of the Stacks blockchain.", + example: + "(<= stacks-block-height u500000) ;; returns true if the current block-height has not passed 500,000 blocks.", +}; + +const TENURE_HEIGHT_KEYWORD: SimpleKeywordAPI = SimpleKeywordAPI { + name: "tenure-height", + snippet: "tenure-height", + output_type: "uint", + description: "Returns the number of tenures that have passed. 
+At the start of epoch 3.0, `tenure-height` will return the same value as `block-height`, then it will continue to increase as each tenure passes.", + example: + "(< tenure-height u140000) ;; returns true if the current tenure-height has not passed 140,000 blocks.", +}; + const TX_SENDER_KEYWORD: SimpleKeywordAPI = SimpleKeywordAPI { name: "tx-sender", snippet: "tx-sender", @@ -245,7 +270,7 @@ const BUFF_TO_UINT_LE_API: SimpleFunctionAPI = SimpleFunctionAPI { name: None, snippet: "buff-to-uint-le ${1:buff}", signature: "(buff-to-uint-le (buff 16))", - description: "Converts a byte buffer to an unsigned integer use a little-endian encoding.. + description: "Converts a byte buffer to an unsigned integer using a little-endian encoding. The byte buffer can be up to 16 bytes in length. If there are fewer than 16 bytes, as this function uses a little-endian encoding, the input behaves as if it is zero-padded on the _right_. @@ -857,7 +882,8 @@ fn make_for_simple_native( signature: api.signature.to_string(), description: api.description.to_string(), example: api.example.to_string(), - version: function.get_version(), + min_version: function.get_min_version(), + max_version: function.get_max_version(), } } @@ -2538,13 +2564,15 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { } fn make_keyword_reference(variable: &NativeVariables) -> Option { - let simple_api = match variable { + let keyword = match variable { NativeVariables::TxSender => TX_SENDER_KEYWORD.clone(), NativeVariables::ContractCaller => CONTRACT_CALLER_KEYWORD.clone(), NativeVariables::NativeNone => NONE_KEYWORD.clone(), NativeVariables::NativeTrue => TRUE_KEYWORD.clone(), NativeVariables::NativeFalse => FALSE_KEYWORD.clone(), NativeVariables::BlockHeight => BLOCK_HEIGHT.clone(), + NativeVariables::StacksBlockHeight => STACKS_BLOCK_HEIGHT_KEYWORD.clone(), + NativeVariables::TenureHeight => TENURE_HEIGHT_KEYWORD.clone(), NativeVariables::BurnBlockHeight => BURN_BLOCK_HEIGHT.clone(),
NativeVariables::TotalLiquidMicroSTX => TOTAL_LIQUID_USTX_KEYWORD.clone(), NativeVariables::Regtest => REGTEST_KEYWORD.clone(), @@ -2553,12 +2581,13 @@ fn make_keyword_reference(variable: &NativeVariables) -> Option { NativeVariables::TxSponsor => TX_SPONSOR_KEYWORD.clone(), }; Some(KeywordAPI { - name: simple_api.name, - snippet: simple_api.snippet, - output_type: simple_api.output_type, - description: simple_api.description, - example: simple_api.example, - version: variable.get_version(), + name: keyword.name, + snippet: keyword.snippet, + output_type: keyword.output_type, + description: keyword.description, + example: keyword.example, + min_version: variable.get_min_version(), + max_version: variable.get_max_version(), }) } @@ -2571,7 +2600,8 @@ fn make_for_special(api: &SpecialAPI, function: &NativeFunctions) -> FunctionAPI signature: api.signature.to_string(), description: api.description.to_string(), example: api.example.to_string(), - version: function.get_version(), + min_version: function.get_min_version(), + max_version: function.get_max_version(), } } @@ -2584,7 +2614,8 @@ fn make_for_define(api: &DefineAPI, name: String) -> FunctionAPI { signature: api.signature.to_string(), description: api.description.to_string(), example: api.example.to_string(), - version: ClarityVersion::Clarity1, + min_version: ClarityVersion::Clarity1, + max_version: None, } } diff --git a/clarity/src/vm/errors.rs b/clarity/src/vm/errors.rs index 55977ec6aa..b3b0ca5fea 100644 --- a/clarity/src/vm/errors.rs +++ b/clarity/src/vm/errors.rs @@ -17,6 +17,7 @@ use std::error::Error as ErrorTrait; use std::{error, fmt}; +#[cfg(feature = "canonical")] use rusqlite::Error as SqliteError; use serde_json::Error as SerdeJSONErr; use stacks_common::types::chainstate::BlockHeaderHash; @@ -56,6 +57,7 @@ pub enum InterpreterError { UninitializedPersistedVariable, FailedToConstructAssetTable, FailedToConstructEventBatch, + #[cfg(feature = "canonical")] SqliteError(IncomparableError), 
BadFileName, FailedToCreateDataDirectory, diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index 7c3647c2f6..1a40c1fc51 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -195,21 +195,6 @@ define_versioned_named_enum!(NativeFunctions(ClarityVersion) { ReplaceAt("replace-at?", ClarityVersion::Clarity2), }); -impl NativeFunctions { - pub fn lookup_by_name_at_version( - name: &str, - version: &ClarityVersion, - ) -> Option { - NativeFunctions::lookup_by_name(name).and_then(|native_function| { - if &native_function.get_version() <= version { - Some(native_function) - } else { - None - } - }) - } -} - /// /// Returns a callable for the given native function if it exists in the provided /// ClarityVersion diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index bca5223828..d64b207522 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -41,6 +41,9 @@ pub mod coverage; pub mod events; +#[cfg(feature = "canonical")] +pub mod tooling; + #[cfg(any(test, feature = "testing"))] pub mod tests; diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs index 817a74917b..3c4dc14b2e 100644 --- a/clarity/src/vm/tests/contracts.rs +++ b/clarity/src/vm/tests/contracts.rs @@ -129,8 +129,9 @@ fn test_get_block_info_eval( let contract_identifier = QualifiedContractIdentifier::local(&format!("test-contract-{}", i)).unwrap(); owned_env - .initialize_contract( + .initialize_versioned_contract( contract_identifier.clone(), + ClarityVersion::Clarity2, contracts[i], None, ASTRules::PrecheckSize, @@ -1147,3 +1148,38 @@ fn test_cc_trait_stack_depth( RuntimeErrorType::MaxStackDepthReached.into() ); } + +#[apply(test_epochs)] +fn test_eval_with_non_existing_contract( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); + + let mut placeholder_context = ContractContext::new( + QualifiedContractIdentifier::transient(), 
+ ClarityVersion::Clarity2, + ); + + let mut env = owned_env.get_exec_environment( + Some(get_principal().expect_principal().unwrap()), + None, + &mut placeholder_context, + ); + + let result = env.eval_read_only( + &QualifiedContractIdentifier::local("absent").unwrap(), + "(ok 0)", + ); + assert_eq!( + result.as_ref().unwrap_err(), + &Error::Unchecked(CheckErrors::NoSuchContract( + QualifiedContractIdentifier::local("absent") + .unwrap() + .to_string() + )) + ); + drop(env); + owned_env.commit().unwrap(); + assert!(owned_env.destruct().is_some()); +} diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 9f98e7e930..c60377ba3d 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -33,6 +33,19 @@ mod sequences; #[cfg(test)] mod simple_apply_eval; mod traits; +mod variables; + +#[cfg(any(test, feature = "testing"))] +impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { + pub fn set_tenure_height(&mut self, tenure_height: u32) { + self.context.database.begin(); + self.context + .database + .set_tenure_height(tenure_height) + .unwrap(); + self.context.database.commit().unwrap(); + } +} macro_rules! epochs_template { ($($epoch:ident,)*) => { @@ -50,7 +63,6 @@ macro_rules! epochs_template { match epoch { // don't test Epoch-1.0 StacksEpochId::Epoch10 => (), - StacksEpochId::Epoch30 => (), // this will lead to a compile time failure if an epoch is left out // of the epochs_template! macro list $(StacksEpochId::$epoch)|* => (), @@ -76,10 +88,16 @@ macro_rules! 
clarity_template { match (epoch, clarity) { // don't test Epoch-1.0 (StacksEpochId::Epoch10, _) => (), - (StacksEpochId::Epoch30, _) => (), // don't test these pairs, because they aren't supported: (StacksEpochId::Epoch20, ClarityVersion::Clarity2) => (), (StacksEpochId::Epoch2_05, ClarityVersion::Clarity2) => (), + (StacksEpochId::Epoch20, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch2_05, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch21, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch22, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch23, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch24, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch25, ClarityVersion::Clarity3) => (), // this will lead to a compile time failure if a pair is left out // of the clarity_template! macro list $((StacksEpochId::$epoch, ClarityVersion::$clarity))|* => (), @@ -103,6 +121,7 @@ epochs_template! { Epoch23, Epoch24, Epoch25, + Epoch30, } clarity_template! { @@ -118,6 +137,9 @@ clarity_template! 
{ (Epoch24, Clarity2), (Epoch25, Clarity1), (Epoch25, Clarity2), + (Epoch30, Clarity1), + (Epoch30, Clarity2), + (Epoch30, Clarity3), } #[cfg(test)] @@ -150,7 +172,14 @@ impl MemoryEnvironmentGenerator { pub struct TopLevelMemoryEnvironmentGenerator(MemoryBackingStore); impl TopLevelMemoryEnvironmentGenerator { pub fn get_env(&mut self, epoch: StacksEpochId) -> OwnedEnvironment { - let owned_env = OwnedEnvironment::new(self.0.as_clarity_db(), epoch); + let mut db = self.0.as_clarity_db(); + db.begin(); + db.set_clarity_epoch_version(epoch).unwrap(); + db.commit().unwrap(); + let mut owned_env = OwnedEnvironment::new(db, epoch); + if epoch >= StacksEpochId::Epoch30 { + owned_env.set_tenure_height(1); + } owned_env } } diff --git a/clarity/src/vm/tests/variables.rs b/clarity/src/vm/tests/variables.rs new file mode 100644 index 0000000000..41b880afe9 --- /dev/null +++ b/clarity/src/vm/tests/variables.rs @@ -0,0 +1,1097 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +#[cfg(any(test, feature = "testing"))] +use rstest::rstest; +use stacks_common::types::StacksEpochId; + +#[cfg(test)] +use crate::vm::analysis::type_checker::v2_1::tests::contracts::type_check_version; +use crate::vm::analysis::{run_analysis, CheckError}; +use crate::vm::ast::{parse, ASTRules}; +use crate::vm::database::MemoryBackingStore; +use crate::vm::errors::{CheckErrors, Error}; +use crate::vm::tests::{test_clarity_versions, tl_env_factory, TopLevelMemoryEnvironmentGenerator}; +use crate::vm::types::{QualifiedContractIdentifier, Value}; +use crate::vm::{ClarityVersion, ContractContext}; + +#[apply(test_clarity_versions)] +fn test_block_height( + version: ClarityVersion, + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { + let contract = "(define-read-only (test-func) block-height)"; + + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::transient(), version); + + let mut owned_env = tl_env_factory.get_env(epoch); + let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); + + let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut marf = MemoryBackingStore::new(); + let mut db = marf.as_analysis_db(); + let analysis = db.execute(|db| { + type_check_version(&contract_identifier, &mut exprs, db, true, epoch, version) + }); + if version >= ClarityVersion::Clarity3 { + let err = analysis.unwrap_err(); + assert_eq!( + CheckErrors::UndefinedVariable("block-height".to_string()), + err.err + ); + } else { + assert!(analysis.is_ok()); + } + + // If we're testing epoch 3, we need to simulate the tenure height being + // set at the transition. + if epoch >= StacksEpochId::Epoch30 { + owned_env.set_tenure_height(1); + } + + // Initialize the contract + // Note that we're ignoring the analysis failure here so that we can test + // the runtime behavior. 
In Clarity 3, if this case somehow gets past the + // analysis, it should fail at runtime. + let result = owned_env.initialize_versioned_contract( + contract_identifier.clone(), + version, + contract, + None, + ASTRules::PrecheckSize, + ); + + let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + + // Call the function + let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); + // In Clarity 3, this should trigger a runtime error + if version >= ClarityVersion::Clarity3 { + let err = eval_result.unwrap_err(); + assert_eq!( + Error::Unchecked(CheckErrors::UndefinedVariable("block-height".to_string(),)), + err + ); + } else { + assert_eq!(Ok(Value::UInt(1)), eval_result); + } +} + +#[apply(test_clarity_versions)] +fn test_stacks_block_height( + version: ClarityVersion, + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { + let contract = "(define-read-only (test-func) stacks-block-height)"; + + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::transient(), version); + + let mut owned_env = tl_env_factory.get_env(epoch); + let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); + + let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut marf = MemoryBackingStore::new(); + let mut db = marf.as_analysis_db(); + let analysis = db.execute(|db| { + type_check_version(&contract_identifier, &mut exprs, db, true, epoch, version) + }); + if version < ClarityVersion::Clarity3 { + let err = analysis.unwrap_err(); + assert_eq!( + CheckErrors::UndefinedVariable("stacks-block-height".to_string()), + err.err + ); + } else { + assert!(analysis.is_ok()); + } + + // If we're testing epoch 3, we need to simulate the tenure height being + // set at the transition. 
+ if epoch >= StacksEpochId::Epoch30 { + owned_env.set_tenure_height(1); + } + + // Initialize the contract + // Note that we're ignoring the analysis failure here so that we can test + // the runtime behavior. In Clarity 3, if this case somehow gets past the + // analysis, it should fail at runtime. + let result = owned_env.initialize_versioned_contract( + contract_identifier.clone(), + version, + contract, + None, + ASTRules::PrecheckSize, + ); + + let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + + // Call the function + let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); + // In Clarity 3, this should trigger a runtime error + if version < ClarityVersion::Clarity3 { + let err = eval_result.unwrap_err(); + assert_eq!( + Error::Unchecked(CheckErrors::UndefinedVariable( + "stacks-block-height".to_string(), + )), + err + ); + } else { + assert_eq!(Ok(Value::UInt(1)), eval_result); + } +} + +#[apply(test_clarity_versions)] +fn test_tenure_height( + version: ClarityVersion, + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { + let contract = "(define-read-only (test-func) tenure-height)"; + + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::transient(), version); + + let mut owned_env = tl_env_factory.get_env(epoch); + let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); + + let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut marf = MemoryBackingStore::new(); + let mut db = marf.as_analysis_db(); + let analysis = db.execute(|db| { + type_check_version(&contract_identifier, &mut exprs, db, true, epoch, version) + }); + if version < ClarityVersion::Clarity3 { + let err = analysis.unwrap_err(); + assert_eq!( + CheckErrors::UndefinedVariable("tenure-height".to_string()), + err.err + ); + } else { + assert!(analysis.is_ok()); + } + + // If we're testing epoch 3, we need to 
simulate the tenure height being + // set at the transition. + if epoch >= StacksEpochId::Epoch30 { + owned_env.set_tenure_height(1); + } + + // Initialize the contract + // Note that we're ignoring the analysis failure here so that we can test + // the runtime behavior. In Clarity 3, if this case somehow gets past the + // analysis, it should fail at runtime. + let result = owned_env.initialize_versioned_contract( + contract_identifier.clone(), + version, + contract, + None, + ASTRules::PrecheckSize, + ); + + let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + + // Call the function + let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); + // In Clarity 3, this should trigger a runtime error + if version < ClarityVersion::Clarity3 { + let err = eval_result.unwrap_err(); + assert_eq!( + Error::Unchecked(CheckErrors::UndefinedVariable("tenure-height".to_string(),)), + err + ); + } else { + assert_eq!(Ok(Value::UInt(1)), eval_result); + } +} + +#[derive(Debug, PartialEq)] +enum WhenError { + Analysis, + Initialization, + Runtime, + Never, +} + +#[cfg(test)] +fn expect_contract_error( + version: ClarityVersion, + epoch: StacksEpochId, + tl_env_factory: &mut TopLevelMemoryEnvironmentGenerator, + name: &str, + contract: &str, + expected_errors: &[( + WhenError, + fn(ClarityVersion, StacksEpochId) -> bool, + CheckErrors, + )], + expected_success: Value, +) { + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::local(name).unwrap(), version); + + let mut owned_env = tl_env_factory.get_env(epoch); + let contract_identifier = QualifiedContractIdentifier::local(name).unwrap(); + + let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut marf = MemoryBackingStore::new(); + let mut db = marf.as_analysis_db(); + let analysis = db.execute(|db| { + type_check_version(&contract_identifier, &mut exprs, db, true, epoch, version) + }); + + for (when, err_condition, 
expected_error) in expected_errors { + if *when == WhenError::Analysis && err_condition(version, epoch) { + let err = analysis.unwrap_err(); + assert_eq!(*expected_error, err.err); + + // Do not continue with the test if the analysis failed. + return; + } + } + + // The type-checker does not report an error for the reuse of the built-in + // name `stacks-block-height`. It is instead caught at initialization. This + // matches the behavior of Clarity 1 and 2. + assert!(analysis.is_ok()); + + // Initialize the contract + // Note that we're ignoring the analysis failure here so that we can test + // the runtime behavior. In Clarity 3, if this case somehow gets past the + // analysis, it should fail at runtime. + let init_result = owned_env.initialize_versioned_contract( + contract_identifier.clone(), + version, + contract, + None, + ASTRules::PrecheckSize, + ); + + for (when, err_condition, expected_error) in expected_errors { + if *when == WhenError::Initialization && err_condition(version, epoch) { + let err = init_result.unwrap_err(); + if let Error::Unchecked(inner_err) = &err { + assert_eq!(expected_error, inner_err); + } else { + panic!("Expected an Unchecked error, but got a different error"); + } + + // Do not continue with the test if the initialization failed. + return; + } + } + + let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + + // Call the function + let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); + + for (when, err_condition, expected_error) in expected_errors { + if *when == WhenError::Runtime && err_condition(version, epoch) { + let err = eval_result.unwrap_err(); + if let Error::Unchecked(inner_err) = &err { + assert_eq!(expected_error, inner_err); + } else { + panic!("Expected an Unchecked error, but got a different error"); + } + + // Do not continue with the test if the evaluation failed. 
+ return; + } + } + + assert_eq!(Ok(expected_success), eval_result); +} + +#[apply(test_clarity_versions)] +fn reuse_block_height( + version: ClarityVersion, + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { + // data var + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "data-var", + r#" + (define-data-var block-height uint u1234) + (define-read-only (test-func) + (var-get block-height) + ) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::UInt(1234), + ); + + // map + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "map", + r#" + (define-map block-height uint uint) + (define-private (test-func) + (map-insert block-height u1 u2) + ) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(true), + ); + + // let + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "let", + r#" + (define-private (test-func) + (let ((block-height 32)) + block-height + ) + ) + "#, + &[ + ( + WhenError::Runtime, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Int(32), + ); + + // match binding + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "match-binding", + r#" + (define-read-only (test-func) + (let ((x (if true (ok u5) (err u7)))) + 
(match x + block-height 3 + e 4 + ) + ) + ) + "#, + &[ + ( + WhenError::Runtime, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Int(3), + ); + + // private function + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-private (block-height) true) + (define-private (test-func) (block-height)) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(true), + ); + + // constant + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "constant", + r#" + (define-constant block-height u1234) + (define-read-only (test-func) block-height) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::UInt(1234), + ); + + // define-trait + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-trait block-height ()) + (define-read-only (test-func) false) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(false), + ); + + // tuple + expect_contract_error( + version, + epoch, + &mut 
tl_env_factory, + "tuple", + r#" + (define-read-only (test-func) + (get block-height { block-height: 1234 }) + ) + "#, + &[], + Value::Int(1234), + ); + + // define-fungible-token + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-fungible-token block-height) + (define-read-only (test-func) false) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(false), + ); + + // define-non-fungible-token + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-non-fungible-token block-height uint) + (define-read-only (test-func) false) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(false), + ); + + // define-public + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-public (block-height) (ok true)) + (define-private (test-func) (unwrap-panic (block-height))) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(true), + ); + + // define-read-only + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-read-only (block-height) true) + (define-private (test-func) (block-height)) + "#, + &[ + ( + 
WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(true), + ); +} + +#[apply(test_clarity_versions)] +fn reuse_stacks_block_height( + version: ClarityVersion, + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { + // data var + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "data-var", + r#" + (define-data-var stacks-block-height uint u1234) + (define-read-only (test-func) + (var-get stacks-block-height) + ) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::UInt(1234), + ); + + // map + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "map", + r#" + (define-map stacks-block-height uint uint) + (define-private (test-func) + (map-insert stacks-block-height u1 u2) + ) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(true), + ); + + // let + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "let", + r#" + (define-private (test-func) + (let ((stacks-block-height 32)) + stacks-block-height + ) + ) + "#, + &[( + WhenError::Runtime, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Int(32), + ); + + // match binding + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "match-binding", + r#" + (define-read-only (test-func) + (let ((x (if true (ok u5) (err u7)))) + (match x + stacks-block-height 3 + e 4 + ) + ) + ) + "#, + &[( + WhenError::Runtime, + |version, _| version >= 
ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Int(3), + ); + + // function + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-private (stacks-block-height) true) + (define-private (test-func) (stacks-block-height)) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(true), + ); + + // constant + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "constant", + r#" + (define-constant stacks-block-height u1234) + (define-read-only (test-func) stacks-block-height) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::UInt(1234), + ); + + // define-trait + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-trait stacks-block-height ()) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(false), + ); + + // tuple + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "tuple", + r#" + (define-read-only (test-func) + (get stacks-block-height { stacks-block-height: 1234 }) + ) + "#, + &[], + Value::Int(1234), + ); + + // define-fungible-token + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-fungible-token stacks-block-height) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(false), + ); + + // define-non-fungible-token + expect_contract_error( + version, + epoch, + &mut 
tl_env_factory, + "trait", + r#" + (define-non-fungible-token stacks-block-height uint) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(false), + ); + + // define-public + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-public (stacks-block-height) (ok true)) + (define-private (test-func) (unwrap-panic (stacks-block-height))) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(true), + ); + + // define-read-only + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-read-only (stacks-block-height) true) + (define-private (test-func) (stacks-block-height)) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(true), + ); +} + +#[apply(test_clarity_versions)] +fn reuse_tenure_height( + version: ClarityVersion, + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { + // data var + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "data-var", + r#" + (define-data-var tenure-height uint u1234) + (define-read-only (test-func) + (var-get tenure-height) + ) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::UInt(1234), + ); + + // map + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "map", + r#" + (define-map tenure-height uint uint) + (define-private (test-func) + (map-insert tenure-height u1 u2) + ) + "#, + &[( + WhenError::Initialization, + |version, _| version >= 
ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(true), + ); + + // let + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "let", + r#" + (define-private (test-func) + (let ((tenure-height 32)) + tenure-height + ) + ) + "#, + &[( + WhenError::Runtime, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Int(32), + ); + + // match binding + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "match-binding", + r#" + (define-read-only (test-func) + (let ((x (if true (ok u5) (err u7)))) + (match x + tenure-height 3 + e 4 + ) + ) + ) + "#, + &[( + WhenError::Runtime, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Int(3), + ); + + // function + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-private (tenure-height) true) + (define-private (test-func) (tenure-height)) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(true), + ); + + // constant + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "constant", + r#" + (define-constant tenure-height u1234) + (define-read-only (test-func) tenure-height) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::UInt(1234), + ); + + // define-trait + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-trait tenure-height ()) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(false), + ); + 
+ // tuple + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "tuple", + r#" + (define-read-only (test-func) + (get tenure-height { tenure-height: 1234 }) + ) + "#, + &[], + Value::Int(1234), + ); + + // define-fungible-token + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-fungible-token tenure-height) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(false), + ); + + // define-non-fungible-token + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-non-fungible-token tenure-height uint) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(false), + ); + + // define-public + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-public (tenure-height) (ok true)) + (define-private (test-func) (unwrap-panic (tenure-height))) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(true), + ); + + // define-read-only + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-read-only (tenure-height) true) + (define-private (test-func) (tenure-height)) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(true), + ); +} diff --git a/clarity/src/vm/tooling/mod.rs b/clarity/src/vm/tooling/mod.rs new file mode 100644 index 0000000000..f218b2ccab --- /dev/null +++ b/clarity/src/vm/tooling/mod.rs @@ -0,0 +1,60 @@ +use 
std::collections::{BTreeMap, HashMap, HashSet}; + +use stacks_common::consts::CHAIN_ID_TESTNET; +use stacks_common::types::StacksEpochId; + +use super::analysis::ContractAnalysis; +use super::contexts::GlobalContext; +use super::docs::contracts::ContractRef; +use super::types::TypeSignature; +use super::{eval_all, ClarityVersion, ContractContext, Error as VmError, Value}; +use crate::vm::analysis::{run_analysis, CheckResult}; +use crate::vm::ast::{build_ast_with_rules, ASTRules}; +use crate::vm::costs::LimitedCostTracker; +use crate::vm::database::MemoryBackingStore; +use crate::vm::types::QualifiedContractIdentifier; + +/// Used by CLI tools like the docs generator. Not used in production +pub fn mem_type_check( + snippet: &str, + version: ClarityVersion, + epoch: StacksEpochId, +) -> CheckResult<(Option, ContractAnalysis)> { + let contract_identifier = QualifiedContractIdentifier::transient(); + let mut contract = build_ast_with_rules( + &contract_identifier, + snippet, + &mut (), + version, + epoch, + ASTRules::PrecheckSize, + ) + .unwrap() + .expressions; + + let mut marf = MemoryBackingStore::new(); + let mut analysis_db = marf.as_analysis_db(); + let cost_tracker = LimitedCostTracker::new_free(); + match run_analysis( + &QualifiedContractIdentifier::transient(), + &mut contract, + &mut analysis_db, + false, + cost_tracker, + epoch, + version, + true, + ) { + Ok(x) => { + // return the first type result of the type checker + let first_type = x + .type_map + .as_ref() + .unwrap() + .get_type_expected(&x.expressions.last().unwrap()) + .cloned(); + Ok((first_type, x)) + } + Err((e, _)) => Err(e), + } +} diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 46734dcc51..5662f2bb8a 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -771,19 +771,6 @@ impl BlockInfoProperty { MinerAddress => TypeSignature::PrincipalType, } } - - pub fn lookup_by_name_at_version( - name: &str, - version: &ClarityVersion, - ) -> Option { 
- BlockInfoProperty::lookup_by_name(name).and_then(|native_function| { - if &native_function.get_version() <= version { - Some(native_function) - } else { - None - } - }) - } } impl BurnBlockInfoProperty { diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index c9971f97ae..293c36fd5a 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -15,8 +15,10 @@ // along with this program. If not, see . use std::collections::btree_map::Entry; -use std::collections::{hash_map, BTreeMap, HashMap}; +use std::collections::{hash_map, BTreeMap}; use std::hash::{Hash, Hasher}; +use std::ops::Deref; +use std::sync::Arc; use std::{cmp, fmt}; // TypeSignatures @@ -76,7 +78,36 @@ impl AssetIdentifier { #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct TupleTypeSignature { - type_map: HashMap, + #[serde(with = "tuple_type_map_serde")] + type_map: Arc>, +} + +mod tuple_type_map_serde { + use std::collections::BTreeMap; + use std::ops::Deref; + use std::sync::Arc; + + use serde::{Deserializer, Serializer}; + + use super::TypeSignature; + use crate::vm::ClarityName; + + pub fn serialize( + map: &Arc>, + ser: S, + ) -> Result { + serde::Serialize::serialize(map.deref(), ser) + } + + pub fn deserialize<'de, D>( + deser: D, + ) -> Result>, D::Error> + where + D: Deserializer<'de>, + { + let map = serde::Deserialize::deserialize(deser)?; + Ok(Arc::new(map)) + } } #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] @@ -787,12 +818,12 @@ impl TypeSignature { inner_type.1.canonicalize_v2_1(), ))), TupleType(ref tuple_sig) => { - let mut canonicalized_fields = HashMap::new(); + let mut canonicalized_fields = BTreeMap::new(); for (field_name, field_type) in tuple_sig.get_type_map() { canonicalized_fields.insert(field_name.clone(), field_type.canonicalize_v2_1()); } TypeSignature::from(TupleTypeSignature { - type_map: canonicalized_fields, + type_map: Arc::new(canonicalized_fields), 
}) } TraitReferenceType(trait_id) => CallableType(CallableSubtype::Trait(trait_id.clone())), @@ -851,9 +882,9 @@ impl TryFrom> for TupleTypeSignature { return Err(CheckErrors::EmptyTuplesNotAllowed); } - let mut type_map = HashMap::new(); + let mut type_map = BTreeMap::new(); for (name, type_info) in type_data.into_iter() { - if let hash_map::Entry::Vacant(e) = type_map.entry(name.clone()) { + if let Entry::Vacant(e) = type_map.entry(name.clone()) { e.insert(type_info); } else { return Err(CheckErrors::NameAlreadyUsed(name.into())); @@ -874,30 +905,7 @@ impl TryFrom> for TupleTypeSignature { return Err(CheckErrors::TypeSignatureTooDeep); } } - let type_map = type_map.into_iter().collect(); - let result = TupleTypeSignature { type_map }; - let would_be_size = result - .inner_size()? - .ok_or_else(|| CheckErrors::ValueTooLarge)?; - if would_be_size > MAX_VALUE_SIZE { - Err(CheckErrors::ValueTooLarge) - } else { - Ok(result) - } - } -} - -impl TryFrom> for TupleTypeSignature { - type Error = CheckErrors; - fn try_from(type_map: HashMap) -> Result { - if type_map.is_empty() { - return Err(CheckErrors::EmptyTuplesNotAllowed); - } - for child_sig in type_map.values() { - if (1 + child_sig.depth()) > MAX_TYPE_DEPTH { - return Err(CheckErrors::TypeSignatureTooDeep); - } - } + let type_map = Arc::new(type_map.into_iter().collect()); let result = TupleTypeSignature { type_map }; let would_be_size = result .inner_size()? 
@@ -925,7 +933,7 @@ impl TupleTypeSignature { self.type_map.get(field) } - pub fn get_type_map(&self) -> &HashMap { + pub fn get_type_map(&self) -> &BTreeMap { &self.type_map } @@ -961,7 +969,7 @@ impl TupleTypeSignature { } pub fn shallow_merge(&mut self, update: &mut TupleTypeSignature) { - self.type_map.extend(update.type_map.drain()); + Arc::make_mut(&mut self.type_map).append(Arc::make_mut(&mut update.type_map)); } } diff --git a/clarity/src/vm/variables.rs b/clarity/src/vm/variables.rs index 539e14c39e..a5947d00cd 100644 --- a/clarity/src/vm/variables.rs +++ b/clarity/src/vm/variables.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use stacks_common::types::StacksEpochId; + use super::errors::InterpreterError; use crate::vm::contexts::{Environment, LocalContext}; use crate::vm::costs::cost_functions::ClarityCostFunction; @@ -22,36 +24,23 @@ use crate::vm::errors::{InterpreterResult as Result, RuntimeErrorType}; use crate::vm::types::{BuffData, Value}; use crate::vm::ClarityVersion; -define_versioned_named_enum!(NativeVariables(ClarityVersion) { - ContractCaller("contract-caller", ClarityVersion::Clarity1), - TxSender("tx-sender", ClarityVersion::Clarity1), - BlockHeight("block-height", ClarityVersion::Clarity1), - BurnBlockHeight("burn-block-height", ClarityVersion::Clarity1), - NativeNone("none", ClarityVersion::Clarity1), - NativeTrue("true", ClarityVersion::Clarity1), - NativeFalse("false", ClarityVersion::Clarity1), - TotalLiquidMicroSTX("stx-liquid-supply", ClarityVersion::Clarity1), - Regtest("is-in-regtest", ClarityVersion::Clarity1), - TxSponsor("tx-sponsor?", ClarityVersion::Clarity2), - Mainnet("is-in-mainnet", ClarityVersion::Clarity2), - ChainId("chain-id", ClarityVersion::Clarity2), +define_versioned_named_enum_with_max!(NativeVariables(ClarityVersion) { + ContractCaller("contract-caller", ClarityVersion::Clarity1, None), + TxSender("tx-sender", 
ClarityVersion::Clarity1, None), + BlockHeight("block-height", ClarityVersion::Clarity1, Some(ClarityVersion::Clarity2)), + BurnBlockHeight("burn-block-height", ClarityVersion::Clarity1, None), + NativeNone("none", ClarityVersion::Clarity1, None), + NativeTrue("true", ClarityVersion::Clarity1, None), + NativeFalse("false", ClarityVersion::Clarity1, None), + TotalLiquidMicroSTX("stx-liquid-supply", ClarityVersion::Clarity1, None), + Regtest("is-in-regtest", ClarityVersion::Clarity1, None), + TxSponsor("tx-sponsor?", ClarityVersion::Clarity2, None), + Mainnet("is-in-mainnet", ClarityVersion::Clarity2, None), + ChainId("chain-id", ClarityVersion::Clarity2, None), + StacksBlockHeight("stacks-block-height", ClarityVersion::Clarity3, None), + TenureHeight("tenure-height", ClarityVersion::Clarity3, None), }); -impl NativeVariables { - pub fn lookup_by_name_at_version( - name: &str, - version: &ClarityVersion, - ) -> Option { - NativeVariables::lookup_by_name(name).and_then(|native_function| { - if &native_function.get_version() <= version { - Some(native_function) - } else { - None - } - }) - } -} - pub fn is_reserved_name(name: &str, version: &ClarityVersion) -> bool { NativeVariables::lookup_by_name_at_version(name, version).is_some() } @@ -92,8 +81,19 @@ pub fn lookup_reserved_variable( } NativeVariables::BlockHeight => { runtime_cost(ClarityCostFunction::FetchVar, env, 1)?; - let block_height = env.global_context.database.get_current_block_height(); - Ok(Some(Value::UInt(block_height as u128))) + // In epoch 2.x, the `block-height` keyword returns the Stacks block height. + // For Clarity 1 and Clarity 2 contracts executing in epoch 3, `block-height` + // is equal to the tenure height instead of the Stacks block height. This change + // is made to maintain a similar pace at which this value increments (e.g. for use + // as an expiration). In Clarity 3, `block-height` is removed to avoid confusion. 
+ // It is replaced with two new keywords: `stacks-block-height` and `tenure-height`. + if env.global_context.epoch_id < StacksEpochId::Epoch30 { + let block_height = env.global_context.database.get_current_block_height(); + Ok(Some(Value::UInt(block_height as u128))) + } else { + let tenure_height = env.global_context.database.get_tenure_height()?; + Ok(Some(Value::UInt(tenure_height as u128))) + } } NativeVariables::BurnBlockHeight => { runtime_cost(ClarityCostFunction::FetchVar, env, 1)?; @@ -123,6 +123,16 @@ pub fn lookup_reserved_variable( let chain_id = env.global_context.chain_id; Ok(Some(Value::UInt(chain_id.into()))) } + NativeVariables::StacksBlockHeight => { + runtime_cost(ClarityCostFunction::FetchVar, env, 1)?; + let block_height = env.global_context.database.get_current_block_height(); + Ok(Some(Value::UInt(block_height as u128))) + } + NativeVariables::TenureHeight => { + runtime_cost(ClarityCostFunction::FetchVar, env, 1)?; + let tenure_height = env.global_context.database.get_tenure_height()?; + Ok(Some(Value::UInt(tenure_height as u128))) + } } } else { Ok(None) diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index f64d4ee878..4c437d52cc 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -9,6 +9,7 @@ use crate::vm::errors::{Error, RuntimeErrorType}; pub enum ClarityVersion { Clarity1, Clarity2, + Clarity3, } impl fmt::Display for ClarityVersion { @@ -16,13 +17,14 @@ impl fmt::Display for ClarityVersion { match self { ClarityVersion::Clarity1 => write!(f, "Clarity 1"), ClarityVersion::Clarity2 => write!(f, "Clarity 2"), + ClarityVersion::Clarity3 => write!(f, "Clarity 3"), } } } impl ClarityVersion { pub fn latest() -> ClarityVersion { - ClarityVersion::Clarity2 + ClarityVersion::Clarity3 } pub fn default_for_epoch(epoch_id: StacksEpochId) -> ClarityVersion { match epoch_id { @@ -37,7 +39,7 @@ impl ClarityVersion { StacksEpochId::Epoch23 => ClarityVersion::Clarity2, StacksEpochId::Epoch24 => 
ClarityVersion::Clarity2, StacksEpochId::Epoch25 => ClarityVersion::Clarity2, - StacksEpochId::Epoch30 => ClarityVersion::Clarity2, + StacksEpochId::Epoch30 => ClarityVersion::Clarity3, } } } @@ -51,9 +53,12 @@ impl FromStr for ClarityVersion { Ok(ClarityVersion::Clarity1) } else if s == "clarity2" { Ok(ClarityVersion::Clarity2) + } else if s == "clarity3" { + Ok(ClarityVersion::Clarity3) } else { Err(RuntimeErrorType::ParseError( - "Invalid clarity version. Valid versions are: Clarity1, Clarity2.".to_string(), + "Invalid clarity version. Valid versions are: Clarity1, Clarity2, Clarity3." + .to_string(), ) .into()) } diff --git a/contrib/boot-contracts-stateful-prop-tests/.gitignore b/contrib/boot-contracts-stateful-prop-tests/.gitignore new file mode 100644 index 0000000000..393158bd1c --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/.gitignore @@ -0,0 +1,8 @@ +logs +*.log +npm-debug.log* +coverage +*.info +costs-reports.json +node_modules +history.txt diff --git a/contrib/boot-contracts-stateful-prop-tests/Clarinet.toml b/contrib/boot-contracts-stateful-prop-tests/Clarinet.toml new file mode 100644 index 0000000000..f0d404a755 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/Clarinet.toml @@ -0,0 +1,2 @@ +[project] +name = "boot-contracts-stateful-prop-tests" diff --git a/contrib/boot-contracts-stateful-prop-tests/deployments/default.simnet-plan.yaml b/contrib/boot-contracts-stateful-prop-tests/deployments/default.simnet-plan.yaml new file mode 100644 index 0000000000..1837aee68a --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/deployments/default.simnet-plan.yaml @@ -0,0 +1,52 @@ +--- +id: 0 +name: "Simulated deployment, used as a default for `clarinet console`, `clarinet test` and `clarinet check`" +network: simnet +genesis: + wallets: + - name: deployer + address: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM + balance: "100000000000000" + - name: wallet_1 + address: ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5 + balance: 
"100000000000000" + - name: wallet_2 + address: ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG + balance: "100000000000000" + - name: wallet_3 + address: ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC + balance: "100000000000000" + - name: wallet_4 + address: ST2NEB84ASENDXKYGJPQW86YXQCEFEX2ZQPG87ND + balance: "100000000000000" + - name: wallet_5 + address: ST2REHHS5J3CERCRBEPMGH7921Q6PYKAADT7JP2VB + balance: "100000000000000" + - name: wallet_6 + address: ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0 + balance: "100000000000000" + - name: wallet_7 + address: ST3PF13W7Z0RRM42A8VZRVFQ75SV1K26RXEP8YGKJ + balance: "100000000000000" + - name: wallet_8 + address: ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP + balance: "100000000000000" + - name: wallet_9 + address: STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6 + balance: "100000000000000" + contracts: + - costs + - pox + - pox-2 + - pox-3 + - pox-4 + - lockup + - costs-2 + - costs-3 + - cost-voting + - bns +plan: + batches: + - id: 0 + transactions: [] + epoch: "2.4" diff --git a/contrib/boot-contracts-stateful-prop-tests/package-lock.json b/contrib/boot-contracts-stateful-prop-tests/package-lock.json new file mode 100644 index 0000000000..e3040db2e2 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/package-lock.json @@ -0,0 +1,2394 @@ +{ + "name": "boot-contracts-stateful-prop-tests", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "boot-contracts-stateful-prop-tests", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "@hirosystems/clarinet-sdk": "^2.6.0", + "@stacks/stacking": "^6.14.0", + "@stacks/transactions": "^6.13.1", + "chokidar-cli": "^3.0.0", + "fast-check": "^3.18.0", + "typescript": "^5.4.5", + "vite": "^5.2.10", + "vitest": "^1.5.2", + "vitest-environment-clarinet": "^2.1.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.20.2.tgz", + "integrity": 
"sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.20.2.tgz", + "integrity": "sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.20.2.tgz", + "integrity": "sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.20.2.tgz", + "integrity": "sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.20.2.tgz", + "integrity": "sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.20.2.tgz", + "integrity": 
"sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.20.2.tgz", + "integrity": "sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.20.2.tgz", + "integrity": "sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.20.2.tgz", + "integrity": "sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.20.2.tgz", + "integrity": "sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.20.2.tgz", + "integrity": "sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==", + 
"cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.20.2.tgz", + "integrity": "sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==", + "cpu": [ + "loong64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.20.2.tgz", + "integrity": "sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==", + "cpu": [ + "mips64el" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.20.2.tgz", + "integrity": "sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.20.2.tgz", + "integrity": "sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.20.2.tgz", + "integrity": "sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ], + 
"engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.20.2.tgz", + "integrity": "sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.20.2.tgz", + "integrity": "sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.20.2.tgz", + "integrity": "sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.20.2.tgz", + "integrity": "sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.20.2.tgz", + "integrity": "sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.20.2", + "resolved": 
"https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.20.2.tgz", + "integrity": "sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.20.2.tgz", + "integrity": "sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@hirosystems/clarinet-sdk": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk/-/clarinet-sdk-2.6.0.tgz", + "integrity": "sha512-8qyvpaeTmhn/Lrsg7zjNpIr9Ova1zVfzMNeBC4+y42tqxHX0j6MM58nr5m56bz5/0u+KPOvQpAhuVxGR27/NiA==", + "dependencies": { + "@hirosystems/clarinet-sdk-wasm": "^2.6.0", + "@stacks/encryption": "^6.13.0", + "@stacks/network": "^6.13.0", + "@stacks/stacking": "^6.13.0", + "@stacks/transactions": "^6.13.0", + "kolorist": "^1.8.0", + "prompts": "^2.4.2", + "vitest": "^1.0.4", + "yargs": "^17.7.2" + }, + "bin": { + "clarinet-sdk": "dist/cjs/bin/index.js" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@hirosystems/clarinet-sdk-wasm": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk-wasm/-/clarinet-sdk-wasm-2.6.0.tgz", + "integrity": "sha512-cUpYrnLX4VnpnumlYTCUNf1gFfl2kL18q63C1qFzUzkjFszffR+x0U2lxOQrz3EY3/U6eWeZvZPdKbOFO3zgqQ==" + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 
|| >=18.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@noble/hashes": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.1.5.tgz", + "integrity": "sha512-LTMZiiLc+V4v1Yi16TD6aX2gmtKszNye0pQgbaLqkvhIqP7nVsSaJsWloGQjJfJ8offaoP5GtX3yY5swbcJxxQ==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@noble/secp256k1": { + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@noble/secp256k1/-/secp256k1-1.7.1.tgz", + "integrity": "sha512-hOUk6AyBFmqVrv7k5WAw/LpszxVbj9gGN4JRkIX52fdFAj1UA61KXmZDvqVEm+pOyec3+fIeZB02LYa/pWOArw==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.17.1.tgz", + "integrity": "sha512-P6Wg856Ou/DLpR+O0ZLneNmrv7QpqBg+hK4wE05ijbC/t349BRfMfx+UFj5Ha3fCFopIa6iSZlpdaB4agkWp2Q==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.17.1.tgz", + "integrity": "sha512-piwZDjuW2WiHr05djVdUkrG5JbjnGbtx8BXQchYCMfib/nhjzWoiScelZ+s5IJI7lecrwSxHCzW026MWBL+oJQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.17.1.tgz", + "integrity": 
"sha512-LsZXXIsN5Q460cKDT4Y+bzoPDhBmO5DTr7wP80d+2EnYlxSgkwdPfE3hbE+Fk8dtya+8092N9srjBTJ0di8RIA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.17.1.tgz", + "integrity": "sha512-S7TYNQpWXB9APkxu/SLmYHezWwCoZRA9QLgrDeml+SR2A1LLPD2DBUdUlvmCF7FUpRMKvbeeWky+iizQj65Etw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.17.1.tgz", + "integrity": "sha512-Lq2JR5a5jsA5um2ZoLiXXEaOagnVyCpCW7xvlcqHC7y46tLwTEgUSTM3a2TfmmTMmdqv+jknUioWXlmxYxE9Yw==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.17.1.tgz", + "integrity": "sha512-9BfzwyPNV0IizQoR+5HTNBGkh1KXE8BqU0DBkqMngmyFW7BfuIZyMjQ0s6igJEiPSBvT3ZcnIFohZ19OqjhDPg==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.17.1.tgz", + "integrity": "sha512-e2uWaoxo/rtzA52OifrTSXTvJhAXb0XeRkz4CdHBK2KtxrFmuU/uNd544Ogkpu938BzEfvmWs8NZ8Axhw33FDw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.17.1.tgz", + "integrity": "sha512-ekggix/Bc/d/60H1Mi4YeYb/7dbal1kEDZ6sIFVAE8pUSx7PiWeEh+NWbL7bGu0X68BBIkgF3ibRJe1oFTksQQ==", + "cpu": [ + 
"arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.17.1.tgz", + "integrity": "sha512-UGV0dUo/xCv4pkr/C8KY7XLFwBNnvladt8q+VmdKrw/3RUd3rD0TptwjisvE2TTnnlENtuY4/PZuoOYRiGp8Gw==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.17.1.tgz", + "integrity": "sha512-gEYmYYHaehdvX46mwXrU49vD6Euf1Bxhq9pPb82cbUU9UT2NV+RSckQ5tKWOnNXZixKsy8/cPGtiUWqzPuAcXQ==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.17.1.tgz", + "integrity": "sha512-xeae5pMAxHFp6yX5vajInG2toST5lsCTrckSRUFwNgzYqnUjNBcQyqk1bXUxX5yhjWFl2Mnz3F8vQjl+2FRIcw==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.17.1.tgz", + "integrity": "sha512-AsdnINQoDWfKpBzCPqQWxSPdAWzSgnYbrJYtn6W0H2E9It5bZss99PiLA8CgmDRfvKygt20UpZ3xkhFlIfX9zQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.17.1.tgz", + "integrity": "sha512-KoB4fyKXTR+wYENkIG3fFF+5G6N4GFvzYx8Jax8BR4vmddtuqSb5oQmYu2Uu067vT/Fod7gxeQYKupm8gAcMSQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + 
"version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.17.1.tgz", + "integrity": "sha512-J0d3NVNf7wBL9t4blCNat+d0PYqAx8wOoY+/9Q5cujnafbX7BmtYk3XvzkqLmFECaWvXGLuHmKj/wrILUinmQg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.17.1.tgz", + "integrity": "sha512-xjgkWUwlq7IbgJSIxvl516FJ2iuC/7ttjsAxSPpC9kkI5iQQFHKyEN5BjbhvJ/IXIZ3yIBcW5QDlWAyrA+TFag==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.17.1.tgz", + "integrity": "sha512-0QbCkfk6cnnVKWqqlC0cUrrUMDMfu5ffvYMTUHf+qMN2uAb3MKP31LPcwiMXBNsvoFGs/kYdFOsuLmvppCopXA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@scure/base": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.1.6.tgz", + "integrity": "sha512-ok9AWwhcgYuGG3Zfhyqg+zwl+Wn5uE+dwC0NV/2qQkx4dABbb/bx96vWu8NSj+BNjjSjno+JRYRjle1jV08k3g==", + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@scure/bip39": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@scure/bip39/-/bip39-1.1.0.tgz", + "integrity": "sha512-pwrPOS16VeTKg98dYXQyIjJEcWfz7/1YJIwxUEPFfQPtc86Ym/1sVgQ2RLoD43AazMk2l/unK4ITySSpW2+82w==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "dependencies": { + "@noble/hashes": "~1.1.1", + "@scure/base": "~1.1.0" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": 
"sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + }, + "node_modules/@stacks/common": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/common/-/common-6.13.0.tgz", + "integrity": "sha512-wwzyihjaSdmL6NxKvDeayy3dqM0L0Q2sawmdNtzJDi0FnXuJGm5PeapJj7bEfcI9XwI7Bw5jZoC6mCn9nc5YIw==", + "dependencies": { + "@types/bn.js": "^5.1.0", + "@types/node": "^18.0.4" + } + }, + "node_modules/@stacks/encryption": { + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.13.1.tgz", + "integrity": "sha512-y5IFX3/nGI3fCk70gE0JwH70GpshD8RhUfvhMLcL96oNaec1cCdj1ZUiQupeicfYTHuraaVBYU9xLls4TRmypg==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@noble/secp256k1": "1.7.1", + "@scure/bip39": "1.1.0", + "@stacks/common": "^6.13.0", + "@types/node": "^18.0.4", + "base64-js": "^1.5.1", + "bs58": "^5.0.0", + "ripemd160-min": "^0.0.6", + "varuint-bitcoin": "^1.1.2" + } + }, + "node_modules/@stacks/network": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/network/-/network-6.13.0.tgz", + "integrity": "sha512-Ss/Da4BNyPBBj1OieM981fJ7SkevKqLPkzoI1+Yo7cYR2df+0FipIN++Z4RfpJpc8ne60vgcx7nJZXQsiGhKBQ==", + "dependencies": { + "@stacks/common": "^6.13.0", + "cross-fetch": "^3.1.5" + } + }, + "node_modules/@stacks/stacking": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/@stacks/stacking/-/stacking-6.14.0.tgz", + "integrity": "sha512-P6ITXYpb5q4hgWMPimJW84mih3hQuQ0ko7AcnJ4SPy17nt1rxEz7/zgyRnqg1Lc18zt4HqfF9SKM7+Sqt/EMZA==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@scure/base": "1.1.1", + "@stacks/common": "^6.13.0", + "@stacks/encryption": "^6.13.1", + "@stacks/network": "^6.13.0", + "@stacks/stacks-blockchain-api-types": "^0.61.0", + "@stacks/transactions": "^6.13.1", + "bs58": "^5.0.0" + } + }, + "node_modules/@stacks/stacking/node_modules/@scure/base": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/@scure/base/-/base-1.1.1.tgz", + "integrity": "sha512-ZxOhsSyxYwLJj3pLZCefNitxsj093tb2vq90mp2txoYeBqbcjDjqFhyM8eUjq/uFm6zJ+mUuqxlS2FkuSY1MTA==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@stacks/stacks-blockchain-api-types": { + "version": "0.61.0", + "resolved": "https://registry.npmjs.org/@stacks/stacks-blockchain-api-types/-/stacks-blockchain-api-types-0.61.0.tgz", + "integrity": "sha512-yPOfTUboo5eA9BZL/hqMcM71GstrFs9YWzOrJFPeP4cOO1wgYvAcckgBRbgiE3NqeX0A7SLZLDAXLZbATuRq9w==" + }, + "node_modules/@stacks/transactions": { + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/transactions/-/transactions-6.13.1.tgz", + "integrity": "sha512-PWw2I+2Fj3CaFYQIoVcqQN6E2qGHNhFv03nuR0CxMq0sx8stPgYZbdzUlnlBcJQdsFiHrw3sPeqnXDZt+Hg5YQ==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@noble/secp256k1": "1.7.1", + "@stacks/common": "^6.13.0", + "@stacks/network": "^6.13.0", + "c32check": "^2.0.0", + "lodash.clonedeep": "^4.5.0" + } + }, + "node_modules/@types/bn.js": { + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-5.1.5.tgz", + "integrity": "sha512-V46N0zwKRF5Q00AZ6hWtN0T8gGmDUaUzLWQvHFo5yThtVwK/VCenFY3wXVbOvNfajEpsTfQM4IN9k/d6gUVX3A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + }, + "node_modules/@types/node": { + "version": "18.19.31", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.31.tgz", + "integrity": "sha512-ArgCD39YpyyrtFKIqMDvjz79jto5fcI/SVUs2HwB+f0dAzq68yqOdyaSivLiLugSziTpNXLQrVb7RZFmdZzbhA==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@vitest/expect": { + "version": "1.5.2", + "resolved": 
"https://registry.npmjs.org/@vitest/expect/-/expect-1.5.2.tgz", + "integrity": "sha512-rf7MTD1WCoDlN3FfYJ9Llfp0PbdtOMZ3FIF0AVkDnKbp3oiMW1c8AmvRZBcqbAhDUAvF52e9zx4WQM1r3oraVA==", + "dependencies": { + "@vitest/spy": "1.5.2", + "@vitest/utils": "1.5.2", + "chai": "^4.3.10" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-1.5.2.tgz", + "integrity": "sha512-7IJ7sJhMZrqx7HIEpv3WrMYcq8ZNz9L6alo81Y6f8hV5mIE6yVZsFoivLZmr0D777klm1ReqonE9LyChdcmw6g==", + "dependencies": { + "@vitest/utils": "1.5.2", + "p-limit": "^5.0.0", + "pathe": "^1.1.1" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-1.5.2.tgz", + "integrity": "sha512-CTEp/lTYos8fuCc9+Z55Ga5NVPKUgExritjF5VY7heRFUfheoAqBneUlvXSUJHUZPjnPmyZA96yLRJDP1QATFQ==", + "dependencies": { + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-1.5.2.tgz", + "integrity": "sha512-xCcPvI8JpCtgikT9nLpHPL1/81AYqZy1GCy4+MCHBE7xi8jgsYkULpW5hrx5PGLgOQjUpb6fd15lqcriJ40tfQ==", + "dependencies": { + "tinyspy": "^2.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-1.5.2.tgz", + "integrity": "sha512-sWOmyofuXLJ85VvXNsroZur7mOJGiQeM0JN3/0D1uU8U9bGFM69X1iqHaRXl6R8BwaLY6yPCogP257zxTzkUdA==", + "dependencies": { + "diff-sequences": "^29.6.3", + "estree-walker": "^3.0.3", + "loupe": "^2.3.7", + "pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + 
"node_modules/acorn": { + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.2.tgz", + "integrity": "sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/assertion-error": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", + "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", + "engines": { + "node": "*" + } + }, + "node_modules/base-x": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/base-x/-/base-x-4.0.0.tgz", + "integrity": "sha512-FuwxlW4H5kh37X/oW59pwTzzTKRzfrrQwhmyspRM7swOEZcHtDZSCt45U6oKgtuFE+WYPblePMVIPR4RZrh/hw==" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/bs58": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/bs58/-/bs58-5.0.0.tgz", + "integrity": "sha512-r+ihvQJvahgYT50JD05dyJNKlmmSlMoOGwn1lCcEzanPglg7TxYjioQUYehQ9mAR/+hOSd2jRc/Z2y5UxBymvQ==", + "dependencies": { + "base-x": "^4.0.0" + } + }, + "node_modules/c32check": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/c32check/-/c32check-2.0.0.tgz", + "integrity": "sha512-rpwfAcS/CMqo0oCqDf3r9eeLgScRE3l/xHDCXhM3UyrfvIn7PrLq63uHh7yYbv8NzaZn5MVsVhIRpQ+5GZ5HyA==", + "dependencies": { + "@noble/hashes": "^1.1.2", + "base-x": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/chai": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.4.1.tgz", + "integrity": "sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==", + "dependencies": { + "assertion-error": "^1.1.0", + "check-error": "^1.0.3", + "deep-eql": "^4.1.3", + "get-func-name": "^2.0.2", + "loupe": "^2.3.6", + "pathval": "^1.1.1", + "type-detect": "^4.0.8" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/check-error": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.3.tgz", + "integrity": "sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==", + "dependencies": { + "get-func-name": "^2.0.2" + }, + "engines": { + "node": "*" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar-cli": { + 
"version": "3.0.0", + "resolved": "https://registry.npmjs.org/chokidar-cli/-/chokidar-cli-3.0.0.tgz", + "integrity": "sha512-xVW+Qeh7z15uZRxHOkP93Ux8A0xbPzwK4GaqD8dQOYc34TlkqUhVSS59fK36DOp5WdJlrRzlYSy02Ht99FjZqQ==", + "dependencies": { + "chokidar": "^3.5.2", + "lodash.debounce": "^4.0.8", + "lodash.throttle": "^4.1.1", + "yargs": "^13.3.0" + }, + "bin": { + "chokidar": "index.js" + }, + "engines": { + "node": ">= 8.10.0" + } + }, + "node_modules/chokidar-cli/node_modules/ansi-regex": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz", + "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==", + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/chokidar-cli/node_modules/cliui": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", + "dependencies": { + "string-width": "^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" + } + }, + "node_modules/chokidar-cli/node_modules/emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" + }, + "node_modules/chokidar-cli/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": 
"sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w==", + "engines": { + "node": ">=4" + } + }, + "node_modules/chokidar-cli/node_modules/string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dependencies": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dependencies": { + "ansi-regex": "^4.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/wrap-ansi": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", + "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "dependencies": { + "ansi-styles": "^3.2.0", + "string-width": "^3.0.0", + "strip-ansi": "^5.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/y18n": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", + "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" + }, + "node_modules/chokidar-cli/node_modules/yargs": { + "version": "13.3.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", + "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", + "dependencies": { + "cliui": "^5.0.0", + "find-up": "^3.0.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + 
"require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^3.0.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^13.1.2" + } + }, + "node_modules/chokidar-cli/node_modules/yargs-parser": { + "version": "13.1.2", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", + "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", + "dependencies": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/confbox": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.7.tgz", + "integrity": "sha512-uJcB/FKZtBMCJpK8MQji6bJHgu1tixKPxRLeGkNzBoOZzpnZUJm0jm2/sBDWcuBx1dYgxV4JU+g5hmNxCyAmdA==" + }, + "node_modules/cross-fetch": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", + "integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==", + "dependencies": { + "node-fetch": "^2.6.12" + 
} + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/deep-eql": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.3.tgz", + "integrity": "sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==", + "dependencies": { + "type-detect": "^4.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + 
"node_modules/esbuild": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.20.2.tgz", + "integrity": "sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==", + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.20.2", + "@esbuild/android-arm": "0.20.2", + "@esbuild/android-arm64": "0.20.2", + "@esbuild/android-x64": "0.20.2", + "@esbuild/darwin-arm64": "0.20.2", + "@esbuild/darwin-x64": "0.20.2", + "@esbuild/freebsd-arm64": "0.20.2", + "@esbuild/freebsd-x64": "0.20.2", + "@esbuild/linux-arm": "0.20.2", + "@esbuild/linux-arm64": "0.20.2", + "@esbuild/linux-ia32": "0.20.2", + "@esbuild/linux-loong64": "0.20.2", + "@esbuild/linux-mips64el": "0.20.2", + "@esbuild/linux-ppc64": "0.20.2", + "@esbuild/linux-riscv64": "0.20.2", + "@esbuild/linux-s390x": "0.20.2", + "@esbuild/linux-x64": "0.20.2", + "@esbuild/netbsd-x64": "0.20.2", + "@esbuild/openbsd-x64": "0.20.2", + "@esbuild/sunos-x64": "0.20.2", + "@esbuild/win32-arm64": "0.20.2", + "@esbuild/win32-ia32": "0.20.2", + "@esbuild/win32-x64": "0.20.2" + } + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/execa": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz", + "integrity": 
"sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^8.0.1", + "human-signals": "^5.0.0", + "is-stream": "^3.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^5.1.0", + "onetime": "^6.0.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^3.0.0" + }, + "engines": { + "node": ">=16.17" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/fast-check": { + "version": "3.18.0", + "resolved": "https://registry.npmjs.org/fast-check/-/fast-check-3.18.0.tgz", + "integrity": "sha512-/951xaT0kA40w0GXRsZXEwSTE7LugjZtSA/8vPgFkiPQ8wNp8tRvqWuNDHBgLxJYXtsK11e/7Q4ObkKW5BdTFQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ], + "dependencies": { + "pure-rand": "^6.1.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + 
"engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-func-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz", + "integrity": "sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==", + "engines": { + "node": "*" + } + }, + "node_modules/get-stream": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", + "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/human-signals": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz", + "integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==", + "engines": { + "node": ">=16.17.0" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + 
"node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", + "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/js-tokens": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.0.tgz", 
+ "integrity": "sha512-WriZw1luRMlmV3LGJaR6QOJjWwgLUTf89OwT2lUOyjX2dJGBwgmIkbcz+7WFZjrZM635JOIR517++e/67CP9dQ==" + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/kolorist": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/kolorist/-/kolorist-1.8.0.tgz", + "integrity": "sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==" + }, + "node_modules/local-pkg": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.5.0.tgz", + "integrity": "sha512-ok6z3qlYyCDS4ZEU27HaU6x/xZa9Whf8jD4ptH5UZTQYZVYeb9bnZ3ojVhiJNLiXK1Hfc0GNbLXcmZ5plLDDBg==", + "dependencies": { + "mlly": "^1.4.2", + "pkg-types": "^1.0.3" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/lodash.clonedeep": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", + "integrity": "sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.throttle": { + "version": "4.1.1", 
+ "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz", + "integrity": "sha512-wIkUCfVKpVsWo3JSZlc+8MB5it+2AN5W8J7YVMST30UrvcQNZ1Okbj+rbVniijTWE6FGYy4XJq/rHkas8qJMLQ==" + }, + "node_modules/loupe": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.7.tgz", + "integrity": "sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==", + "dependencies": { + "get-func-name": "^2.0.1" + } + }, + "node_modules/magic-string": { + "version": "0.30.10", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.10.tgz", + "integrity": "sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/mimic-fn": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", + "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mlly": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.6.1.tgz", + "integrity": "sha512-vLgaHvaeunuOXHSmEbZ9izxPx3USsk8KCQ8iC+aTlp5sKRSoZvwhHh5L9VbKSaVC6sJDqbyohIS76E2VmHIPAA==", + "dependencies": { + "acorn": "^8.11.3", + "pathe": "^1.1.2", + "pkg-types": "^1.0.3", + "ufo": "^1.3.2" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + 
"node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", + "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", + "dependencies": { + "path-key": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/onetime": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", + "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", + "dependencies": { + "mimic-fn": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-limit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-5.0.0.tgz", + "integrity": "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/p-locate/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + 
"integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/pathe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", + "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==" + }, + "node_modules/pathval": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", + "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", + "engines": { + "node": "*" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.1.0.tgz", + "integrity": "sha512-/RpmvKdxKf8uILTtoOhAgf30wYbP2Qw+L9p3Rvshx1JZVX+XQNZQFjlbmGHEGIm4CkVPlSn+NXmIM8+9oWQaSA==", + "dependencies": { + "confbox": "^0.1.7", + "mlly": "^1.6.1", + "pathe": "^1.1.2" + } + }, + "node_modules/postcss": { + "version": "8.4.38", + "resolved": 
"https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", + "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", + "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ] + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": 
"sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==" + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" + }, + "node_modules/ripemd160-min": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/ripemd160-min/-/ripemd160-min-0.0.6.tgz", + "integrity": "sha512-+GcJgQivhs6S9qvLogusiTcS9kQUfgR75whKuy5jIhuiOfQuJ8fjqxV6EGD5duH1Y/FawFUMtMhyeq3Fbnib8A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/rollup": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.17.1.tgz", + "integrity": "sha512-0gG94inrUtg25sB2V/pApwiv1lUb0bQ25FPNuzO89Baa+B+c0ccaaBKM5zkZV/12pUUdH+lWCSm9wmHqyocuVQ==", + "dependencies": { + "@types/estree": "1.0.5" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.17.1", + "@rollup/rollup-android-arm64": "4.17.1", + "@rollup/rollup-darwin-arm64": "4.17.1", + "@rollup/rollup-darwin-x64": "4.17.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.17.1", + 
"@rollup/rollup-linux-arm-musleabihf": "4.17.1", + "@rollup/rollup-linux-arm64-gnu": "4.17.1", + "@rollup/rollup-linux-arm64-musl": "4.17.1", + "@rollup/rollup-linux-powerpc64le-gnu": "4.17.1", + "@rollup/rollup-linux-riscv64-gnu": "4.17.1", + "@rollup/rollup-linux-s390x-gnu": "4.17.1", + "@rollup/rollup-linux-x64-gnu": "4.17.1", + "@rollup/rollup-linux-x64-musl": "4.17.1", + "@rollup/rollup-win32-arm64-msvc": "4.17.1", + "@rollup/rollup-win32-ia32-msvc": "4.17.1", + "@rollup/rollup-win32-x64-msvc": "4.17.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + 
"resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==" + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==" + }, + "node_modules/std-env": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", + "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==" + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", + "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strip-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-2.1.0.tgz", + "integrity": "sha512-Op+UycaUt/8FbN/Z2TWPBLge3jWrP3xj10f3fnYxf052bKuS3EKs1ZQcVGjnEMdsNVAM+plXRdmjrZ/KgG3Skw==", + "dependencies": { + "js-tokens": "^9.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/tinybench": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.8.0.tgz", + "integrity": "sha512-1/eK7zUnIklz4JUUlL+658n58XO2hHLQfSk1Zf2LKieUjxidN16eKFEoDEfjHc3ohofSSqK3X5yO6VGb6iW8Lw==" + }, + "node_modules/tinypool": { + "version": "0.8.4", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.8.4.tgz", + "integrity": "sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-2.2.1.tgz", + "integrity": "sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": 
"https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/typescript": { + "version": "5.4.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz", + "integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/ufo": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.5.3.tgz", + "integrity": "sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw==" + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/varuint-bitcoin": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/varuint-bitcoin/-/varuint-bitcoin-1.1.2.tgz", + "integrity": "sha512-4EVb+w4rx+YfVM32HQX42AbbT7/1f5zwAYhIujKXKk8NQK+JfRVl3pqT3hjNn/L+RstigmGGKVwHA/P0wgITZw==", + "dependencies": { + "safe-buffer": "^5.1.1" + } + }, + "node_modules/vite": { + "version": "5.2.10", + 
"resolved": "https://registry.npmjs.org/vite/-/vite-5.2.10.tgz", + "integrity": "sha512-PAzgUZbP7msvQvqdSD+ErD5qGnSFiGOoWmV5yAKUEI0kdhjbH6nMWVyZQC/hSc4aXwc0oJ9aEdIiF9Oje0JFCw==", + "dependencies": { + "esbuild": "^0.20.1", + "postcss": "^8.4.38", + "rollup": "^4.13.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite-node": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-1.5.2.tgz", + "integrity": "sha512-Y8p91kz9zU+bWtF7HGt6DVw2JbhyuB2RlZix3FPYAYmUyZ3n7iTp8eSyLyY6sxtPegvxQtmlTMhfPhUfCUF93A==", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.3.4", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "vite": "^5.0.0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vitest": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-1.5.2.tgz", + "integrity": "sha512-l9gwIkq16ug3xY7BxHwcBQovLZG75zZL0PlsiYQbf76Rz6QGs54416UWMtC0jXeihvHvcHrf2ROEjkQRVpoZYw==", + "dependencies": { + "@vitest/expect": "1.5.2", + "@vitest/runner": "1.5.2", + "@vitest/snapshot": "1.5.2", + "@vitest/spy": "1.5.2", + "@vitest/utils": "1.5.2", + "acorn-walk": "^8.3.2", + "chai": "^4.3.10", + "debug": "^4.3.4", + 
"execa": "^8.0.1", + "local-pkg": "^0.5.0", + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "std-env": "^3.5.0", + "strip-literal": "^2.0.0", + "tinybench": "^2.5.1", + "tinypool": "^0.8.3", + "vite": "^5.0.0", + "vite-node": "1.5.2", + "why-is-node-running": "^2.2.2" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/node": "^18.0.0 || >=20.0.0", + "@vitest/browser": "1.5.2", + "@vitest/ui": "1.5.2", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/vitest-environment-clarinet": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/vitest-environment-clarinet/-/vitest-environment-clarinet-2.1.0.tgz", + "integrity": "sha512-1SA9XZh47qmbV724sGo2FyjVU+Ar3m5TOU4bLGSlWDb/x388IKUPrHbHWqIQNwY+gwEm9VBfXEAd1LOSUdemBw==", + "peerDependencies": { + "@hirosystems/clarinet-sdk": ">=2.6.0", + "vitest": "^1.5.2" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-module": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz", + "integrity": "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==" + }, + "node_modules/why-is-node-running": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.2.2.tgz", + "integrity": "sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/wrap-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git 
a/contrib/boot-contracts-stateful-prop-tests/package.json b/contrib/boot-contracts-stateful-prop-tests/package.json new file mode 100644 index 0000000000..89e3da95b0 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/package.json @@ -0,0 +1,23 @@ +{ + "name": "boot-contracts-stateful-prop-tests", + "version": "1.0.0", + "description": "Run stateful property-based tests on this project.", + "private": true, + "type": "module", + "scripts": { + "test": "vitest run" + }, + "author": "", + "license": "ISC", + "dependencies": { + "@hirosystems/clarinet-sdk": "^2.6.0", + "@stacks/stacking": "^6.14.0", + "@stacks/transactions": "^6.13.1", + "chokidar-cli": "^3.0.0", + "fast-check": "^3.18.0", + "typescript": "^5.4.5", + "vite": "^5.2.10", + "vitest": "^1.5.2", + "vitest-environment-clarinet": "^2.1.0" + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/settings/Devnet.toml b/contrib/boot-contracts-stateful-prop-tests/settings/Devnet.toml new file mode 100644 index 0000000000..bb941fddc9 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/settings/Devnet.toml @@ -0,0 +1,73 @@ +[network] +name = "devnet" + +[accounts.deployer] +mnemonic = "twice kind fence tip hidden tilt action fragile skin nothing glory cousin green tomorrow spring wrist shed math olympic multiply hip blue scout claw" +balance = 100_000_000_000_000 +# secret_key: 753b7cc01a1a2e86221266a154af739463fce51219d97e4f856cd7200c3bd2a601 +# stx_address: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM +# btc_address: mqVnk6NPRdhntvfm4hh9vvjiRkFDUuSYsH + +[accounts.wallet_1] +mnemonic = "sell invite acquire kitten bamboo drastic jelly vivid peace spawn twice guilt pave pen trash pretty park cube fragile unaware remain midnight betray rebuild" +balance = 100_000_000_000_000 +# secret_key: 7287ba251d44a4d3fd9276c88ce34c5c52a038955511cccaf77e61068649c17801 +# stx_address: ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5 +# btc_address: mr1iPkD9N3RJZZxXRk7xF9d36gffa6exNC + +[accounts.wallet_2] 
+mnemonic = "hold excess usual excess ring elephant install account glad dry fragile donkey gaze humble truck breeze nation gasp vacuum limb head keep delay hospital" +balance = 100_000_000_000_000 +# secret_key: 530d9f61984c888536871c6573073bdfc0058896dc1adfe9a6a10dfacadc209101 +# stx_address: ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG +# btc_address: muYdXKmX9bByAueDe6KFfHd5Ff1gdN9ErG + +[accounts.wallet_3] +mnemonic = "cycle puppy glare enroll cost improve round trend wrist mushroom scorpion tower claim oppose clever elephant dinosaur eight problem before frozen dune wagon high" +balance = 100_000_000_000_000 +# secret_key: d655b2523bcd65e34889725c73064feb17ceb796831c0e111ba1a552b0f31b3901 +# stx_address: ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC +# btc_address: mvZtbibDAAA3WLpY7zXXFqRa3T4XSknBX7 + +[accounts.wallet_4] +mnemonic = "board list obtain sugar hour worth raven scout denial thunder horse logic fury scorpion fold genuine phrase wealth news aim below celery when cabin" +balance = 100_000_000_000_000 +# secret_key: f9d7206a47f14d2870c163ebab4bf3e70d18f5d14ce1031f3902fbbc894fe4c701 +# stx_address: ST2NEB84ASENDXKYGJPQW86YXQCEFEX2ZQPG87ND +# btc_address: mg1C76bNTutiCDV3t9nWhZs3Dc8LzUufj8 + +[accounts.wallet_5] +mnemonic = "hurry aunt blame peanut heavy update captain human rice crime juice adult scale device promote vast project quiz unit note reform update climb purchase" +balance = 100_000_000_000_000 +# secret_key: 3eccc5dac8056590432db6a35d52b9896876a3d5cbdea53b72400bc9c2099fe801 +# stx_address: ST2REHHS5J3CERCRBEPMGH7921Q6PYKAADT7JP2VB +# btc_address: mweN5WVqadScHdA81aATSdcVr4B6dNokqx + +[accounts.wallet_6] +mnemonic = "area desk dutch sign gold cricket dawn toward giggle vibrant indoor bench warfare wagon number tiny universe sand talk dilemma pottery bone trap buddy" +balance = 100_000_000_000_000 +# secret_key: 7036b29cb5e235e5fd9b09ae3e8eec4404e44906814d5d01cbca968a60ed4bfb01 +# stx_address: ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0 +# 
btc_address: mzxXgV6e4BZSsz8zVHm3TmqbECt7mbuErt + +[accounts.wallet_7] +mnemonic = "prevent gallery kind limb income control noise together echo rival record wedding sense uncover school version force bleak nuclear include danger skirt enact arrow" +balance = 100_000_000_000_000 +# secret_key: b463f0df6c05d2f156393eee73f8016c5372caa0e9e29a901bb7171d90dc4f1401 +# stx_address: ST3PF13W7Z0RRM42A8VZRVFQ75SV1K26RXEP8YGKJ +# btc_address: n37mwmru2oaVosgfuvzBwgV2ysCQRrLko7 + +[accounts.wallet_8] +mnemonic = "female adjust gallery certain visit token during great side clown fitness like hurt clip knife warm bench start reunion globe detail dream depend fortune" +balance = 100_000_000_000_000 +# secret_key: 6a1a754ba863d7bab14adbbc3f8ebb090af9e871ace621d3e5ab634e1422885e01 +# stx_address: ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP +# btc_address: n2v875jbJ4RjBnTjgbfikDfnwsDV5iUByw + +[accounts.wallet_9] +mnemonic = "shadow private easily thought say logic fault paddle word top book during ignore notable orange flight clock image wealth health outside kitten belt reform" +balance = 100_000_000_000_000 +# secret_key: de433bdfa14ec43aa1098d5be594c8ffb20a31485ff9de2923b2689471c401b801 +# stx_address: STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6 +# btc_address: mjSrB3wS4xab3kYqFktwBzfTdPg367ZJ2d + diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts new file mode 100644 index 0000000000..15f4d4ddc0 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -0,0 +1,162 @@ +import { it } from "vitest"; +import { initSimnet } from "@hirosystems/clarinet-sdk"; +import { Real, Stub } from "./pox_CommandModel.ts"; + +import { + getPublicKeyFromPrivate, + publicKeyToBtcAddress, +} from "@stacks/encryption"; +import { StacksDevnet } from "@stacks/network"; +import { + createStacksPrivateKey, + getAddressFromPrivateKey, 
+ TransactionVersion, +} from "@stacks/transactions"; +import { StackingClient } from "@stacks/stacking"; + +import fc from "fast-check"; +import { PoxCommands } from "./pox_Commands.ts"; + +import fs from "fs"; +import path from "path"; + +it("statefully interacts with PoX-4", async () => { + // SUT stands for "System Under Test". + const sut: Real = { + network: await initSimnet(), + }; + + const wallets = [ + [ + "wallet_1", + "7287ba251d44a4d3fd9276c88ce34c5c52a038955511cccaf77e61068649c17801", + ], + [ + "wallet_2", + "530d9f61984c888536871c6573073bdfc0058896dc1adfe9a6a10dfacadc209101", + ], + [ + "wallet_3", + "d655b2523bcd65e34889725c73064feb17ceb796831c0e111ba1a552b0f31b3901", + ], + [ + "wallet_4", + "f9d7206a47f14d2870c163ebab4bf3e70d18f5d14ce1031f3902fbbc894fe4c701", + ], + [ + "wallet_5", + "3eccc5dac8056590432db6a35d52b9896876a3d5cbdea53b72400bc9c2099fe801", + ], + [ + "wallet_6", + "7036b29cb5e235e5fd9b09ae3e8eec4404e44906814d5d01cbca968a60ed4bfb01", + ], + [ + "wallet_7", + "b463f0df6c05d2f156393eee73f8016c5372caa0e9e29a901bb7171d90dc4f1401", + ], + [ + "wallet_8", + "6a1a754ba863d7bab14adbbc3f8ebb090af9e871ace621d3e5ab634e1422885e01", + ], + [ + "wallet_9", + "de433bdfa14ec43aa1098d5be594c8ffb20a31485ff9de2923b2689471c401b801", + ], + ].map((wallet) => { + const label = wallet[0]; + const prvKey = wallet[1]; + const pubKey = getPublicKeyFromPrivate(prvKey); + const devnet = new StacksDevnet(); + const initialUstxBalance = 100_000_000_000_000; + const signerPrvKey = createStacksPrivateKey(prvKey); + const signerPubKey = getPublicKeyFromPrivate(signerPrvKey.data); + const btcAddress = publicKeyToBtcAddress(pubKey); + const stxAddress = getAddressFromPrivateKey( + prvKey, + TransactionVersion.Testnet, + ); + + return { + label, + stxAddress, + btcAddress, + signerPrvKey, + signerPubKey, + stackingClient: new StackingClient(stxAddress, devnet), + ustxBalance: initialUstxBalance, + isStacking: false, + hasDelegated: false, + lockedAddresses: [], + 
amountToCommit: 0, + poolMembers: [], + delegatedTo: "", + delegatedMaxAmount: 0, + delegatedUntilBurnHt: 0, + delegatedPoxAddress: "", + amountLocked: 0, + amountUnlocked: initialUstxBalance, + unlockHeight: 0, + firstLockedRewardCycle: 0, + allowedContractCaller: "", + callerAllowedBy: [], + committedRewCycleIndexes: [], + }; + }); + + // Track the number of times each command is run, so we can see if all the + // commands are run at least once. + const statistics = fs.readdirSync(path.join(__dirname)).filter((file) => + file.startsWith("pox_") && file.endsWith(".ts") && + file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" + ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. + + // This is the initial state of the model. + const model = new Stub( + new Map(wallets.map((wallet) => [wallet.stxAddress, wallet])), + new Map(wallets.map((wallet) => [wallet.stxAddress, { + ustxBalance: 100_000_000_000_000, + isStacking: false, + isStackingSolo: false, + hasDelegated: false, + lockedAddresses: [], + amountToCommit: 0, + poolMembers: [], + delegatedTo: "", + delegatedMaxAmount: 0, + delegatedUntilBurnHt: 0, + delegatedPoxAddress: "", + amountLocked: 0, + amountUnlocked: 100_000_000_000_000, + unlockHeight: 0, + firstLockedRewardCycle: 0, + allowedContractCaller: "", + callerAllowedBy: [], + committedRewCycleIndexes: [], + }])), + new Map(statistics.map((commandName) => [commandName, 0])), + ); + + simnet.setEpoch("3.0"); + + fc.assert( + fc.property( + PoxCommands(model.wallets, model.stackers, sut.network), + (cmds) => { + const initialState = () => ({ model: model, real: sut }); + fc.modelRun(initialState, cmds); + }, + ), + { + // Defines the number of test iterations to run; default is 100. + numRuns: 1000, + // Adjusts the level of detail in test reports. Default is 0 (minimal). + // At level 2, reports include extensive details, helpful for deep + // debugging. 
This includes not just the failing case and its seed, but + // also a comprehensive log of all executed steps and their outcomes. + verbose: 2, + }, + ); + + model.reportCommandRuns(); +}); diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts new file mode 100644 index 0000000000..dad1a381a5 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts @@ -0,0 +1,132 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { expect } from "vitest"; +import { + boolCV, + Cl, + ClarityType, + OptionalCV, + UIntCV, +} from "@stacks/transactions"; + +/** + * The `AllowContractCallerCommand` authorizes a `contract-caller` to call + * stacking methods. Normally, stacking methods can only be invoked by direct + * transactions (i.e., the tx-sender issues a direct contract-call to the + * stacking methods). By issuing an allowance, the tx-sender may call stacking + * methods through the allowed contract. + * + * There are no constraints for running this command. + */ +export class AllowContractCallerCommand implements PoxCommand { + readonly wallet: Wallet; + readonly allowanceTo: Wallet; + readonly allowUntilBurnHt: OptionalCV; + + /** + * Constructs an `AllowContractCallerCommand` that authorizes a + * `contract-caller` to call stacking methods. + * + * @param wallet - Represents the Stacker's wallet. + * @param allowanceTo - Represents the authorized `contract-caller` (i.e., a + * stacking pool). + * @param allowUntilBurnHt - The burn block height until which the + * authorization is valid. 
+ */ + constructor( + wallet: Wallet, + allowanceTo: Wallet, + allowUntilBurnHt: OptionalCV, + ) { + this.wallet = wallet; + this.allowanceTo = allowanceTo; + this.allowUntilBurnHt = allowUntilBurnHt; + } + + check(): boolean { + // There are no constraints for running this command. + return true; + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + // Act + const allowContractCaller = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "allow-contract-caller", + [ + // (caller principal) + Cl.principal(this.allowanceTo.stxAddress), + // (until-burn-ht (optional uint)) + this.allowUntilBurnHt, + ], + this.wallet.stxAddress, + ); + + // Assert + expect(allowContractCaller.result).toBeOk(boolCV(true)); + + // Get the wallets involved from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + const callerAllowedBefore = wallet.allowedContractCaller; + + const callerAllowedBeforeState = model.stackers.get(callerAllowedBefore) || + null; + + if (callerAllowedBeforeState) { + // Remove the allower from the ex-allowed caller's allowance list. + + const walletIndexInsideAllowedByList = callerAllowedBeforeState + .callerAllowedBy.indexOf( + this.wallet.stxAddress, + ); + + expect(walletIndexInsideAllowedByList).toBeGreaterThan(-1); + + callerAllowedBeforeState.callerAllowedBy.splice( + walletIndexInsideAllowedByList, + 1, + ); + } + + const callerToAllow = model.stackers.get(this.allowanceTo.stxAddress)!; + // Update model so that we know this wallet has authorized a contract-caller. + + wallet.allowedContractCaller = this.allowanceTo.stxAddress; + callerToAllow.callerAllowedBy.push(this.wallet.stxAddress); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. 
+ logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "allow-contract-caller", + this.allowanceTo.label, + "until", + optionalCVToString(this.allowUntilBurnHt), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.stxAddress} allow-contract-caller ${this.allowanceTo.stxAddress} until burn ht ${ + optionalCVToString(this.allowUntilBurnHt) + }`; + } +} + +const optionalCVToString = (optional: OptionalCV): string => + optional.type === ClarityType.OptionalSome + ? (optional.value as UIntCV).value.toString() + : "none"; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts new file mode 100644 index 0000000000..6d4d582b58 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts @@ -0,0 +1,212 @@ +import fc from "fast-check"; + +import { Simnet } from "@hirosystems/clarinet-sdk"; +import { + ClarityValue, + cvToValue, + StacksPrivateKey, +} from "@stacks/transactions"; +import { StackingClient } from "@stacks/stacking"; +import { + FIRST_BURNCHAIN_BLOCK_HEIGHT, + REWARD_CYCLE_LENGTH, +} from "./pox_Commands"; + +export type StxAddress = string; +export type BtcAddress = string; +export type CommandTag = string; + +export class Stub { + readonly wallets: Map; + readonly statistics: Map; + readonly stackers: Map; + stackingMinimum: number; + nextRewardSetIndex: number; + lastRefreshedCycle: number; + burnBlockHeight: number; + + constructor( + wallets: Map, + stackers: Map, + statistics: Map, + ) { + this.wallets = wallets; + this.statistics = statistics; 
+ this.stackers = stackers; + this.stackingMinimum = 0; + this.nextRewardSetIndex = 0; + this.lastRefreshedCycle = 0; + this.burnBlockHeight = 0; + } + + trackCommandRun(commandName: string) { + const count = this.statistics.get(commandName) || 0; + this.statistics.set(commandName, count + 1); + } + + reportCommandRuns() { + console.log("Command run method execution counts:"); + this.statistics.forEach((count, commandName) => { + console.log(`${commandName}: ${count}`); + }); + } + + refreshStateForNextRewardCycle(real: Real) { + const burnBlockHeightResult = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightResult as ClarityValue), + ); + const lastRefreshedCycle = this.lastRefreshedCycle; + const currentRewCycle = Math.floor( + (Number(burnBlockHeight) - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + + // The `this.burnBlockHeight` instance member is used for logging purposes. + // However, it's not used in the actual implementation of the model and all + // usages below use the `burnBlockHeight` local variable. + this.burnBlockHeight = burnBlockHeight; + + if (lastRefreshedCycle < currentRewCycle) { + this.nextRewardSetIndex = 0; + + this.wallets.forEach((w) => { + let updatedAmountToCommit = 0; + const wallet = this.stackers.get(w.stxAddress)!; + + // Get the wallet's ex-delegators by comparing their delegatedUntilBurnHt + // to the current burn block height (only if the wallet is a delegatee). + const expiredDelegators = wallet.poolMembers.filter((stackerAddress) => + this.stackers.get(stackerAddress)!.delegatedUntilBurnHt < + burnBlockHeight + ); + + // Get the operator's pool stackers that no longer have partially commited + // STX for the next reward cycle by comparing their unlock height to + // the next reward cycle's first block (only if the wallet is an operator). 
+ const stackersToRemoveAmountToCommit = wallet.lockedAddresses.filter(( + stackerAddress, + ) => + this.stackers.get(stackerAddress)!.unlockHeight <= + burnBlockHeight + REWARD_CYCLE_LENGTH + ); + + // Get the operator's ex-pool stackers by comparing their unlockHeight to + // the current burn block height (only if the wallet is an operator). + const expiredStackers = wallet.lockedAddresses.filter( + (stackerAddress) => + this.stackers.get(stackerAddress)!.unlockHeight <= + burnBlockHeight, + ); + + // For each remaining pool stacker (if any), increase the operator's + // amountToCommit (partial-stacked) for the next cycle by the + // stacker's amountLocked. + wallet.lockedAddresses.forEach((stacker) => { + const stackerWallet = this.stackers.get(stacker)!; + updatedAmountToCommit += stackerWallet?.amountLocked; + }); + + // Update the operator's amountToCommit (partial-stacked). + wallet.amountToCommit = updatedAmountToCommit; + + // Remove the expired delegators from the delegatee's poolMembers list. + expiredDelegators.forEach((expDelegator) => { + const expDelegatorIndex = wallet.poolMembers.indexOf(expDelegator); + wallet.poolMembers.splice(expDelegatorIndex, 1); + }); + + // Remove the expired stackers from the operator's lockedAddresses list. + expiredStackers.forEach((expStacker) => { + const expStackerIndex = wallet.lockedAddresses.indexOf(expStacker); + wallet.lockedAddresses.splice(expStackerIndex, 1); + }); + + // For each pool stacker that no longer have partially commited STX for + // the next reward cycle, decrement the operator's amountToCommit + // (partial-stacked) by the stacker's amountLocked. + stackersToRemoveAmountToCommit.forEach((expStacker) => { + const expStackerWallet = this.stackers.get(expStacker)!; + wallet.amountToCommit -= expStackerWallet.amountLocked; + }); + + // Check the wallet's stack expiry and update the state accordingly. 
+ if ( + wallet.unlockHeight > 0 && wallet.unlockHeight <= burnBlockHeight + ) { + wallet.isStacking = false; + wallet.isStackingSolo = false; + wallet.amountUnlocked += wallet.amountLocked; + wallet.amountLocked = 0; + wallet.unlockHeight = 0; + wallet.firstLockedRewardCycle = 0; + } // If the wallet is solo stacking and its stack won't expire in the + // next reward cycle, increment the model's nextRewardSetIndex (the + // next empty reward slot) + else if ( + wallet.unlockHeight > 0 && + wallet.unlockHeight > burnBlockHeight + REWARD_CYCLE_LENGTH && + wallet.isStackingSolo + ) { + this.nextRewardSetIndex++; + } + wallet.committedRewCycleIndexes = []; + }); + this.lastRefreshedCycle = currentRewCycle; + } + } +} + +export type Real = { + network: Simnet; +}; + +export type Wallet = { + label: string; + stxAddress: string; + btcAddress: string; + signerPrvKey: StacksPrivateKey; + signerPubKey: string; + stackingClient: StackingClient; +}; + +export type Stacker = { + ustxBalance: number; + isStacking: boolean; + isStackingSolo: boolean; + hasDelegated: boolean; + lockedAddresses: StxAddress[]; + amountToCommit: number; + poolMembers: StxAddress[]; + delegatedTo: StxAddress; + delegatedMaxAmount: number; + delegatedUntilBurnHt: number; + delegatedPoxAddress: BtcAddress; + amountLocked: number; + amountUnlocked: number; + unlockHeight: number; + firstLockedRewardCycle: number; + allowedContractCaller: StxAddress; + callerAllowedBy: StxAddress[]; + committedRewCycleIndexes: number[]; +}; + +export type PoxCommand = fc.Command; + +export const logCommand = (...items: (string | undefined)[]) => { + // Ensure we only render up to the first 10 items for brevity. + const renderItems = items.slice(0, 10); + const columnWidth = 23; // Standard width for each column after the first two. + const halfColumns = Math.floor(columnWidth / 2); + + // Pad columns to their widths: half for the first two, full for the rest. 
+ const prettyPrint = renderItems.map((content, index) => + // Check if the index is less than 2 (i.e., first two items). + content + ? (index < 2 ? content.padEnd(halfColumns) : content.padEnd(columnWidth)) + : (index < 2 ? "".padEnd(halfColumns) : "".padEnd(columnWidth)) + ); + prettyPrint.push("\n"); + + process.stdout.write(prettyPrint.join("")); +}; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts new file mode 100644 index 0000000000..ba7043d5ec --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts @@ -0,0 +1,489 @@ +import fc from "fast-check"; +import { Real, Stacker, Stub, StxAddress, Wallet } from "./pox_CommandModel"; +import { GetStackingMinimumCommand } from "./pox_GetStackingMinimumCommand"; +import { GetStxAccountCommand } from "./pox_GetStxAccountCommand"; +import { StackStxSigCommand } from "./pox_StackStxSigCommand"; +import { StackStxAuthCommand } from "./pox_StackStxAuthCommand"; +import { DelegateStxCommand } from "./pox_DelegateStxCommand"; +import { DelegateStackStxCommand } from "./pox_DelegateStackStxCommand"; +import { Simnet } from "@hirosystems/clarinet-sdk"; +import { Cl, cvToValue, OptionalCV, UIntCV } from "@stacks/transactions"; +import { RevokeDelegateStxCommand } from "./pox_RevokeDelegateStxCommand"; +import { AllowContractCallerCommand } from "./pox_AllowContractCallerCommand"; +import { DelegateStackIncreaseCommand } from "./pox_DelegateStackIncreaseCommand"; +import { DelegateStackExtendCommand } from "./pox_DelegateStackExtendCommand"; +import { StackAggregationCommitAuthCommand } from "./pox_StackAggregationCommitAuthCommand"; +import { StackAggregationCommitSigCommand } from "./pox_StackAggregationCommitSigCommand"; +import { StackAggregationCommitIndexedSigCommand } from "./pox_StackAggregationCommitIndexedSigCommand"; +import { StackAggregationCommitIndexedAuthCommand } from 
"./pox_StackAggregationCommitIndexedAuthCommand"; +import { StackAggregationIncreaseCommand } from "./pox_StackAggregationIncreaseCommand"; +import { DisallowContractCallerCommand } from "./pox_DisallowContractCallerCommand"; +import { StackExtendAuthCommand } from "./pox_StackExtendAuthCommand"; +import { StackExtendSigCommand } from "./pox_StackExtendSigCommand"; +import { StackIncreaseAuthCommand } from "./pox_StackIncreaseAuthCommand"; +import { StackIncreaseSigCommand } from "./pox_StackIncreaseSigCommand"; + +export function PoxCommands( + wallets: Map, + stackers: Map, + network: Simnet, +): fc.Arbitrary>> { + const cmds = [ + // GetStackingMinimumCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + }).map(( + r: { + wallet: Wallet; + }, + ) => + new GetStackingMinimumCommand( + r.wallet, + ) + ), + // StackStxSigCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand( + r.wallet, + r.authId, + r.period, + r.margin, + ) + ), + // StackStxAuthCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand( + r.wallet, + r.authId, + r.period, + r.margin, + ) + ), + // StackExtendAuthCommand + fc + .record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }) + .map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand( + r.wallet, + r.extendCount, + r.authId, + 
r.currentCycle, + ), + ), + // StackExtendSigCommand + fc + .record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }) + .map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + ), + ), + // StackIncreaseAuthCommand + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + authId: fc.nat(), + }) + .map((r) => { + return new StackIncreaseAuthCommand( + r.operator, + r.increaseBy, + r.authId, + ); + }), + // StackIncreaseSigCommand + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + authId: fc.nat(), + }) + .map((r) => { + return new StackIncreaseSigCommand( + r.operator, + r.increaseBy, + r.authId, + ); + }), + // GetStackingMinimumCommand + fc + .record({ + wallet: fc.constantFrom(...wallets.values()), + }) + .map((r: { wallet: Wallet }) => new GetStackingMinimumCommand(r.wallet)), + // DelegateStxCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + delegateTo: fc.constantFrom(...wallets.values()), + untilBurnHt: fc.integer({ min: 1 }), + amount: fc.bigInt({ min: 0n, max: 100_000_000_000_000n }), + }).map(( + r: { + wallet: Wallet; + delegateTo: Wallet; + untilBurnHt: number; + amount: bigint; + }, + ) => + new DelegateStxCommand( + r.wallet, + r.delegateTo, + r.untilBurnHt, + r.amount, + ) + ), + // StackAggregationCommitAuthCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map(( + r: { + wallet: Wallet; + authId: number; + }, + ) => + new StackAggregationCommitAuthCommand( + r.wallet, + r.authId, + ) + ), + // StackAggregationCommitSigCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map(( + r: { + wallet: Wallet; + authId: 
number; + }, + ) => + new StackAggregationCommitSigCommand( + r.wallet, + r.authId, + ) + ), + // StackAggregationCommitIndexedAuthCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map(( + r: { + wallet: Wallet; + authId: number; + }, + ) => + new StackAggregationCommitIndexedAuthCommand( + r.wallet, + r.authId, + ) + ), + // StackAggregationCommitIndexedSigCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map(( + r: { + wallet: Wallet; + authId: number; + }, + ) => + new StackAggregationCommitIndexedSigCommand( + r.wallet, + r.authId, + ) + ), + // StackAggregationIncreaseCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).chain((r) => { + const operator = stackers.get(r.wallet.stxAddress)!; + const committedRewCycleIndexesOrFallback = + operator.committedRewCycleIndexes.length > 0 + ? operator.committedRewCycleIndexes + : [-1]; + return fc.record({ + rewardCycleIndex: fc.constantFrom( + ...committedRewCycleIndexesOrFallback, + ), + }).map((cycleIndex) => ({ ...r, ...cycleIndex })); + }).map(( + r: { + wallet: Wallet; + rewardCycleIndex: number; + authId: number; + }, + ) => + new StackAggregationIncreaseCommand( + r.wallet, + r.rewardCycleIndex, + r.authId, + ) + ), + // RevokeDelegateStxCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + }).map(( + r: { + wallet: Wallet; + }, + ) => + new RevokeDelegateStxCommand( + r.wallet, + ) + ), + // DelegateStackStxCommand + fc.record({ + operator: fc.constantFrom(...wallets.values()), + startBurnHt: fc.integer({ + min: currentCycleFirstBlock(network), + max: nextCycleFirstBlock(network), + }), + period: fc.integer({ min: 1, max: 12 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + // Determine available stackers based on the operator + const availableStackers = operator.poolMembers.length > 0 + ? 
operator.poolMembers + : [r.operator.stxAddress]; + + return fc.record({ + stacker: fc.constantFrom(...availableStackers), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })).chain((resultWithStacker) => { + return fc.record({ + unlockBurnHt: fc.constant( + currentCycleFirstBlock(network) + + 1050 * (resultWithStacker.period + 1), + ), + }).map((additionalProps) => ({ + ...resultWithStacker, + ...additionalProps, + })); + }).chain((resultWithUnlockHeight) => { + return fc.record({ + amount: fc.bigInt({ + min: 0n, + max: BigInt( + stackers.get(resultWithUnlockHeight.stacker.stxAddress)! + .delegatedMaxAmount, + ), + }), + }).map((amountProps) => ({ + ...resultWithUnlockHeight, + ...amountProps, + })); + }); + }).map((finalResult) => { + return new DelegateStackStxCommand( + finalResult.operator, + finalResult.stacker, + finalResult.period, + finalResult.amount, + finalResult.unlockBurnHt, + ); + }), + // DelegateStackIncreaseCommand + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + }) + .chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? 
[r.operator.stxAddress] + : availableStackers; + + return fc + .record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + }) + .map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })); + }) + .map((final) => { + return new DelegateStackIncreaseCommand( + final.operator, + final.stacker, + final.increaseBy, + ); + }), + // DelegateStackExtendCommand + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.integer({ min: 1, max: 11 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + currentCycle: fc.constant(currentCycle(network)), + }) + .map((additionalProps) => ({ + ...r, + stacker: wallets.get(additionalProps.stacker)!, + currentCycle: additionalProps.currentCycle, + })); + }).map((final) => + new DelegateStackExtendCommand( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + ) + ), + // AllowContractCallerCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + allowanceTo: fc.constantFrom(...wallets.values()), + alllowUntilBurnHt: fc.oneof( + fc.constant(Cl.none()), + fc.integer({ min: 1 }).map((value) => Cl.some(Cl.uint(value))), + ), + }) + .map( + (r: { + wallet: Wallet; + allowanceTo: Wallet; + alllowUntilBurnHt: OptionalCV; + }) => + new AllowContractCallerCommand( + r.wallet, + r.allowanceTo, + r.alllowUntilBurnHt, + ), + ), + // DisallowContractCallerCommand + fc.record({ + stacker: fc.constantFrom(...wallets.values()), + callerToDisallow: fc.constantFrom(...wallets.values()), + }).map( 
+ (r: { + stacker: Wallet; + callerToDisallow: Wallet; + }) => + new DisallowContractCallerCommand( + r.stacker, + r.callerToDisallow, + ), + ), + // GetStxAccountCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + }).map(( + r: { + wallet: Wallet; + }, + ) => + new GetStxAccountCommand( + r.wallet, + ) + ), + ]; + + // More on size: https://github.com/dubzzz/fast-check/discussions/2978 + // More on cmds: https://github.com/dubzzz/fast-check/discussions/3026 + return fc.commands(cmds, { size: "xsmall" }); +} + +export const REWARD_CYCLE_LENGTH = 1050; + +export const FIRST_BURNCHAIN_BLOCK_HEIGHT = 0; + +export const currentCycle = (network: Simnet) => + Number(cvToValue( + network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "current-pox-reward-cycle", + [], + "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + ).result, + )); + +export const currentCycleFirstBlock = (network: Simnet) => + Number(cvToValue( + network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(currentCycle(network))], + "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + ).result, + )); + +const nextCycleFirstBlock = (network: Simnet) => + Number(cvToValue( + network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(currentCycle(network) + 1)], + "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + ).result, + )); diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts new file mode 100644 index 0000000000..cfd385cf5a --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts @@ -0,0 +1,168 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { assert, expect } from "vitest"; +import 
{ Cl, ClarityType, isClarityType } from "@stacks/transactions"; +import { + FIRST_BURNCHAIN_BLOCK_HEIGHT, + REWARD_CYCLE_LENGTH, +} from "./pox_Commands.ts"; + +/** + * The `DelegateStackExtendCommand` allows a pool operator to + * extend an active stacking lock, issuing a "partial commitment" + * for the extended-to cycles. + * + * This method extends stacker's current lockup for an additional + * extend-count and partially commits those new cycles to `pox-addr`. + * + * Constraints for running this command include: + * - Stacker must have locked uSTX. + * - The Operator has to currently be delegated by the Stacker. + * - The new lock period must be less than or equal to 12. + */ +export class DelegateStackExtendCommand implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly extendCount: number; + readonly currentCycle: number; + + /** + * Constructs a `DelegateStackExtendCommand` to extend the unlock + * height as a Pool Operator on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the STacker's wallet. + * @param extendCount - Represents the cycles to be expended. + * @param currentCycle - Represents the current PoX reward cycle. + */ + constructor( + operator: Wallet, + stacker: Wallet, + extendCount: number, + currentCycle: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.extendCount = extendCount; + this.currentCycle = currentCycle; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - Stacker must have locked uSTX. + // - The Stacker's uSTX must have been locked by the Operator. + // - The Operator has to currently be delegated by the Stacker. + // - The new lock period must be less than or equal to 12. 
+ + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + + const firstRewardCycle = + this.currentCycle > stackerWallet.firstLockedRewardCycle + ? this.currentCycle + : stackerWallet.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stackerWallet.amountLocked; + + return ( + stackerWallet.amountLocked > 0 && + stackerWallet.hasDelegated === true && + stackerWallet.isStacking === true && + stackerWallet.delegatedTo === this.operator.stxAddress && + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight && + stackerWallet.delegatedMaxAmount >= stackedAmount && + operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + operatorWallet.lockedAddresses.includes(this.stacker.stxAddress) && + totalPeriod <= 12 + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + + // Act + const delegateStackExtend = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-extend", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(stackerWallet.delegatedPoxAddress), + // (extend-count uint) + Cl.uint(this.extendCount), + ], + this.operator.stxAddress, + ); + + const { result: firstExtendCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(stackerWallet.unlockHeight)], + this.operator.stxAddress, + ); + 
assert(isClarityType(firstExtendCycle, ClarityType.UInt)); + + const lastExtendCycle = Number(firstExtendCycle.value) + this.extendCount - + 1; + + const { result: extendedUnlockHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(lastExtendCycle + 1)], + this.operator.stxAddress, + ); + assert(isClarityType(extendedUnlockHeight, ClarityType.UInt)); + const newUnlockHeight = extendedUnlockHeight.value; + + // Assert + expect(delegateStackExtend.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.stacker.stxAddress), + "unlock-burn-height": Cl.uint(newUnlockHeight), + }), + ); + + // Get the Stacker's wallet from the model and update it with the new state. + // Update model so that we know this wallet's unlock height was extended. + stackerWallet.unlockHeight = Number(newUnlockHeight); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label} Ӿ ${this.stacker.label}`, + "delegate-stack-extend", + "extend count", + this.extendCount.toString(), + "new unlock height", + stackerWallet.unlockHeight.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} delegate-stack-extend extend count ${this.extendCount}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts new file mode 100644 index 0000000000..b9ec4a837c --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts @@ -0,0 +1,138 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +/** + * The DelegateStackIncreaseCommand allows a pool operator to + * increase an active stacking lock, issuing a "partial commitment" + * for the increased cycles. + * + * This method increases stacker's current lockup and partially + * commits the additional STX to `pox-addr`. + * + * Constraints for running this command include: + * - The Stacker must have locked uSTX. + * - The Operator has to currently be delegated by the Stacker. + * - The increase amount must be greater than 0. + * - Stacker's unlocked uSTX amount must be greater than or equal + * to the value of the increase amount. + * - Stacker's maximum delegated amount must be greater than or equal + * to the final locked amount. + * - The Operator must have locked the Stacker's previously locked funds. + */ +export class DelegateStackIncreaseCommand implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly increaseBy: number; + + /** + * Constructs a DelegateStackIncreaseCommand to increase the uSTX amount + * previously locked on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. 
+ * @param stacker - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. + */ + constructor(operator: Wallet, stacker: Wallet, increaseBy: number) { + this.operator = operator; + this.stacker = stacker; + this.increaseBy = increaseBy; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker must have locked uSTX. + // - The Operator has to currently be delegated by the Stacker. + // - The increase amount must be greater than 0. + // - Stacker's unlocked uSTX amount must be greater than or equal + // to the value of the increase amount. + // - Stacker's maximum delegated amount must be greater than or equal + // to the final locked amount. + // - The Operator must have locked the Stacker's previously locked funds. + + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + + return ( + stackerWallet.amountLocked > 0 && + stackerWallet.hasDelegated === true && + stackerWallet.isStacking === true && + this.increaseBy > 0 && + operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + stackerWallet.amountUnlocked >= this.increaseBy && + stackerWallet.delegatedMaxAmount >= + this.increaseBy + stackerWallet.amountLocked && + operatorWallet.lockedAddresses.indexOf(this.stacker.stxAddress) > -1 + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + const prevLocked = stackerWallet.amountLocked; + const newTotalLocked = prevLocked + this.increaseBy; + // Act + const delegateStackIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-increase", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + 
poxAddressToTuple(stackerWallet.delegatedPoxAddress), + // (increase-by uint) + Cl.uint(this.increaseBy), + ], + this.operator.stxAddress, + ); + + // Assert + expect(delegateStackIncrease.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.stacker.stxAddress), + "total-locked": Cl.uint(newTotalLocked), + }), + ); + + // Get the Stacker's wallet from the model and update it with the new state. + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + // Update model so that we know this stacker has increased the stacked amount. + // Update locked and unlocked fields in the model. + stackerWallet.amountLocked = newTotalLocked; + stackerWallet.amountUnlocked = stackerWallet.amountUnlocked - + this.increaseBy; + operatorWallet.amountToCommit += this.increaseBy; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label} Ӿ ${this.stacker.label}`, + "delegate-stack-increase", + "increased by", + this.increaseBy.toString(), + "previously locked", + prevLocked.toString(), + "total locked", + stackerWallet.amountLocked.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} delegate-stack-increase by ${this.increaseBy}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts new file mode 100644 index 0000000000..456983807f --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts @@ -0,0 +1,191 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { assert, expect } from "vitest"; +import { + Cl, + ClarityType, + ClarityValue, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `DelegateStackStxCommand` locks STX for stacking within PoX-4 on behalf + * of a delegator. This operation allows the `operator` to stack the `stacker`'s + * STX. + * + * Constraints for running this command include: + * - A minimum threshold of uSTX must be met, determined by the + * `get-stacking-minimum` function at the time of this call. + * - The Stacker cannot currently be engaged in another stacking operation. + * - The Stacker has to currently be delegating to the Operator. + * - The stacked STX amount should be less than or equal to the delegated + * amount. + * - The stacked uSTX amount should be less than or equal to the Stacker's + * balance. + * - The stacked uSTX amount should be greater than or equal to the minimum + * threshold of uSTX. + * - The Operator has to currently be delegated by the Stacker. + * - The Period has to fit the last delegation burn block height. 
+ */ +export class DelegateStackStxCommand implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly period: number; + readonly amountUstx: bigint; + readonly unlockBurnHt: number; + + /** + * Constructs a `DelegateStackStxCommand` to lock uSTX as a Pool Operator + * on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the STacker's wallet. + * @param period - Number of reward cycles to lock uSTX. + * @param amountUstx - The uSTX amount stacked by the Operator on behalf + * of the Stacker. + * @param unlockBurnHt - The burn height at which the uSTX is unlocked. + */ + constructor( + operator: Wallet, + stacker: Wallet, + period: number, + amountUstx: bigint, + unlockBurnHt: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.period = period; + this.amountUstx = amountUstx; + this.unlockBurnHt = unlockBurnHt; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - A minimum threshold of uSTX must be met, determined by the + // `get-stacking-minimum` function at the time of this call. + // - The Stacker cannot currently be engaged in another stacking + // operation. + // - The Stacker has to currently be delegating to the Operator. + // - The stacked uSTX amount should be less than or equal to the + // delegated amount. + // - The stacked uSTX amount should be less than or equal to the + // Stacker's balance. + // - The stacked uSTX amount should be greater than or equal to the + // minimum threshold of uSTX. + // - The Operator has to currently be delegated by the Stacker. + // - The Period has to fit the last delegation burn block height. 
+ + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + + return ( + model.stackingMinimum > 0 && + !stackerWallet.isStacking && + stackerWallet.hasDelegated && + stackerWallet.delegatedMaxAmount >= Number(this.amountUstx) && + Number(this.amountUstx) <= stackerWallet.ustxBalance && + Number(this.amountUstx) >= model.stackingMinimum && + operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + // Act + const delegateStackStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-stx", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (amount-ustx uint) + Cl.uint(this.amountUstx), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + ], + this.operator.stxAddress, + ); + const { result: rewardCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight)], + this.operator.stxAddress, + ); + assert(isClarityType(rewardCycle, ClarityType.UInt)); + + const { result: unlockBurnHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(Number(rewardCycle.value) + this.period + 1)], + this.operator.stxAddress, + ); + assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); + + // Assert + expect(delegateStackStx.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.stacker.stxAddress), + "lock-amount": 
Cl.uint(this.amountUstx), + "unlock-burn-height": Cl.uint(Number(unlockBurnHeight.value)), + }), + ); + + // Get the Stacker's wallet from the model and update it with the new state. + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + // Update model so that we know this wallet is stacking. This is important + // in order to prevent the test from stacking multiple times with the same + // address. + stackerWallet.isStacking = true; + // Update locked, unlocked, and unlock-height fields in the model. + stackerWallet.amountLocked = Number(this.amountUstx); + stackerWallet.unlockHeight = Number(unlockBurnHeight.value); + stackerWallet.amountUnlocked -= Number(this.amountUstx); + stackerWallet.firstLockedRewardCycle = currentCycle(real.network) + 1; + // Add stacker to the operators lock list. This will help knowing that + // the stacker's funds are locked when calling delegate-stack-extend + // and delegate-stack-increase. + operatorWallet.lockedAddresses.push(this.stacker.stxAddress); + operatorWallet.amountToCommit += Number(this.amountUstx); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label} Ӿ ${this.stacker.label}`, + "delegate-stack-stx", + "lock-amount", + this.amountUstx.toString(), + "until", + stackerWallet.unlockHeight.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} delegate-stack-stx stacker ${this.stacker.label} period ${this.period}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts new file mode 100644 index 0000000000..4a12b0140d --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts @@ -0,0 +1,124 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { boolCV, Cl } from "@stacks/transactions"; + +/** + * The `DelegateStxCommand` delegates STX for stacking within PoX-4. This + * operation allows the `tx-sender` (the `wallet` in this case) to delegate + * stacking participation to a `delegatee`. + * + * Constraints for running this command include: + * - The Stacker cannot currently be a delegator in another delegation. + * - The PoX address provided should have a valid version (between 0 and 6 + * inclusive). + */ +export class DelegateStxCommand implements PoxCommand { + readonly wallet: Wallet; + readonly delegateTo: Wallet; + readonly untilBurnHt: number; + readonly amount: bigint; + + /** + * Constructs a `DelegateStxCommand` to delegate uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param delegateTo - Represents the Delegatee's STX address. + * @param untilBurnHt - The burn block height until the delegation is valid. + * @param amount - The maximum amount the `Stacker` delegates the `Delegatee` + * to stack on his behalf. 
+ */ + constructor( + wallet: Wallet, + delegateTo: Wallet, + untilBurnHt: number, + amount: bigint, + ) { + this.wallet = wallet; + this.delegateTo = delegateTo; + this.untilBurnHt = untilBurnHt; + this.amount = amount; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker cannot currently be a delegator in another delegation. + + return ( + model.stackingMinimum > 0 && + !model.stackers.get(this.wallet.stxAddress)?.hasDelegated + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + // The amount of uSTX delegated by the Stacker to the Delegatee. + // Even if there are no constraints about the delegated amount, + // it will be checked in the future, when calling delegate-stack-stx. + const amountUstx = Number(this.amount); + + // Act + const delegateStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stx", + [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (delegate-to principal) + Cl.principal(this.delegateTo.stxAddress), + // (until-burn-ht (optional uint)) + Cl.some(Cl.uint(this.untilBurnHt)), + // (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) })) + Cl.some(poxAddressToTuple(this.delegateTo.btcAddress)), + ], + this.wallet.stxAddress, + ); + + // Assert + expect(delegateStx.result).toBeOk(boolCV(true)); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + const delegatedWallet = model.stackers.get(this.delegateTo.stxAddress)!; + // Update model so that we know this wallet has delegated. This is important + // in order to prevent the test from delegating multiple times with the same + // address. 
+ wallet.hasDelegated = true; + wallet.delegatedTo = this.delegateTo.stxAddress; + wallet.delegatedMaxAmount = amountUstx; + wallet.delegatedUntilBurnHt = this.untilBurnHt; + wallet.delegatedPoxAddress = this.delegateTo.btcAddress; + + delegatedWallet.poolMembers.push(this.wallet.stxAddress); + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "delegate-stx", + "amount", + amountUstx.toString(), + "delegated to", + this.delegateTo.label, + "until", + this.untilBurnHt.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} delegate-stx to ${this.delegateTo.label} until burn ht ${this.untilBurnHt}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts new file mode 100644 index 0000000000..09618db49c --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts @@ -0,0 +1,108 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { expect } from "vitest"; +import { boolCV, Cl } from "@stacks/transactions"; + +/** + * The `DisallowContractCallerComand` revokes a `contract-caller`'s + * authorization to call stacking methods. + * + * Constraints for running this command include: + * - The Caller to be disallowed must have been previously + * allowed by the Operator. 
+ */ +export class DisallowContractCallerCommand implements PoxCommand { + readonly stacker: Wallet; + readonly callerToDisallow: Wallet; + + /** + * Constructs a `DisallowContractCallerComand` to revoke authorization + * for calling stacking methods. + * + * @param stacker - Represents the `Stacker`'s wallet. + * @param callerToDisallow - The `contract-caller` to be revoked. + */ + constructor(stacker: Wallet, callerToDisallow: Wallet) { + this.stacker = stacker; + this.callerToDisallow = callerToDisallow; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Caller to be disallowed must have been previously allowed + // by the Operator. + + const stacker = model.stackers.get(this.stacker.stxAddress)!; + const callerToDisallow = model.stackers.get( + this.callerToDisallow.stxAddress, + )!; + return ( + stacker.allowedContractCaller === this.callerToDisallow.stxAddress && + callerToDisallow.callerAllowedBy.includes( + this.stacker.stxAddress, + ) === + true + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + // Act + const disallowContractCaller = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "disallow-contract-caller", + [ + // (caller principal) + Cl.principal(this.callerToDisallow.stxAddress), + ], + this.stacker.stxAddress, + ); + + // Assert + expect(disallowContractCaller.result).toBeOk(boolCV(true)); + + // Get the wallet to be revoked stacking rights from the model and + // update it with the new state. + const callerToDisallow = model.stackers.get( + this.callerToDisallow.stxAddress, + )!; + + // Update model so that we know that the stacker has revoked stacking + // allowance. + const stacker = model.stackers.get(this.stacker.stxAddress)!; + stacker.allowedContractCaller = ""; + + // Remove the operator from the caller to disallow's allowance list. 
+ const walletIndexAllowedByList = callerToDisallow.callerAllowedBy.indexOf( + this.stacker.stxAddress, + ); + + expect(walletIndexAllowedByList).toBeGreaterThan(-1); + callerToDisallow.callerAllowedBy.splice(walletIndexAllowedByList, 1); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.stacker.label}`, + "disallow-contract-caller", + this.callerToDisallow.label, + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.stacker.label} disallow-contract-caller ${this.callerToDisallow.label}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStackingMinimumCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStackingMinimumCommand.ts new file mode 100644 index 0000000000..50dd7bf16c --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStackingMinimumCommand.ts @@ -0,0 +1,70 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { assert } from "vitest"; +import { ClarityType, isClarityType } from "@stacks/transactions"; + +/** + * Implements the `PoxCommand` interface to get the minimum stacking amount + * required for a given reward cycle. + */ +export class GetStackingMinimumCommand implements PoxCommand { + readonly wallet: Wallet; + + /** + * Constructs a new `GetStackingMinimumCommand`. + * + * @param wallet The wallet information, including the STX address used to + * query the stacking minimum requirement. 
+ */ + constructor(wallet: Wallet) { + this.wallet = wallet; + } + + check(_model: Readonly): boolean { + // There are no constraints for running this command. + return true; + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + // Act + const { result: stackingMinimum } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "get-stacking-minimum", + [], + this.wallet.stxAddress, + ); + assert(isClarityType(stackingMinimum, ClarityType.UInt)); + + // Update the model with the new stacking minimum. This is important for + // the `check` method of the `StackStxCommand` class to work correctly, as + // we as other tests that may depend on the stacking minimum. + model.stackingMinimum = Number(stackingMinimum.value); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "get-stacking-minimum", + "pox-4", + stackingMinimum.value.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} get-stacking-minimum`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStxAccountCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStxAccountCommand.ts new file mode 100644 index 0000000000..60d8ff38b2 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStxAccountCommand.ts @@ -0,0 +1,72 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +/** + * Implements the `PoxCommand` interface to get the info returned from the + * `stx-account`. + */ +export class GetStxAccountCommand implements PoxCommand { + readonly wallet: Wallet; + + /** + * Constructs a new `GetStxAccountCommand`. + * + * @param wallet The wallet information, including the STX address used to + * query the `stx-account`. + */ + constructor(wallet: Wallet) { + this.wallet = wallet; + } + + check(_model: Readonly): boolean { + // There are no constraints for running this command. + return true; + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + const actual = model.stackers.get(this.wallet.stxAddress)!; + expect(real.network.runSnippet(`(stx-account '${this.wallet.stxAddress})`)) + .toBeTuple({ + "locked": Cl.uint(actual.amountLocked), + "unlocked": Cl.uint(actual.amountUnlocked), + "unlock-height": Cl.uint(actual.unlockHeight), + }); + + expect(actual.amountLocked + actual.amountUnlocked).toBe( + actual.ustxBalance, + ); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. 
+ logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stx-account", + "lock-amount", + actual.amountLocked.toString(), + "unlocked-amount", + actual.amountUnlocked.toString(), + "unlocked-height", + actual.unlockHeight.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stx-account`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts new file mode 100644 index 0000000000..1c30e3d569 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -0,0 +1,107 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl, someCV, tupleCV } from "@stacks/transactions"; + +/** + * The `RevokeDelegateStxCommand` revokes the delegation for stacking within + * PoX-4. + * + * Constraints for running this command include: + * - The `Stacker` has to currently be delegating. + */ +export class RevokeDelegateStxCommand implements PoxCommand { + readonly wallet: Wallet; + + /** + * Constructs a RevokeDelegateStxCommand to revoke delegate uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + */ + constructor(wallet: Wallet) { + this.wallet = wallet; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker has to currently be delegating. 
+ + return ( + model.stackingMinimum > 0 && + model.stackers.get(this.wallet.stxAddress)!.hasDelegated === true + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + const wallet = model.stackers.get(this.wallet.stxAddress)!; + const operatorWallet = model.stackers.get(wallet.delegatedTo)!; + + // Act + const revokeDelegateStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "revoke-delegate-stx", + [], + this.wallet.stxAddress, + ); + + // Assert + expect(revokeDelegateStx.result).toBeOk( + someCV( + tupleCV({ + "amount-ustx": Cl.uint(wallet.delegatedMaxAmount), + "delegated-to": Cl.principal( + model.stackers.get(this.wallet.stxAddress)!.delegatedTo || "", + ), + "pox-addr": Cl.some( + poxAddressToTuple(wallet.delegatedPoxAddress || ""), + ), + "until-burn-ht": Cl.some(Cl.uint(wallet.delegatedUntilBurnHt)), + }), + ), + ); + + // Get the Stacker's wallet from the model and update the two wallets + // involved with the new state. + // Update model so that we know this wallet is not delegating anymore. + // This is important in order to prevent the test from revoking the + // delegation multiple times with the same address. + wallet.hasDelegated = false; + wallet.delegatedTo = ""; + wallet.delegatedUntilBurnHt = 0; + wallet.delegatedMaxAmount = 0; + wallet.delegatedPoxAddress = ""; + + // Remove the Stacker from the Pool Operator's pool members list. + const walletIndexInDelegatorsList = operatorWallet.poolMembers.indexOf( + this.wallet.stxAddress, + ); + expect(walletIndexInDelegatorsList).toBeGreaterThan(-1); + operatorWallet.poolMembers.splice(walletIndexInDelegatorsList, 1); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. 
+ logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "revoke-delegate-stx", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.stxAddress} revoke-delegate-stx`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts new file mode 100644 index 0000000000..5312679833 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts @@ -0,0 +1,137 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `StackAggregationCommitAuthCommand` allows an operator to commit + * partially stacked STX & to allocate a new PoX reward address slot. + * This allows a stacker to lock fewer STX than the minimal threshold + * in multiple transactions, so long as: + * 1. The pox-addr is the same. + * 2. The "commit" transaction is called _before_ the PoX anchor block. + * + * This command calls stack-aggregation-commit using an `authorization`. + * + * Constraints for running this command include: + * - The Operator must have locked STX on behalf of at least one stacker. + * - The total amount previously locked by the Operator on behalf of the + * stackers has to be greater than the uSTX threshold. 
+ */ +export class StackAggregationCommitAuthCommand implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + + /** + * Constructs a `StackAggregationCommitAuthCommand` to lock uSTX for stacking. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + */ + constructor( + operator: Wallet, + authId: number, + ) { + this.operator = operator; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Operator must have locked STX on behalf of at least one stacker. + // - The total amount previously locked by the Operator on behalf of the + // stackers has to be greater than the uSTX threshold. + + const operator = model.stackers.get(this.operator.stxAddress)!; + return operator.lockedAddresses.length > 0 && + operator.amountToCommit >= model.stackingMinimum; + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const { result: setSignature } = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + expect(setSignature).toBeOk(Cl.bool(true)); + + // Act + const stackAggregationCommit = real.network.callPublicFn( + 
"ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationCommit.result).toBeOk(Cl.bool(true)); + + operatorWallet.amountToCommit -= committedAmount; + operatorWallet.committedRewCycleIndexes.push(model.nextRewardSetIndex); + model.nextRewardSetIndex++; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label}`, + "stack-agg-commit", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit auth-id ${this.authId}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts new file mode 100644 index 0000000000..dfe7f2beef --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts @@ -0,0 +1,145 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `StackAggregationCommitIndexedAuthCommand` allows an operator to + * commit partially stacked STX & to allocate a new PoX reward address + * slot. + * This allows a stacker to lock fewer STX than the minimal threshold + * in multiple transactions, so long as: + * 1. The pox-addr is the same. + * 2. The "commit" transaction is called _before_ the PoX anchor block. + * + * This command calls `stack-aggregation-commit-indexed` using an + * `authorization`. + * + * Constraints for running this command include: + * - The Operator must have locked STX on behalf of at least one stacker. + * - The total amount previously locked by the Operator on behalf of the + * stackers has to be greater than the uSTX threshold. + */ +export class StackAggregationCommitIndexedAuthCommand implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + + /** + * Constructs a `StackAggregationCommitIndexedAuthCommand` to lock uSTX + * for stacking. + * + * @param operator - Represents the `Operator`'s wallet. 
+ * @param authId - Unique `auth-id` for the authorization. + */ + constructor( + operator: Wallet, + authId: number, + ) { + this.operator = operator; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Operator must have locked STX on behalf of at least one stacker. + // - The total amount previously locked by the Operator on behalf of the + // stackers has to be greater than the uSTX threshold. + + const operator = model.stackers.get(this.operator.stxAddress)!; + return ( + operator.lockedAddresses.length > 0 && + operator.amountToCommit >= model.stackingMinimum + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const { result: setSignature } = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + expect(setSignature).toBeOk(Cl.bool(true)); + + // Act + const stackAggregationCommitIndexed = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit-indexed", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + 
Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationCommitIndexed.result).toBeOk( + Cl.uint(model.nextRewardSetIndex), + ); + + // Update the model + operatorWallet.amountToCommit -= committedAmount; + operatorWallet.committedRewCycleIndexes.push(model.nextRewardSetIndex); + model.nextRewardSetIndex++; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label}`, + "stack-agg-commit-indexed", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit-indexed auth-id ${this.authId}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts new file mode 100644 index 0000000000..59707e21f4 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts @@ -0,0 +1,145 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `StackAggregationCommitIndexedSigCommand` allows an operator to + * commit partially stacked STX & to allocate a new PoX reward address + * slot. + * This allows a stacker to lock fewer STX than the minimal threshold + * in multiple transactions, so long as: + * 1. The pox-addr is the same. + * 2. The "commit" transaction is called _before_ the PoX anchor block. + * + * This command calls `stack-aggregation-commit-indexed` using a + * `signature`. + * + * Constraints for running this command include: + * - The Operator must have locked STX on behalf of at least one stacker. + * - The total amount previously locked by the Operator on behalf of the + * stackers has to be greater than the uSTX threshold. + */ +export class StackAggregationCommitIndexedSigCommand implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + + /** + * Constructs a `StackAggregationCommitIndexedSigCommand` to lock uSTX + * for stacking. 
+ * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + */ + constructor( + operator: Wallet, + authId: number, + ) { + this.operator = operator; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Operator must have locked STX on behalf of at least one stacker. + // - The total amount previously locked by the Operator on behalf of the + // stackers has to be greater than the uSTX threshold. + + const operator = model.stackers.get(this.operator.stxAddress)!; + return ( + operator.lockedAddresses.length > 0 && + operator.amountToCommit >= model.stackingMinimum + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase or agg-commit. + topic: Pox4SignatureTopic.AggregateCommit, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. 
+ authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: committedAmount, + }); + + // Act + const stackAggregationCommitIndexed = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit-indexed", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationCommitIndexed.result).toBeOk( + Cl.uint(model.nextRewardSetIndex), + ); + + // Update the model + operatorWallet.amountToCommit -= committedAmount; + operatorWallet.committedRewCycleIndexes.push(model.nextRewardSetIndex); + model.nextRewardSetIndex++; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label}`, + "stack-agg-commit-indexed", + "amount committed", + committedAmount.toString(), + "signature", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit-indexed auth-id ${this.authId}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts new file mode 100644 index 0000000000..32fe552477 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts @@ -0,0 +1,137 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `StackAggregationCommitSigCommand` allows an operator to commit + * partially stacked STX & to allocate a new PoX reward address slot. + * This allows a stacker to lock fewer STX than the minimal threshold + * in multiple transactions, so long as: + * 1. The pox-addr is the same. + * 2. This "commit" transaction is called _before_ the PoX anchor block. + * + * This command calls `stack-aggregation-commit` using a `signature`. + * + * Constraints for running this command include: + * - The Operator must have locked STX on behalf of at least one stacker. + * - The total amount previously locked by the Operator on behalf of the + * stackers has to be greater than the uSTX threshold. + */ +export class StackAggregationCommitSigCommand implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + + /** + * Constructs a `StackAggregationCommitSigCommand` to lock uSTX for stacking. + * + * @param operator - Represents the `Operator`'s wallet. 
+ * @param authId - Unique `auth-id` for the authorization. + */ + constructor( + operator: Wallet, + authId: number, + ) { + this.operator = operator; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Operator must have locked STX on behalf of at least one stacker. + // - The total amount previously locked by the Operator on behalf of the + // stackers has to be greater than the uSTX threshold. + + const operator = model.stackers.get(this.operator.stxAddress)!; + return operator.lockedAddresses.length > 0 && + operator.amountToCommit >= model.stackingMinimum; + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase or agg-commit. + topic: Pox4SignatureTopic.AggregateCommit, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. 
+ maxAmount: committedAmount, + }); + + // Act + const stackAggregationCommit = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationCommit.result).toBeOk(Cl.bool(true)); + + operatorWallet.amountToCommit -= committedAmount; + operatorWallet.committedRewCycleIndexes.push(model.nextRewardSetIndex); + model.nextRewardSetIndex++; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label}`, + "stack-agg-commit", + "amount committed", + committedAmount.toString(), + "signature", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit auth-id ${this.authId}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand.ts new file mode 100644 index 0000000000..22ae0a0bea --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand.ts @@ -0,0 +1,159 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl, cvToJSON } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `StackAggregationIncreaseCommand` allows an operator to commit + * partially stacked STX to a PoX address which has already received + * some STX (more than the `stacking minimum`). + * This allows a delegator to lock up marginally more STX from new + * delegates, even if they collectively do not exceed the Stacking + * minimum, so long as the target PoX address already represents at + * least as many STX as the `stacking minimum`. + * This command calls stack-aggregation-increase. + * + * Constraints for running this command include: + * - The Operator must have locked STX on behalf of at least one stacker. + * - The PoX address must have partial committed STX. + * - The Reward Cycle Index must be positive. 
+ */ +export class StackAggregationIncreaseCommand implements PoxCommand { + readonly operator: Wallet; + readonly rewardCycleIndex: number; + readonly authId: number; + + /** + * Constructs a `StackAggregationIncreaseCommand` to commit partially + * stacked STX to a PoX address which has already received some STX. + * + * @param operator - Represents the `Operator`'s wallet. + * @param rewardCycleIndex - The cycle index to increase the commit for. + * @param authId - Unique `auth-id` for the authorization. + */ + constructor( + operator: Wallet, + rewardCycleIndex: number, + authId: number, + ) { + this.operator = operator; + this.rewardCycleIndex = rewardCycleIndex; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Operator must have locked STX on behalf of at least one stacker. + // - The PoX address must have partial committed STX. + // - The Reward Cycle Index must be positive. + const operator = model.stackers.get(this.operator.stxAddress)!; + return ( + operator.lockedAddresses.length > 0 && + this.rewardCycleIndex >= 0 && + operator.amountToCommit > 0 + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const existingEntryCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-pox-address-list", + Cl.tuple({ + "index": Cl.uint(this.rewardCycleIndex), + "reward-cycle": Cl.uint(currentRewCycle + 1), + }), + ); + + const totalStackedBefore = + cvToJSON(existingEntryCV).value.value["total-ustx"].value; + const maxAmount = committedAmount + Number(totalStackedBefore); + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. 
+ signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase, agg-commit or agg-increase. + topic: Pox4SignatureTopic.AggregateIncrease, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: maxAmount, + }); + + // Act + const stackAggregationIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-increase", + [ + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (reward-cycle-index uint)) + Cl.uint(this.rewardCycleIndex), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationIncrease.result).toBeOk(Cl.bool(true)); + + operatorWallet.amountToCommit -= committedAmount; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. 
+ logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label}`, + "stack-agg-increase", + "amount committed", + committedAmount.toString(), + "cycle index", + this.rewardCycleIndex.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-increase for index ${this.rewardCycleIndex}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts new file mode 100644 index 0000000000..a7dbf49cbb --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts @@ -0,0 +1,178 @@ +import { poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { + currentCycle, + FIRST_BURNCHAIN_BLOCK_HEIGHT, + REWARD_CYCLE_LENGTH, +} from "./pox_Commands"; +import { Cl, ClarityType, isClarityType } from "@stacks/transactions"; +import { assert, expect } from "vitest"; + +export class StackExtendAuthCommand implements PoxCommand { + readonly wallet: Wallet; + readonly extendCount: number; + readonly authId: number; + readonly currentCycle: number; + + /** + * Constructs a `StackExtendAuthCommand` to lock uSTX for stacking. + * + * This command calls `stack-extend` using an `authorization`. + * + * @param wallet - Represents the Stacker's wallet. + * @param extendCount - Represents the cycles to extend the stack with. + * @param authId - Unique auth-id for the authorization. + * @param currentCycle - Represents the current PoX reward cycle. 
+ * + * Constraints for running this command include: + * - The Stacker must have locked uSTX. + * - The Stacker must be stacking solo. + * - The Stacker must not have delegated to a pool. + * - The new lock period must be less than or equal to 12. + */ + constructor( + wallet: Wallet, + extendCount: number, + authId: number, + currentCycle: number, + ) { + this.wallet = wallet; + this.extendCount = extendCount; + this.authId = authId; + this.currentCycle = currentCycle; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker must have locked uSTX. + // - The Stacker must be stacking solo. + // - The Stacker must not have delegated to a pool. + // - The new lock period must be less than or equal to 12. + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + + return ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const { result: setAuthorization } = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.extendCount), + // (reward-cycle uint) + 
Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-extend"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + expect(setAuthorization).toBeOk(Cl.bool(true)); + const stackExtend = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-extend", + [ + // (extend-count uint) + Cl.uint(this.extendCount), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.wallet.btcAddress), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + const { result: firstExtendCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(stacker.unlockHeight)], + this.wallet.stxAddress, + ); + assert(isClarityType(firstExtendCycle, ClarityType.UInt)); + + const lastExtendCycle = Number(firstExtendCycle.value) + this.extendCount - + 1; + + const { result: extendedUnlockHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(lastExtendCycle + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(extendedUnlockHeight, ClarityType.UInt)); + + const newUnlockHeight = extendedUnlockHeight.value; + + expect(stackExtend.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.wallet.stxAddress), + "unlock-burn-height": Cl.uint(newUnlockHeight), + }), + ); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + // Update model so that we know this wallet's unlock height was extended. 
+ wallet.unlockHeight = Number(newUnlockHeight); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stack-extend-auth", + "extend-count", + this.extendCount.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-extend auth extend-count ${this.extendCount}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts new file mode 100644 index 0000000000..56848d9448 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts @@ -0,0 +1,177 @@ +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { + currentCycle, + FIRST_BURNCHAIN_BLOCK_HEIGHT, + REWARD_CYCLE_LENGTH, +} from "./pox_Commands"; +import { Cl, ClarityType, isClarityType } from "@stacks/transactions"; +import { assert, expect } from "vitest"; + +export class StackExtendSigCommand implements PoxCommand { + readonly wallet: Wallet; + readonly extendCount: number; + readonly authId: number; + readonly currentCycle: number; + + /** + * Constructs a `StackExtendSigCommand` to lock uSTX for stacking. + * + * This command calls `stack-extend` using a `signature`. + * + * @param wallet - Represents the Stacker's wallet. 
+ * @param extendCount - Represents the cycles to extend the stack with. + * @param authId - Unique auth-id for the authorization. + * @param currentCycle - Represents the current PoX reward cycle. + * + * Constraints for running this command include: + * - The Stacker must have locked uSTX. + * - The Stacker must be stacking solo. + * - The Stacker must not have delegated to a pool. + * - The new lock period must be less than or equal to 12. + */ + constructor( + wallet: Wallet, + extendCount: number, + authId: number, + currentCycle: number, + ) { + this.wallet = wallet; + this.extendCount = extendCount; + this.authId = authId; + this.currentCycle = currentCycle; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker must have locked uSTX. + // - The Stacker must be stacking solo. + // - The Stacker must not have delegated to a pool. + // - The new lock period must be less than or equal to 12. + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + + return ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. 
+ signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: this.extendCount, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackExtend, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: stacker.amountLocked, + }); + + const stackExtend = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-extend", + [ + // (extend-count uint) + Cl.uint(this.extendCount), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.wallet.btcAddress), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + const { result: firstExtendCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(stacker.unlockHeight)], + this.wallet.stxAddress, + ); + assert(isClarityType(firstExtendCycle, ClarityType.UInt)); + + const lastExtendCycle = Number(firstExtendCycle.value) + this.extendCount - + 1; + + const { result: extendedUnlockHeight } = 
real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(lastExtendCycle + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(extendedUnlockHeight, ClarityType.UInt)); + + const newUnlockHeight = extendedUnlockHeight.value; + + expect(stackExtend.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.wallet.stxAddress), + "unlock-burn-height": Cl.uint(newUnlockHeight), + }), + ); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + // Update model so that we know this wallet's unlock height was extended. + wallet.unlockHeight = Number(newUnlockHeight); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stack-extend-sig", + "extend-count", + this.extendCount.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-extend sig extend-count ${this.extendCount}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts new file mode 100644 index 0000000000..127ea1d984 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts @@ -0,0 +1,162 @@ +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { currentCycle } from "./pox_Commands"; +import { Cl, cvToJSON } from "@stacks/transactions"; +import { expect } from "vitest"; +import { tx } from "@hirosystems/clarinet-sdk"; + +/** + * The `StackIncreaseAuthCommand` locks up an additional amount + * of STX from `tx-sender`'s, indicated by `increase-by`. + * + * This command calls `stack-increase` using an `authorization`. + * + * Constraints for running this command include: + * - The Stacker must have locked uSTX. + * - The Stacker must be stacking solo. + * - The Stacker must not have delegated to a pool. + * - The increase amount must be less than or equal to the + * Stacker's unlocked uSTX amount. + */ + +export class StackIncreaseAuthCommand implements PoxCommand { + readonly wallet: Wallet; + readonly increaseBy: number; + readonly authId: number; + + /** + * Constructs a `StackIncreaseAuthCommand` to increase lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. + * @param authId - Unique auth-id for the authorization. 
+ */ + constructor(wallet: Wallet, increaseBy: number, authId: number) { + this.wallet = wallet; + this.increaseBy = increaseBy; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker must have locked uSTX. + // - The Stacker must be stacking solo. + // - The Stacker must not have delegated to a pool. + // - The increse amount must be less or equal to the + // Stacker's unlocked uSTX amount. + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + return ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + this.increaseBy <= stacker.amountUnlocked && + this.increaseBy >= 1 + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + const currentRewCycle = currentCycle(real.network); + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + // Get the lock period from the stacking state. This will be used for correctly + // issuing the authorization. + const stackingStateCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "stacking-state", + Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress) }), + ); + const period = cvToJSON(stackingStateCV).value.value["lock-period"].value; + + const maxAmount = stacker.amountLocked + this.increaseBy; + + // Act + + // Include the authorization and the `stack-increase` transactions in a single + // block. This way we ensure both the authorization and the stack-increase + // transactions are called during the same reward cycle and avoid the clarity + // error `ERR_INVALID_REWARD_CYCLE`. 
+ const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii(Pox4SignatureTopic.StackIncrease), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-increase", + [ + // (increase-by uint) + Cl.uint(this.increaseBy), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.wallet.stxAddress), + "total-locked": Cl.uint(stacker.amountLocked + this.increaseBy), + }), + ); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + // Update model so that we know this wallet's locked amount and unlocked + // amount was extended. + wallet.amountLocked += this.increaseBy; + wallet.amountUnlocked -= this.increaseBy; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stack-increase-auth", + "increase-by", + this.increaseBy.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. 
+ model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-increase auth increase-by ${this.increaseBy}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts new file mode 100644 index 0000000000..ec51e3d7e4 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts @@ -0,0 +1,172 @@ +import { Pox4SignatureTopic } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { + Cl, + ClarityType, + ClarityValue, + cvToJSON, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { assert, expect } from "vitest"; + +/** + * The `StackIncreaseSigCommand` locks up an additional amount + * of STX from `tx-sender`'s, indicated by `increase-by`. + * + * This command calls `stack-increase` using a `signature`. + * + * Constraints for running this command include: + * - The Stacker must have locked uSTX. + * - The Stacker must be stacking solo. + * - The Stacker must not have delegated to a pool. + * - The increase amount must be less than or equal to the + * Stacker's unlocked uSTX amount. + * - The increase amount must be equal or greater than 1. + */ +export class StackIncreaseSigCommand implements PoxCommand { + readonly wallet: Wallet; + readonly increaseBy: number; + readonly authId: number; + + /** + * Constructs a `StackIncreaseSigCommand` to lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. 
+ * @param authId - Unique auth-id for the authorization. + */ + constructor(wallet: Wallet, increaseBy: number, authId: number) { + this.wallet = wallet; + this.increaseBy = increaseBy; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker must have locked uSTX. + // - The Stacker must be stacking solo. + // - The Stacker must not have delegated to a pool. + // - The increse amount must be less than or equal to the + // Stacker's unlocked uSTX amount. + // - The increase amount must be equal or greater than 1. + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + return ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + this.increaseBy <= stacker.amountUnlocked && + this.increaseBy >= 1 + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const maxAmount = stacker.amountLocked + this.increaseBy; + + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + const { result: rewardCycleNextBlockCV } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(rewardCycleNextBlockCV, ClarityType.UInt)); + + const rewardCycleNextBlock = cvToValue(rewardCycleNextBlockCV); + + // Get the lock period from the stacking state. This will be used for correctly + // issuing the authorization. 
+ const stackingStateCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "stacking-state", + Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress) }), + ); + const period = cvToJSON(stackingStateCV).value.value["lock-period"].value; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: rewardCycleNextBlock, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: period, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackIncrease, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. 
+ maxAmount: maxAmount, + }); + + const stackIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-increase", + [ + // (increase-by uint) + Cl.uint(this.increaseBy), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + expect(stackIncrease.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.wallet.stxAddress), + "total-locked": Cl.uint(stacker.amountLocked + this.increaseBy), + }), + ); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + // Update model so that we know this wallet's locked amount and unlocked amount was extended. + wallet.amountLocked += this.increaseBy; + wallet.amountUnlocked -= this.increaseBy; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stack-increase-sig", + "increase-by", + this.increaseBy.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-increase sig increase-by ${this.increaseBy}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts new file mode 100644 index 0000000000..108f0956b5 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts @@ -0,0 +1,206 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { assert, expect } from "vitest"; +import { + Cl, + ClarityType, + ClarityValue, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `StackStxAuthCommand` locks STX for stacking within PoX-4. This self-service + * operation allows the `tx-sender` (the `wallet` in this case) to participate + * as a Stacker. + * + * This command calls `stack-stx` using an `authorization`. + * + * Constraints for running this command include: + * - The Stacker cannot currently be engaged in another stacking operation. + * - A minimum threshold of uSTX must be met, determined by the + * `get-stacking-minimum` function at the time of this call. + * - The amount of uSTX locked may need to be increased in future reward cycles + * if the minimum threshold rises. + */ +export class StackStxAuthCommand implements PoxCommand { + readonly wallet: Wallet; + readonly authId: number; + readonly period: number; + readonly margin: number; + + /** + * Constructs a `StackStxAuthCommand` to lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param authId - Unique auth-id for the authorization. + * @param period - Number of reward cycles to lock uSTX. 
+ * @param margin - Multiplier for minimum required uSTX to stack so that each + * Stacker locks a different amount of uSTX across test runs. + */ + constructor( + wallet: Wallet, + authId: number, + period: number, + margin: number, + ) { + this.wallet = wallet; + this.authId = authId; + this.period = period; + this.margin = margin; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - A minimum threshold of uSTX must be met, determined by the + // `get-stacking-minimum` function at the time of this call. + // - The Stacker cannot currently be engaged in another stacking operation. + // - The Stacker cannot currently be delegating STX to a delegatee. + + const stacker = model.stackers.get(this.wallet.stxAddress)!; + return ( + model.stackingMinimum > 0 && !stacker.isStacking && !stacker.hasDelegated + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. For our tests, we will use the minimum amount of uSTX to be stacked + // in the given reward cycle multiplied by the margin, which is a randomly + // generated number passed to the constructor of this class. 
+ const maxAmount = model.stackingMinimum * this.margin; + + const { result: setAuthorization } = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-stx"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + expect(setAuthorization).toBeOk(Cl.bool(true)); + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + // The amount of uSTX to be locked in the reward cycle. For this test, we + // will use the maximum amount of uSTX that can be used (per tx) with this + // signer key. 
+ const amountUstx = maxAmount; + + // Act + const stackStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-stx", + [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + const { result: rewardCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight)], + this.wallet.stxAddress, + ); + assert(isClarityType(rewardCycle, ClarityType.UInt)); + + const { result: unlockBurnHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(Number(rewardCycle.value) + this.period + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); + + // Assert + expect(stackStx.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amountUstx), + "signer-key": Cl.bufferFromHex(this.wallet.signerPubKey), + "stacker": Cl.principal(this.wallet.stxAddress), + "unlock-burn-height": Cl.uint(Number(unlockBurnHeight.value)), + }), + ); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + // Update model so that we know this wallet is stacking. This is important + // in order to prevent the test from stacking multiple times with the same + // address. + wallet.isStacking = true; + wallet.isStackingSolo = true; + // Update locked, unlocked, and unlock-height fields in the model. 
+ wallet.amountLocked = amountUstx; + wallet.unlockHeight = Number(unlockBurnHeight.value); + wallet.amountUnlocked -= amountUstx; + wallet.firstLockedRewardCycle = Number(rewardCycle.value) + 1; + + model.nextRewardSetIndex++; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stack-stx-auth", + "lock-amount", + amountUstx.toString(), + "period", + this.period.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-stx auth auth-id ${this.authId} and period ${this.period}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts new file mode 100644 index 0000000000..baa87015a1 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts @@ -0,0 +1,204 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { assert, expect } from "vitest"; +import { + Cl, + ClarityType, + ClarityValue, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `StackStxSigCommand` locks STX for stacking within PoX-4. This self-service + * operation allows the `tx-sender` (the `wallet` in this case) to participate + * as a Stacker. 
+ * + * This command calls stack-stx using a `signature`. + * + * Constraints for running this command include: + * - The Stacker cannot currently be engaged in another stacking operation. + * - A minimum threshold of uSTX must be met, determined by the + * `get-stacking-minimum` function at the time of this call. + * - The amount of uSTX locked may need to be increased in future reward cycles + * if the minimum threshold rises. + */ +export class StackStxSigCommand implements PoxCommand { + readonly wallet: Wallet; + readonly authId: number; + readonly period: number; + readonly margin: number; + + /** + * Constructs a `StackStxSigCommand` to lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param authId - Unique auth-id for the authorization. + * @param period - Number of reward cycles to lock uSTX. + * @param margin - Multiplier for minimum required uSTX to stack so that each + * Stacker locks a different amount of uSTX across test runs. + */ + constructor( + wallet: Wallet, + authId: number, + period: number, + margin: number, + ) { + this.wallet = wallet; + this.authId = authId; + this.period = period; + this.margin = margin; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - A minimum threshold of uSTX must be met, determined by the + // `get-stacking-minimum` function at the time of this call. + // - The Stacker cannot currently be engaged in another stacking operation. + // - The Stacker cannot currently be delegating STX to a delegatee. 
+ + const stacker = model.stackers.get(this.wallet.stxAddress)!; + return ( + model.stackingMinimum > 0 && !stacker.isStacking && !stacker.hasDelegated + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + const currentRewCycle = currentCycle(real.network); + + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. For our tests, we will use the minimum amount of uSTX to be stacked + // in the given reward cycle multiplied by the margin, which is a randomly + // generated number passed to the constructor of this class. + const maxAmount = model.stackingMinimum * this.margin; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: this.period, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackStx, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: maxAmount, + }); + + // The amount of uSTX to be locked in the reward cycle. 
For this test, we + // will use the maximum amount of uSTX that can be used (per tx) with this + // signer key. + const amountUstx = maxAmount; + + // Act + const stackStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-stx", + [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + const { result: rewardCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight)], + this.wallet.stxAddress, + ); + assert(isClarityType(rewardCycle, ClarityType.UInt)); + + const { result: unlockBurnHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(Number(rewardCycle.value) + this.period + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); + + // Assert + expect(stackStx.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amountUstx), + "signer-key": Cl.bufferFromHex(this.wallet.signerPubKey), + "stacker": Cl.principal(this.wallet.stxAddress), + "unlock-burn-height": Cl.uint(Number(unlockBurnHeight.value)), + }), + ); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + // Update model so that we know this wallet is stacking. This is important + // in order to prevent the test from stacking multiple times with the same + // address. 
+ wallet.isStacking = true; + wallet.isStackingSolo = true; + // Update locked, unlocked, and unlock-height fields in the model. + wallet.amountLocked = amountUstx; + wallet.unlockHeight = Number(unlockBurnHeight.value); + wallet.amountUnlocked -= amountUstx; + wallet.firstLockedRewardCycle = Number(rewardCycle.value) + 1; + + model.nextRewardSetIndex++; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stack-stx-sig", + "lock-amount", + amountUstx.toString(), + "period", + this.period.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-stx sig auth-id ${this.authId} and period ${this.period}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tsconfig.json b/contrib/boot-contracts-stateful-prop-tests/tsconfig.json new file mode 100644 index 0000000000..aa218f6d42 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tsconfig.json @@ -0,0 +1,25 @@ +{ + "compilerOptions": { + "target": "ESNext", + "useDefineForClassFields": true, + "module": "ESNext", + "lib": ["ESNext"], + "skipLibCheck": true, + + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + + "strict": true, + "noImplicitAny": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": [ + "node_modules/@hirosystems/clarinet-sdk/vitest-helpers/src", + "tests" + ] +} diff --git 
a/contrib/boot-contracts-stateful-prop-tests/vitest.config.js b/contrib/boot-contracts-stateful-prop-tests/vitest.config.js new file mode 100644 index 0000000000..364c55f735 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/vitest.config.js @@ -0,0 +1,43 @@ +/// + +import { defineConfig } from "vite"; +import { + vitestSetupFilePath, + getClarinetVitestsArgv, +} from "@hirosystems/clarinet-sdk/vitest"; + +/* + In this file, Vitest is configured so that it works seamlessly with Clarinet and the Simnet. + + The `vitest-environment-clarinet` will initialise the clarinet-sdk + and make the `simnet` object available globally in the test files. + + `vitestSetupFilePath` points to a file in the `@hirosystems/clarinet-sdk` package that does two things: + - run `before` hooks to initialize the simnet and `after` hooks to collect costs and coverage reports. + - load custom vitest matchers to work with Clarity values (such as `expect(...).toBeUint()`) + + The `getClarinetVitestsArgv()` will parse options passed to the command `vitest run --` + - vitest run -- --manifest ./Clarinet.toml # pass a custom path + - vitest run -- --coverage --costs # collect coverage and cost reports +*/ + +export default defineConfig({ + test: { + environment: "clarinet", // use vitest-environment-clarinet + pool: "forks", + poolOptions: { + threads: { singleThread: true }, + forks: { singleFork: true }, + }, + setupFiles: [ + vitestSetupFilePath, + // custom setup files can be added here + ], + environmentOptions: { + clarinet: { + ...getClarinetVitestsArgv(), + // add or override options + }, + }, + }, +}); diff --git a/contrib/boot-contracts-unit-tests/.gitignore b/contrib/boot-contracts-unit-tests/.gitignore new file mode 100644 index 0000000000..76c2842b12 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/.gitignore @@ -0,0 +1,13 @@ + +**/settings/Mainnet.toml +**/settings/Testnet.toml +.cache/** +history.txt + +logs +*.log +npm-debug.log* +coverage +*.info 
+costs-reports.json +node_modules diff --git a/contrib/boot-contracts-unit-tests/.vscode/settings.json b/contrib/boot-contracts-unit-tests/.vscode/settings.json new file mode 100644 index 0000000000..306251957d --- /dev/null +++ b/contrib/boot-contracts-unit-tests/.vscode/settings.json @@ -0,0 +1,4 @@ + +{ + "files.eol": "\n" +} diff --git a/contrib/boot-contracts-unit-tests/.vscode/tasks.json b/contrib/boot-contracts-unit-tests/.vscode/tasks.json new file mode 100644 index 0000000000..4dec0ffa98 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/.vscode/tasks.json @@ -0,0 +1,19 @@ + +{ + "version": "2.0.0", + "tasks": [ + { + "label": "check contracts", + "group": "test", + "type": "shell", + "command": "clarinet check" + }, + { + "type": "npm", + "script": "test", + "group": "test", + "problemMatcher": [], + "label": "npm test" + } + ] +} diff --git a/contrib/boot-contracts-unit-tests/Clarinet.toml b/contrib/boot-contracts-unit-tests/Clarinet.toml new file mode 100644 index 0000000000..00907244f8 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/Clarinet.toml @@ -0,0 +1,21 @@ +[project] +name = 'boot-contracts-unit-tests' +description = '' +authors = [] +telemetry = false +cache_dir = './.cache' +requirements = [] + +[contracts.indirect] +path = 'contracts/indirect.clar' +clarity_version = 2 +epoch = 2.4 + +[repl.analysis] +passes = ['check_checker'] + +[repl.analysis.check_checker] +strict = false +trusted_sender = false +trusted_caller = false +callee_filter = false diff --git a/contrib/boot-contracts-unit-tests/README.md b/contrib/boot-contracts-unit-tests/README.md new file mode 100644 index 0000000000..a6e6eef8f1 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/README.md @@ -0,0 +1,24 @@ +# Boot contracts unit tests + +Run unit tests with clarinet on boot contracts. 
+ +Contracts tests: + +- [x] pox-4.clar + + +## About boot contract unit testing with Clarinet + +- To really test contracts such as the pox contracts, we need to test the boot contracts embedded +into Clarinet. For example `ST000000000000000000002AMW42H.pox-4.clar` +- This means that calling this contract will interact +- Since the boot contracts are embedded into Clarinet, we only test the version of the contract +that is in Clarinet, and not the ones that actually live in the stacks-core repository. + +We are able to get the boot contracts coverage thanks to these settings in `vitest.config.js`: +```js + includeBootContracts: true, + bootContractsPath: `${process.cwd()}/boot_contracts`, +``` +A copy of the tested boot contracts is included in this directory as well so that we are able to +compute and render the code coverage. diff --git a/contrib/boot-contracts-unit-tests/boot_contracts/pox-4.clar b/contrib/boot-contracts-unit-tests/boot_contracts/pox-4.clar new file mode 100644 index 0000000000..9824a71931 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/boot_contracts/pox-4.clar @@ -0,0 +1,1484 @@ +;; The .pox-4 contract +;; Error codes +(define-constant ERR_STACKING_UNREACHABLE 255) +(define-constant ERR_STACKING_CORRUPTED_STATE 254) +(define-constant ERR_STACKING_INSUFFICIENT_FUNDS 1) +(define-constant ERR_STACKING_INVALID_LOCK_PERIOD 2) +(define-constant ERR_STACKING_ALREADY_STACKED 3) +(define-constant ERR_STACKING_NO_SUCH_PRINCIPAL 4) +(define-constant ERR_STACKING_EXPIRED 5) +(define-constant ERR_STACKING_STX_LOCKED 6) +(define-constant ERR_STACKING_PERMISSION_DENIED 9) +(define-constant ERR_STACKING_THRESHOLD_NOT_MET 11) +(define-constant ERR_STACKING_POX_ADDRESS_IN_USE 12) +(define-constant ERR_STACKING_INVALID_POX_ADDRESS 13) + +(define-constant ERR_STACKING_INVALID_AMOUNT 18) +(define-constant ERR_NOT_ALLOWED 19) +(define-constant ERR_STACKING_ALREADY_DELEGATED 20) +(define-constant ERR_DELEGATION_EXPIRES_DURING_LOCK 21) +(define-constant
ERR_DELEGATION_TOO_MUCH_LOCKED 22) +(define-constant ERR_DELEGATION_POX_ADDR_REQUIRED 23) +(define-constant ERR_INVALID_START_BURN_HEIGHT 24) +(define-constant ERR_NOT_CURRENT_STACKER 25) +(define-constant ERR_STACK_EXTEND_NOT_LOCKED 26) +(define-constant ERR_STACK_INCREASE_NOT_LOCKED 27) +(define-constant ERR_DELEGATION_NO_REWARD_SLOT 28) +(define-constant ERR_DELEGATION_WRONG_REWARD_SLOT 29) +(define-constant ERR_STACKING_IS_DELEGATED 30) +(define-constant ERR_STACKING_NOT_DELEGATED 31) +(define-constant ERR_INVALID_SIGNER_KEY 32) +(define-constant ERR_REUSED_SIGNER_KEY 33) +(define-constant ERR_DELEGATION_ALREADY_REVOKED 34) +(define-constant ERR_INVALID_SIGNATURE_PUBKEY 35) +(define-constant ERR_INVALID_SIGNATURE_RECOVER 36) +(define-constant ERR_INVALID_REWARD_CYCLE 37) +(define-constant ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH 38) +(define-constant ERR_SIGNER_AUTH_USED 39) +(define-constant ERR_INVALID_INCREASE 40) + +;; Valid values for burnchain address versions. +;; These first four correspond to address hash modes in Stacks 2.1, +;; and are defined in pox-mainnet.clar and pox-testnet.clar (so they +;; cannot be defined here again). 
+(define-constant ADDRESS_VERSION_P2PKH 0x00) +(define-constant ADDRESS_VERSION_P2SH 0x01) +(define-constant ADDRESS_VERSION_P2WPKH 0x02) +(define-constant ADDRESS_VERSION_P2WSH 0x03) +(define-constant ADDRESS_VERSION_NATIVE_P2WPKH 0x04) +(define-constant ADDRESS_VERSION_NATIVE_P2WSH 0x05) +(define-constant ADDRESS_VERSION_NATIVE_P2TR 0x06) + +;; Values for stacks address versions +(define-constant STACKS_ADDR_VERSION_MAINNET 0x16) +(define-constant STACKS_ADDR_VERSION_TESTNET 0x1a) + +;; Keep these constants in lock-step with the address version buffs above +;; Maximum value of an address version as a uint +(define-constant MAX_ADDRESS_VERSION u6) +;; Maximum value of an address version that has a 20-byte hashbytes +;; (0x00, 0x01, 0x02, 0x03, and 0x04 have 20-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_20 u4) +;; Maximum value of an address version that has a 32-byte hashbytes +;; (0x05 and 0x06 have 32-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_32 u6) + +;; PoX mainnet constants +;; Min/max number of reward cycles uSTX can be locked for +(define-constant MIN_POX_REWARD_CYCLES u1) +(define-constant MAX_POX_REWARD_CYCLES u12) + +;; Default length of the PoX registration window, in burnchain blocks. +(define-constant PREPARE_CYCLE_LENGTH (if is-in-mainnet u100 u50)) + +;; Default length of the PoX reward cycle, in burnchain blocks. +(define-constant REWARD_CYCLE_LENGTH (if is-in-mainnet u2100 u1050)) + +;; Stacking thresholds +(define-constant STACKING_THRESHOLD_25 (if is-in-mainnet u20000 u8000)) + +;; SIP18 message prefix +(define-constant SIP018_MSG_PREFIX 0x534950303138) + +;; Data vars that store a copy of the burnchain configuration. +;; Implemented as data-vars, so that different configurations can be +;; used in e.g. test harnesses. 
+(define-data-var pox-prepare-cycle-length uint PREPARE_CYCLE_LENGTH) +(define-data-var pox-reward-cycle-length uint REWARD_CYCLE_LENGTH) +(define-data-var first-burnchain-block-height uint u0) +(define-data-var configured bool false) +(define-data-var first-pox-4-reward-cycle uint u0) + +;; This function can only be called once, when it boots up +(define-public (set-burnchain-parameters (first-burn-height uint) + (prepare-cycle-length uint) + (reward-cycle-length uint) + (begin-pox-4-reward-cycle uint)) + (begin + (asserts! (not (var-get configured)) (err ERR_NOT_ALLOWED)) + (var-set first-burnchain-block-height first-burn-height) + (var-set pox-prepare-cycle-length prepare-cycle-length) + (var-set pox-reward-cycle-length reward-cycle-length) + (var-set first-pox-4-reward-cycle begin-pox-4-reward-cycle) + (var-set configured true) + (ok true)) +) + +;; The Stacking lock-up state and associated metadata. +;; Records are inserted into this map via `stack-stx`, `delegate-stack-stx`, `stack-extend` +;; `delegate-stack-extend` and burnchain transactions for invoking `stack-stx`, etc. +;; Records will be deleted from this map when auto-unlocks are processed +;; +;; This map de-normalizes some state from the `reward-cycle-pox-address-list` map +;; and the `pox-4` contract tries to keep this state in sync with the reward-cycle +;; state. 
The major invariants of this `stacking-state` map are: +;; (1) any entry in `reward-cycle-pox-address-list` with `some stacker` points to a real `stacking-state` +;; (2) `stacking-state.reward-set-indexes` matches the index of that `reward-cycle-pox-address-list` +;; (3) all `stacking-state.reward-set-indexes` match the index of their reward cycle entries +;; (4) `stacking-state.pox-addr` matches `reward-cycle-pox-address-list.pox-addr` +;; (5) if set, (len reward-set-indexes) == lock-period +;; (6) (reward-cycle-to-burn-height (+ lock-period first-reward-cycle)) == (get unlock-height (stx-account stacker)) +;; These invariants only hold while `cur-reward-cycle < (+ lock-period first-reward-cycle)` +;; +(define-map stacking-state + { stacker: principal } + { + ;; Description of the underlying burnchain address that will + ;; receive PoX'ed tokens. Translating this into an address + ;; depends on the burnchain being used. When Bitcoin is + ;; the burnchain, this gets translated into a p2pkh, p2sh, + ;; p2wpkh-p2sh, p2wsh-p2sh, p2wpkh, p2wsh, or p2tr UTXO, + ;; depending on the version. The `hashbytes` field *must* be + ;; either 20 bytes or 32 bytes, depending on the output. + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + ;; how long the uSTX are locked, in reward cycles. + lock-period: uint, + ;; reward cycle when rewards begin + first-reward-cycle: uint, + ;; indexes in each reward-set associated with this user. + ;; these indexes are only valid looking forward from + ;; `first-reward-cycle` (i.e., they do not correspond + ;; to entries in the reward set that may have been from + ;; previous stack-stx calls, or prior to an extend) + reward-set-indexes: (list 12 uint), + ;; principal of the delegate, if stacker has delegated + delegated-to: (optional principal), + } +) + +;; Delegation relationships +(define-map delegation-state + { stacker: principal } + { + amount-ustx: uint, ;; how many uSTX delegated? 
+ delegated-to: principal, ;; who are we delegating? + until-burn-ht: (optional uint), ;; how long does the delegation last? + ;; does the delegate _need_ to use a specific + ;; pox recipient address? + pox-addr: (optional { version: (buff 1), hashbytes: (buff 32) }) + } +) + +;; allowed contract-callers +(define-map allowance-contract-callers + { sender: principal, contract-caller: principal } + { until-burn-ht: (optional uint) }) + +;; How many uSTX are stacked in a given reward cycle. +;; Updated when a new PoX address is registered, or when more STX are granted +;; to it. +(define-map reward-cycle-total-stacked + { reward-cycle: uint } + { total-ustx: uint } +) + +;; Internal map read by the Stacks node to iterate through the list of +;; PoX reward addresses on a per-reward-cycle basis. +(define-map reward-cycle-pox-address-list + { reward-cycle: uint, index: uint } + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + total-ustx: uint, + stacker: (optional principal), + signer: (buff 33) + } +) + +(define-map reward-cycle-pox-address-list-len + { reward-cycle: uint } + { len: uint } +) + +;; how much has been locked up for this address before +;; committing? +;; this map allows stackers to stack amounts < minimum +;; by paying the cost of aggregation during the commit +(define-map partial-stacked-by-cycle + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + sender: principal + } + { stacked-amount: uint } +) + +;; This is identical to partial-stacked-by-cycle, but its data is never deleted. +;; It is used to preserve data for downstream clients to observe aggregate +;; commits. Each key/value pair in this map is simply the last value of +;; partial-stacked-by-cycle right after it was deleted (so, subsequent calls +;; to the `stack-aggregation-*` functions will overwrite this). 
+(define-map logged-partial-stacked-by-cycle + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + sender: principal + } + { stacked-amount: uint } +) + +;; State for setting authorizations for signer keys to be used in +;; certain stacking transactions. These fields match the fields used +;; in the message hash for signature-based signer key authorizations. +;; Values in this map are set in `set-signer-key-authorization`. +(define-map signer-key-authorizations + { + ;; The signer key being authorized + signer-key: (buff 33), + ;; The reward cycle for which the authorization is valid. + ;; For `stack-stx` and `stack-extend`, this refers to the reward + ;; cycle where the transaction is confirmed. For `stack-aggregation-commit`, + ;; this refers to the reward cycle argument in that function. + reward-cycle: uint, + ;; For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + ;; this refers to `extend-count`. For `stack-aggregation-commit`, this is `u1`. + period: uint, + ;; A string representing the function where this authorization is valid. Either + ;; `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: (string-ascii 14), + ;; The PoX address that can be used with this signer key + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + ;; The unique auth-id for this authorization + auth-id: uint, + ;; The maximum amount of uSTX that can be used (per tx) with this signer key + max-amount: uint, + } + bool ;; Whether the authorization can be used or not +) + +;; State for tracking used signer key authorizations. This prevents re-use +;; of the same signature or pre-set authorization for multiple transactions. 
+;; Refer to the `signer-key-authorizations` map for the documentation on these fields +(define-map used-signer-key-authorizations + { + signer-key: (buff 33), + reward-cycle: uint, + period: uint, + topic: (string-ascii 14), + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + auth-id: uint, + max-amount: uint, + } + bool ;; Whether the field has been used or not +) + +;; What's the reward cycle number of the burnchain block height? +;; Will runtime-abort if height is less than the first burnchain block (this is intentional) +(define-read-only (burn-height-to-reward-cycle (height uint)) + (/ (- height (var-get first-burnchain-block-height)) (var-get pox-reward-cycle-length))) + +;; What's the block height at the start of a given reward cycle? +(define-read-only (reward-cycle-to-burn-height (cycle uint)) + (+ (var-get first-burnchain-block-height) (* cycle (var-get pox-reward-cycle-length)))) + +;; What's the current PoX reward cycle? +(define-read-only (current-pox-reward-cycle) + (burn-height-to-reward-cycle burn-block-height)) + +;; Get the _current_ PoX stacking principal information. If the information +;; is expired, or if there's never been such a stacker, then returns none. +(define-read-only (get-stacker-info (stacker principal)) + (match (map-get? stacking-state { stacker: stacker }) + stacking-info + (if (<= (+ (get first-reward-cycle stacking-info) (get lock-period stacking-info)) (current-pox-reward-cycle)) + ;; present, but lock has expired + none + ;; present, and lock has not expired + (some stacking-info) + ) + ;; no state at all + none + )) + +(define-read-only (check-caller-allowed) + (or (is-eq tx-sender contract-caller) + (let ((caller-allowed + ;; if not in the caller map, return false + (unwrap! (map-get? allowance-contract-callers + { sender: tx-sender, contract-caller: contract-caller }) + false)) + (expires-at + ;; if until-burn-ht not set, then return true (because no expiry) + (unwrap! 
(get until-burn-ht caller-allowed) true))) + ;; is the caller allowance expired? + (if (>= burn-block-height expires-at) + false + true)))) + +(define-read-only (get-check-delegation (stacker principal)) + (let ((delegation-info (try! (map-get? delegation-state { stacker: stacker })))) + ;; did the existing delegation expire? + (if (match (get until-burn-ht delegation-info) + until-burn-ht (> burn-block-height until-burn-ht) + false) + ;; it expired, return none + none + ;; delegation is active + (some delegation-info)))) + +;; Get the size of the reward set for a reward cycle. +;; Note that this also _will_ return PoX addresses that are beneath +;; the minimum threshold -- i.e. the threshold can increase after insertion. +;; Used internally by the Stacks node, which filters out the entries +;; in this map to select PoX addresses with enough STX. +(define-read-only (get-reward-set-size (reward-cycle uint)) + (default-to + u0 + (get len (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle })))) + +;; Add a single PoX address to a single reward cycle. +;; Used to build up a set of per-reward-cycle PoX addresses. +;; No checking will be done -- don't call if this PoX address is already registered in this reward cycle! +;; Returns the index into the reward cycle that the PoX address is stored to +(define-private (append-reward-cycle-pox-addr (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (reward-cycle uint) + (amount-ustx uint) + (stacker (optional principal)) + (signer (buff 33))) + (let ((sz (get-reward-set-size reward-cycle))) + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: sz } + { pox-addr: pox-addr, total-ustx: amount-ustx, stacker: stacker, signer: signer }) + (map-set reward-cycle-pox-address-list-len + { reward-cycle: reward-cycle } + { len: (+ u1 sz) }) + sz)) + +;; How many uSTX are stacked? 
+(define-read-only (get-total-ustx-stacked (reward-cycle uint)) + (default-to + u0 + (get total-ustx (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) +) + +;; Called internally by the node to iterate through the list of PoX addresses in this reward cycle. +;; Returns (optional (tuple (pox-addr ) (total-ustx ))) +(define-read-only (get-reward-set-pox-address (reward-cycle uint) (index uint)) + (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: index })) + +;; Add a PoX address to the `cycle-index`-th reward cycle, if `cycle-index` is between 0 and the given num-cycles (exclusive). +;; Arguments are given as a tuple, so this function can be (folded ..)'ed onto a list of its arguments. +;; Used by add-pox-addr-to-reward-cycles. +;; No checking is done. +;; The returned tuple is the same as inputted `params`, but the `i` field is incremented if +;; the pox-addr was added to the given cycle. Also, `reward-set-indexes` grows to include all +;; of the `reward-cycle-index` key parts of the `reward-cycle-pox-address-list` which get added by this function. +;; This way, the caller knows which items in a given reward cycle's PoX address list got updated. 
+(define-private (add-pox-addr-to-ith-reward-cycle (cycle-index uint) (params (tuple + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (reward-set-indexes (list 12 uint)) + (first-reward-cycle uint) + (num-cycles uint) + (stacker (optional principal)) + (signer (buff 33)) + (amount-ustx uint) + (i uint)))) + (let ((reward-cycle (+ (get first-reward-cycle params) (get i params))) + (num-cycles (get num-cycles params)) + (i (get i params)) + (reward-set-index (if (< i num-cycles) + (let ((total-ustx (get-total-ustx-stacked reward-cycle)) + (reward-index + ;; record how many uSTX this pox-addr will stack for in the given reward cycle + (append-reward-cycle-pox-addr + (get pox-addr params) + reward-cycle + (get amount-ustx params) + (get stacker params) + (get signer params) + ))) + ;; update running total + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: (+ (get amount-ustx params) total-ustx) }) + (some reward-index)) + none)) + (next-i (if (< i num-cycles) (+ i u1) i))) + { + pox-addr: (get pox-addr params), + first-reward-cycle: (get first-reward-cycle params), + num-cycles: num-cycles, + amount-ustx: (get amount-ustx params), + stacker: (get stacker params), + signer: (get signer params), + reward-set-indexes: (match + reward-set-index new (unwrap-panic (as-max-len? (append (get reward-set-indexes params) new) u12)) + (get reward-set-indexes params)), + i: next-i + })) + +;; Add a PoX address to a given sequence of reward cycle lists. +;; A PoX address can be added to at most 12 consecutive cycles. +;; No checking is done. 
+(define-private (add-pox-addr-to-reward-cycles (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (first-reward-cycle uint) + (num-cycles uint) + (amount-ustx uint) + (stacker principal) + (signer (buff 33))) + (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11)) + (results (fold add-pox-addr-to-ith-reward-cycle cycle-indexes + { pox-addr: pox-addr, first-reward-cycle: first-reward-cycle, num-cycles: num-cycles, + reward-set-indexes: (list), amount-ustx: amount-ustx, i: u0, stacker: (some stacker), signer: signer })) + (reward-set-indexes (get reward-set-indexes results))) + ;; For safety, add up the number of times (add-principal-to-ith-reward-cycle) returns 1. + ;; It _should_ be equal to num-cycles. + (asserts! (is-eq num-cycles (get i results)) (err ERR_STACKING_UNREACHABLE)) + (asserts! (is-eq num-cycles (len reward-set-indexes)) (err ERR_STACKING_UNREACHABLE)) + (ok reward-set-indexes))) + +(define-private (add-pox-partial-stacked-to-ith-cycle + (cycle-index uint) + (params { pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + num-cycles: uint, + amount-ustx: uint })) + (let ((pox-addr (get pox-addr params)) + (num-cycles (get num-cycles params)) + (reward-cycle (get reward-cycle params)) + (amount-ustx (get amount-ustx params))) + (let ((current-amount + (default-to u0 + (get stacked-amount + (map-get? 
partial-stacked-by-cycle { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle }))))) + (if (>= cycle-index num-cycles) + ;; do not add to cycles >= cycle-index + false + ;; otherwise, add to the partial-stacked-by-cycle + (map-set partial-stacked-by-cycle + { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle } + { stacked-amount: (+ amount-ustx current-amount) })) + ;; produce the next params tuple + { pox-addr: pox-addr, + reward-cycle: (+ u1 reward-cycle), + num-cycles: num-cycles, + amount-ustx: amount-ustx }))) + +;; Add a PoX address to a given sequence of partial reward cycle lists. +;; A PoX address can be added to at most 12 consecutive cycles. +;; No checking is done. +(define-private (add-pox-partial-stacked (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (first-reward-cycle uint) + (num-cycles uint) + (amount-ustx uint)) + (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11))) + (fold add-pox-partial-stacked-to-ith-cycle cycle-indexes + { pox-addr: pox-addr, reward-cycle: first-reward-cycle, num-cycles: num-cycles, amount-ustx: amount-ustx }) + true)) + +;; What is the minimum number of uSTX to be stacked in the given reward cycle? +;; Used internally by the Stacks node, and visible publicly. +(define-read-only (get-stacking-minimum) + (/ stx-liquid-supply STACKING_THRESHOLD_25)) + +;; Is the address mode valid for a PoX address? +(define-read-only (check-pox-addr-version (version (buff 1))) + (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION)) + +;; Is this buffer the right length for the given PoX address? +(define-read-only (check-pox-addr-hashbytes (version (buff 1)) (hashbytes (buff 32))) + (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_20) + (is-eq (len hashbytes) u20) + (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_32) + (is-eq (len hashbytes) u32) + false))) + +;; Is the given lock period valid? 
+(define-read-only (check-pox-lock-period (lock-period uint)) + (and (>= lock-period MIN_POX_REWARD_CYCLES) + (<= lock-period MAX_POX_REWARD_CYCLES))) + +;; Evaluate if a participant can stack an amount of STX for a given period. +;; This method is designed as a read-only method so that it can be used as +;; a set of guard conditions and also as a read-only RPC call that can be +;; performed beforehand. +(define-read-only (can-stack-stx (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (amount-ustx uint) + (first-reward-cycle uint) + (num-cycles uint)) + (begin + ;; minimum uSTX must be met + (asserts! (<= (get-stacking-minimum) amount-ustx) + (err ERR_STACKING_THRESHOLD_NOT_MET)) + + (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle num-cycles))) + +;; Evaluate if a participant can stack an amount of STX for a given period. +;; This method is designed as a read-only method so that it can be used as +;; a set of guard conditions and also as a read-only RPC call that can be +;; performed beforehand. +(define-read-only (minimal-can-stack-stx + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (amount-ustx uint) + (first-reward-cycle uint) + (num-cycles uint)) + (begin + ;; amount must be valid + (asserts! (> amount-ustx u0) + (err ERR_STACKING_INVALID_AMOUNT)) + + ;; lock period must be in acceptable range. + (asserts! (check-pox-lock-period num-cycles) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; address version must be valid + (asserts! (check-pox-addr-version (get version pox-addr)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + + ;; address hashbytes must be valid for the version + (asserts! (check-pox-addr-hashbytes (get version pox-addr) (get hashbytes pox-addr)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + + (ok true))) + +;; Revoke contract-caller authorization to call stacking methods +(define-public (disallow-contract-caller (caller principal)) + (begin + (asserts! 
(is-eq tx-sender contract-caller)
+          (err ERR_STACKING_PERMISSION_DENIED))
+    (ok (map-delete allowance-contract-callers { sender: tx-sender, contract-caller: caller }))))
+
+;; Give a contract-caller authorization to call stacking methods
+;; normally, stacking methods may only be invoked by _direct_ transactions
+;; (i.e., the tx-sender issues a direct contract-call to the stacking methods)
+;; by issuing an allowance, the tx-sender may call through the allowed contract
+(define-public (allow-contract-caller (caller principal) (until-burn-ht (optional uint)))
+  (begin
+    (asserts! (is-eq tx-sender contract-caller)
+              (err ERR_STACKING_PERMISSION_DENIED))
+    (ok (map-set allowance-contract-callers
+               { sender: tx-sender, contract-caller: caller }
+               { until-burn-ht: until-burn-ht }))))
+
+;; Lock up some uSTX for stacking! Note that the given amount here is in micro-STX (uSTX).
+;; The STX will be locked for the given number of reward cycles (lock-period).
+;; This is the self-service interface. tx-sender will be the Stacker.
+;;
+;; * The given stacker cannot currently be stacking.
+;; * You will need the minimum uSTX threshold. This will be determined by (get-stacking-minimum)
+;; at the time this method is called.
+;; * You may need to increase the amount of uSTX locked up later, since the minimum uSTX threshold
+;; may increase between reward cycles.
+;; * You need to provide a signer key to be used in the signer DKG process.
+;; * The Stacker will receive rewards in the reward cycle following `start-burn-ht`.
+;; Importantly, `start-burn-ht` may not be further into the future than the next reward cycle,
+;; and in most cases should be set to the current burn block height.
+;;
+;; To ensure that the Stacker is authorized to use the provided `signer-key`, the stacker
+;; must provide either a signature or have an authorization already saved. Refer to
+;; `verify-signer-key-sig` for more information.
+;; +;; The tokens will unlock and be returned to the Stacker (tx-sender) automatically. +(define-public (stack-stx (amount-ustx uint) + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (start-burn-ht uint) + (lock-period uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + ;; this stacker's first reward cycle is the _next_ reward cycle + (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle))) + (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht)))) + ;; the start-burn-ht must result in the next reward cycle, do not allow stackers + ;; to "post-date" their `stack-stx` transaction + (asserts! (is-eq first-reward-cycle specified-reward-cycle) + (err ERR_INVALID_START_BURN_HEIGHT)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; tx-sender principal must not be stacking + (asserts! (is-none (get-stacker-info tx-sender)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; the Stacker must have sufficient unlocked funds + (asserts! (>= (stx-get-balance tx-sender) amount-ustx) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; Validate ownership of the given signer key + (try! (consume-signer-key-authorization pox-addr (- first-reward-cycle u1) "stack-stx" lock-period signer-sig signer-key amount-ustx max-amount auth-id)) + + ;; ensure that stacking can be performed + (try! (can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period)) + + ;; register the PoX address with the amount stacked + (let ((reward-set-indexes (try! 
(add-pox-addr-to-reward-cycles pox-addr first-reward-cycle lock-period amount-ustx tx-sender signer-key)))) + ;; add stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: none }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: tx-sender, lock-amount: amount-ustx, signer-key: signer-key, unlock-burn-height: (reward-cycle-to-burn-height (+ first-reward-cycle lock-period)) })))) + +;; Revokes the delegation to the current stacking pool. +;; New in pox-4: Fails if the delegation was already revoked. +;; Returns the last delegation state. +(define-public (revoke-delegate-stx) + (let ((last-delegation-state (get-check-delegation tx-sender))) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (asserts! (is-some last-delegation-state) (err ERR_DELEGATION_ALREADY_REVOKED)) + (asserts! (map-delete delegation-state { stacker: tx-sender }) (err ERR_DELEGATION_ALREADY_REVOKED)) + (ok last-delegation-state))) + +;; Delegate to `delegate-to` the ability to stack from a given address. +;; This method _does not_ lock the funds, rather, it allows the delegate +;; to issue the stacking lock. +;; The caller specifies: +;; * amount-ustx: the total amount of ustx the delegate may be allowed to lock +;; * until-burn-ht: an optional burn height at which this delegation expires +;; * pox-addr: an optional address to which any rewards *must* be sent +(define-public (delegate-stx (amount-ustx uint) + (delegate-to principal) + (until-burn-ht (optional uint)) + (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) }))) + + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! 
(check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; delegate-stx no longer requires the delegator to not currently + ;; be stacking. + ;; delegate-stack-* functions assert that + ;; 1. users can't swim in two pools at the same time. + ;; 2. users can't switch pools without cool down cycle. + ;; Other pool admins can't increase or extend. + ;; 3. users can't join a pool while already directly stacking. + + ;; pox-addr, if given, must be valid + (match pox-addr + address + (asserts! (check-pox-addr-version (get version address)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + true) + + (match pox-addr + pox-tuple + (asserts! (check-pox-addr-hashbytes (get version pox-tuple) (get hashbytes pox-tuple)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + true) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; add delegation record + (map-set delegation-state + { stacker: tx-sender } + { amount-ustx: amount-ustx, + delegated-to: delegate-to, + until-burn-ht: until-burn-ht, + pox-addr: pox-addr }) + + (ok true))) + +;; Generate a message hash for validating a signer key. +;; The message hash follows SIP018 for signing structured data. The structured data +;; is the tuple `{ pox-addr: { version, hashbytes }, reward-cycle, auth-id, max-amount }`. +;; The domain is `{ name: "pox-4-signer", version: "1.0.0", chain-id: chain-id }`. +(define-read-only (get-signer-key-message-hash (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (topic (string-ascii 14)) + (period uint) + (max-amount uint) + (auth-id uint)) + (sha256 (concat + SIP018_MSG_PREFIX + (concat + (sha256 (unwrap-panic (to-consensus-buff? { name: "pox-4-signer", version: "1.0.0", chain-id: chain-id }))) + (sha256 (unwrap-panic + (to-consensus-buff? 
{ + pox-addr: pox-addr, + reward-cycle: reward-cycle, + topic: topic, + period: period, + auth-id: auth-id, + max-amount: max-amount, + }))))))) + +;; Verify a signature from the signing key for this specific stacker. +;; See `get-signer-key-message-hash` for details on the message hash. +;; +;; Note that `reward-cycle` corresponds to the _current_ reward cycle, +;; when used with `stack-stx` and `stack-extend`. Both the reward cycle and +;; the lock period are inflexible, which means that the stacker must confirm their transaction +;; during the exact reward cycle and with the exact period that the signature or authorization was +;; generated for. +;; +;; The `amount` field is checked to ensure it is not larger than `max-amount`, which is +;; a field in the authorization. `auth-id` is a random uint to prevent authorization +;; replays. +;; +;; This function does not verify the payload of the authorization. The caller of +;; this function must ensure that the payload (reward cycle, period, topic, and pox-addr) +;; are valid according to the caller function's requirements. +;; +;; When `signer-sig` is present, the public key is recovered from the signature +;; and compared to `signer-key`. If `signer-sig` is `none`, the function verifies that an authorization was previously +;; added for this key. +;; +;; This function checks to ensure that the authorization hasn't been used yet, but it +;; does _not_ store the authorization as used. The function `consume-signer-key-authorization` +;; handles that, and this read-only function is exposed for client-side verification. +(define-read-only (verify-signer-key-sig (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (topic (string-ascii 14)) + (period uint) + (signer-sig-opt (optional (buff 65))) + (signer-key (buff 33)) + (amount uint) + (max-amount uint) + (auth-id uint)) + (begin + ;; Validate that amount is less than or equal to `max-amount` + (asserts! 
(>= max-amount amount) (err ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH)) + (asserts! (is-none (map-get? used-signer-key-authorizations { signer-key: signer-key, reward-cycle: reward-cycle, topic: topic, period: period, pox-addr: pox-addr, auth-id: auth-id, max-amount: max-amount })) + (err ERR_SIGNER_AUTH_USED)) + (match signer-sig-opt + ;; `signer-sig` is present, verify the signature + signer-sig (ok (asserts! + (is-eq + (unwrap! (secp256k1-recover? + (get-signer-key-message-hash pox-addr reward-cycle topic period max-amount auth-id) + signer-sig) (err ERR_INVALID_SIGNATURE_RECOVER)) + signer-key) + (err ERR_INVALID_SIGNATURE_PUBKEY))) + ;; `signer-sig` is not present, verify that an authorization was previously added for this key + (ok (asserts! (default-to false (map-get? signer-key-authorizations + { signer-key: signer-key, reward-cycle: reward-cycle, period: period, topic: topic, pox-addr: pox-addr, auth-id: auth-id, max-amount: max-amount })) + (err ERR_NOT_ALLOWED))) + )) + ) + +;; This function does two things: +;; +;; - Verify that a signer key is authorized to be used +;; - Updates the `used-signer-key-authorizations` map to prevent reuse +;; +;; This "wrapper" method around `verify-signer-key-sig` allows that function to remain +;; read-only, so that it can be used by clients as a sanity check before submitting a transaction. +(define-private (consume-signer-key-authorization (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (topic (string-ascii 14)) + (period uint) + (signer-sig-opt (optional (buff 65))) + (signer-key (buff 33)) + (amount uint) + (max-amount uint) + (auth-id uint)) + (begin + ;; verify the authorization + (try! (verify-signer-key-sig pox-addr reward-cycle topic period signer-sig-opt signer-key amount max-amount auth-id)) + ;; update the `used-signer-key-authorizations` map + (asserts! 
(map-insert used-signer-key-authorizations + { signer-key: signer-key, reward-cycle: reward-cycle, topic: topic, period: period, pox-addr: pox-addr, auth-id: auth-id, max-amount: max-amount } true) + (err ERR_SIGNER_AUTH_USED)) + (ok true))) + +;; Commit partially stacked STX and allocate a new PoX reward address slot. +;; This allows a stacker/delegate to lock fewer STX than the minimal threshold in multiple transactions, +;; so long as: 1. The pox-addr is the same. +;; 2. This "commit" transaction is called _before_ the PoX anchor block. +;; This ensures that each entry in the reward set returned to the stacks-node is greater than the threshold, +;; but does not require it be all locked up within a single transaction +;; +;; Returns (ok uint) on success, where the given uint is the reward address's index in the list of reward +;; addresses allocated in this reward cycle. This index can then be passed to `stack-aggregation-increase` +;; to later increment the STX this PoX address represents, in amounts less than the stacking minimum. +;; +;; *New in Stacks 2.1.* +(define-private (inner-stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (let ((amount-ustx (get stacked-amount partial-stacked))) + (try! (consume-signer-key-authorization pox-addr reward-cycle "agg-commit" u1 signer-sig signer-key amount-ustx max-amount auth-id)) + (try! 
(can-stack-stx pox-addr amount-ustx reward-cycle u1)) + ;; Add the pox addr to the reward cycle, and extract the index of the PoX address + ;; so the delegator can later use it to call stack-aggregation-increase. + (let ((add-pox-addr-info + (add-pox-addr-to-ith-reward-cycle + u0 + { pox-addr: pox-addr, + first-reward-cycle: reward-cycle, + num-cycles: u1, + reward-set-indexes: (list), + stacker: none, + signer: signer-key, + amount-ustx: amount-ustx, + i: u0 })) + (pox-addr-index (unwrap-panic + (element-at (get reward-set-indexes add-pox-addr-info) u0)))) + + ;; don't update the stacking-state map, + ;; because it _already has_ this stacker's state + ;; don't lock the STX, because the STX is already locked + ;; + ;; clear the partial-stacked state, and log it + (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked) + (ok pox-addr-index))))) + +;; Legacy interface for stack-aggregation-commit. +;; Wraps inner-stack-aggregation-commit. See its docstring for details. +;; Returns (ok true) on success +;; Returns (err ...) on failure. +(define-public (stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (match (inner-stack-aggregation-commit pox-addr reward-cycle signer-sig signer-key max-amount auth-id) + pox-addr-index (ok true) + commit-err (err commit-err))) + +;; Public interface to `inner-stack-aggregation-commit`. See its documentation for details. 
+;; *New in Stacks 2.1.* +(define-public (stack-aggregation-commit-indexed (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (inner-stack-aggregation-commit pox-addr reward-cycle signer-sig signer-key max-amount auth-id)) + +;; Commit partially stacked STX to a PoX address which has already received some STX (more than the Stacking min). +;; This allows a delegator to lock up marginally more STX from new delegates, even if they collectively do not +;; exceed the Stacking minimum, so long as the target PoX address already represents at least as many STX as the +;; Stacking minimum. +;; +;; The `reward-cycle-index` is emitted as a contract event from `stack-aggregation-commit` when the initial STX are +;; locked up by this delegator. It must be passed here to add more STX behind this PoX address. If the delegator +;; called `stack-aggregation-commit` multiple times for the same PoX address, then any such `reward-cycle-index` will +;; work here. +;; +;; *New in Stacks 2.1* +;; +(define-public (stack-aggregation-increase (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (reward-cycle-index uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; reward-cycle must be in the future + (asserts! 
(> reward-cycle (current-pox-reward-cycle)) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((partial-amount-ustx (get stacked-amount partial-stacked)) + ;; reward-cycle and reward-cycle-index must point to an existing record in reward-cycle-pox-address-list + (existing-entry (unwrap! (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }) + (err ERR_DELEGATION_NO_REWARD_SLOT))) + ;; reward-cycle must point to an existing record in reward-cycle-total-stacked + ;; infallible; getting existing-entry succeeded so this must succeed + (existing-cycle (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) + (increased-entry-total (+ (get total-ustx existing-entry) partial-amount-ustx)) + (increased-cycle-total (+ (get total-ustx existing-cycle) partial-amount-ustx)) + (existing-signer-key (get signer existing-entry))) + + ;; must be stackable + (try! (minimal-can-stack-stx pox-addr increased-entry-total reward-cycle u1)) + + ;; new total must exceed the stacking minimum + (asserts! (<= (get-stacking-minimum) increased-entry-total) + (err ERR_STACKING_THRESHOLD_NOT_MET)) + + ;; there must *not* be a stacker entry (since this is a delegator) + (asserts! (is-none (get stacker existing-entry)) + (err ERR_DELEGATION_WRONG_REWARD_SLOT)) + + ;; the given PoX address must match the one on record + (asserts! (is-eq pox-addr (get pox-addr existing-entry)) + (err ERR_DELEGATION_WRONG_REWARD_SLOT)) + + ;; Validate that amount is less than or equal to `max-amount` + (asserts! (>= max-amount increased-entry-total) (err ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH)) + + ;; Validate that signer-key matches the existing signer-key + (asserts! (is-eq existing-signer-key signer-key) (err ERR_INVALID_SIGNER_KEY)) + + ;; Verify signature from delegate that allows this sender for this cycle + ;; 'lock-period' param set to one period, same as aggregation-commit-indexed + (try! 
(consume-signer-key-authorization pox-addr reward-cycle "agg-increase" u1 signer-sig signer-key increased-entry-total max-amount auth-id)) + + ;; update the pox-address list -- bump the total-ustx + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: reward-cycle-index } + { pox-addr: pox-addr, + total-ustx: increased-entry-total, + stacker: none, + signer: signer-key }) + + ;; update the total ustx in this cycle + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: increased-cycle-total }) + + ;; don't update the stacking-state map, + ;; because it _already has_ this stacker's state + ;; don't lock the STX, because the STX is already locked + ;; + ;; clear the partial-stacked state, and log it + (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked) + (ok true)))) + +;; As a delegate, stack the given principal's STX using partial-stacked-by-cycle +;; Once the delegate has stacked > minimum, the delegate should call stack-aggregation-commit +(define-public (delegate-stack-stx (stacker principal) + (amount-ustx uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (start-burn-ht uint) + (lock-period uint)) + ;; this stacker's first reward cycle is the _next_ reward cycle + (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle))) + (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht))) + (unlock-burn-height (reward-cycle-to-burn-height (+ (current-pox-reward-cycle) u1 lock-period)))) + ;; the start-burn-ht must result in the next reward cycle, do not allow stackers + ;; to "post-date" their `stack-stx` transaction + (asserts! 
(is-eq first-reward-cycle specified-reward-cycle) + (err ERR_INVALID_START_BURN_HEIGHT)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED)))) + ;; must have delegated to tx-sender + (asserts! (is-eq (get delegated-to delegation-info) tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= (get amount-ustx delegation-info) amount-ustx) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match (get pox-addr delegation-info) + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match (get until-burn-ht delegation-info) + until-burn-ht (>= until-burn-ht + unlock-burn-height) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK)) + ) + + ;; stacker principal must not be stacking + (asserts! (is-none (get-stacker-info stacker)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; the Stacker must have sufficient unlocked funds + (asserts! (>= (stx-get-balance stacker) amount-ustx) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; ensure that stacking can be performed + (try! (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period)) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! 
+ (add-pox-partial-stacked pox-addr first-reward-cycle lock-period amount-ustx) + + ;; add stacker record + (map-set stacking-state + { stacker: stacker } + { pox-addr: pox-addr, + first-reward-cycle: first-reward-cycle, + reward-set-indexes: (list), + lock-period: lock-period, + delegated-to: (some tx-sender) }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, + lock-amount: amount-ustx, + unlock-burn-height: unlock-burn-height }))) + + +;; Used for PoX parameters discovery +(define-read-only (get-pox-info) + (ok { + min-amount-ustx: (get-stacking-minimum), + reward-cycle-id: (current-pox-reward-cycle), + prepare-cycle-length: (var-get pox-prepare-cycle-length), + first-burnchain-block-height: (var-get first-burnchain-block-height), + reward-cycle-length: (var-get pox-reward-cycle-length), + total-liquid-supply-ustx: stx-liquid-supply, + }) +) + +;; Update the number of stacked STX in a given reward cycle entry. +;; `reward-cycle-index` is the index into the `reward-cycle-pox-address-list` map for a given reward cycle number. +;; `updates`, if `(some ..)`, encodes which PoX reward cycle entry (if any) gets updated. In particular, it must have +;; `(some stacker)` as the listed stacker, and must be an upcoming reward cycle. +(define-private (increase-reward-cycle-entry + (reward-cycle-index uint) + (updates (optional { first-cycle: uint, reward-cycle: uint, stacker: principal, add-amount: uint, signer-key: (buff 33) }))) + (let ((data (try! updates)) + (first-cycle (get first-cycle data)) + (reward-cycle (get reward-cycle data)) + (passed-signer-key (get signer-key data))) + (if (> first-cycle reward-cycle) + ;; not at first cycle to process yet + (some { first-cycle: first-cycle, reward-cycle: (+ u1 reward-cycle), stacker: (get stacker data), add-amount: (get add-amount data), signer-key: (get signer-key data) }) + (let ((existing-entry (unwrap-panic (map-get? 
reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }))) + (existing-total (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) + (existing-signer-key (get signer existing-entry)) + (add-amount (get add-amount data)) + (total-ustx (+ (get total-ustx existing-total) add-amount))) + ;; stacker must match + (asserts! (is-eq (get stacker existing-entry) (some (get stacker data))) none) + ;; signer-key must match + (asserts! (is-eq existing-signer-key passed-signer-key) none) + ;; update the pox-address list + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: reward-cycle-index } + { pox-addr: (get pox-addr existing-entry), + ;; This addresses the bug in pox-2 (see SIP-022) + total-ustx: (+ (get total-ustx existing-entry) add-amount), + stacker: (some (get stacker data)), + signer: (get signer existing-entry) }) + ;; update the total + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: total-ustx }) + (some { first-cycle: first-cycle, + reward-cycle: (+ u1 reward-cycle), + stacker: (get stacker data), + add-amount: (get add-amount data), + signer-key: passed-signer-key }))))) + +;; Increase the number of STX locked. +;; *New in Stacks 2.1* +;; This method locks up an additional amount of STX from `tx-sender`'s, indicated +;; by `increase-by`. The `tx-sender` must already be Stacking & must not be +;; straddling more than one signer-key for the cycles effected. +;; Refer to `verify-signer-key-sig` for more information on the authorization parameters +;; included here. 
+(define-public (stack-increase + (increase-by uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (let ((stacker-info (stx-account tx-sender)) + (amount-stacked (get locked stacker-info)) + (amount-unlocked (get unlocked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + (cur-cycle (current-pox-reward-cycle)) + (first-increased-cycle (+ cur-cycle u1)) + (stacker-state (unwrap! (map-get? stacking-state + { stacker: tx-sender }) + (err ERR_STACK_INCREASE_NOT_LOCKED))) + (cur-pox-addr (get pox-addr stacker-state)) + (cur-period (get lock-period stacker-state))) + ;; tx-sender must be currently locked + (asserts! (> amount-stacked u0) + (err ERR_STACK_INCREASE_NOT_LOCKED)) + ;; must be called with positive `increase-by` + (asserts! (>= increase-by u1) + (err ERR_STACKING_INVALID_AMOUNT)) + ;; stacker must have enough stx to lock + (asserts! (>= amount-unlocked increase-by) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; stacker must be directly stacking + (asserts! (> (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_IS_DELEGATED)) + ;; stacker must not be delegating + (asserts! (is-none (get delegated-to stacker-state)) + (err ERR_STACKING_IS_DELEGATED)) + + ;; Validate that amount is less than or equal to `max-amount` + (asserts! (>= max-amount (+ increase-by amount-stacked)) (err ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH)) + + ;; Verify signature from delegate that allows this sender for this cycle + (try! (consume-signer-key-authorization cur-pox-addr cur-cycle "stack-increase" cur-period signer-sig signer-key increase-by max-amount auth-id)) + + ;; update reward cycle amounts + (asserts! 
(is-some (fold increase-reward-cycle-entry
+                (get reward-set-indexes stacker-state)
+                (some { first-cycle: first-increased-cycle,
+                        reward-cycle: (get first-reward-cycle stacker-state),
+                        stacker: tx-sender,
+                        add-amount: increase-by,
+                        signer-key: signer-key })))
+              (err ERR_INVALID_INCREASE))
+    ;; NOTE: stacking-state map is unchanged: it does not track amount-stacked in PoX-4
+    (ok { stacker: tx-sender, total-locked: (+ amount-stacked increase-by)})))
+
+;; Extend an active Stacking lock.
+;; *New in Stacks 2.1*
+;; This method extends the `tx-sender`'s current lockup for an additional `extend-count`
+;; and associates `pox-addr` with the rewards. The `signer-key` will be the key
+;; used for signing. The `tx-sender` can thus decide to change the key when extending.
+;;
+;; Because no additional STX are locked in this function, the `amount` field used
+;; to verify the signer key authorization is zero. Refer to `verify-signer-key-sig` for more information.
+(define-public (stack-extend (extend-count uint)
+                             (pox-addr { version: (buff 1), hashbytes: (buff 32) })
+                             (signer-sig (optional (buff 65)))
+                             (signer-key (buff 33))
+                             (max-amount uint)
+                             (auth-id uint))
+   (let ((stacker-info (stx-account tx-sender))
+         ;; to extend, there must already be an entry in the stacking-state
+         (stacker-state (unwrap! (get-stacker-info tx-sender) (err ERR_STACK_EXTEND_NOT_LOCKED)))
+         (amount-ustx (get locked stacker-info))
+         (unlock-height (get unlock-height stacker-info))
+         (cur-cycle (current-pox-reward-cycle))
+         ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked
+         (first-extend-cycle (burn-height-to-reward-cycle unlock-height))
+         ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle)
+         (cur-first-reward-cycle (get first-reward-cycle stacker-state))
+         (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle)))
+
+    ;; must be called with positive extend-count
+    (asserts!
(>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; stacker must be directly stacking + (asserts! (> (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_IS_DELEGATED)) + + ;; stacker must not be delegating + (asserts! (is-none (get delegated-to stacker-state)) + (err ERR_STACKING_IS_DELEGATED)) + + ;; Verify signature from delegate that allows this sender for this cycle + (try! (consume-signer-key-authorization pox-addr cur-cycle "stack-extend" extend-count signer-sig signer-key u0 max-amount auth-id)) + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! (> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; tx-sender must be locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; standard can-stack-stx checks + (try! (can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked + ;; for the new cycles + (let ((extended-reward-set-indexes (try! 
(add-pox-addr-to-reward-cycles pox-addr first-extend-cycle extend-count amount-ustx tx-sender signer-key))) + (reward-set-indexes + ;; use the active stacker state and extend the existing reward-set-indexes + (let ((cur-cycle-index (- first-reward-cycle (get first-reward-cycle stacker-state))) + (old-indexes (get reward-set-indexes stacker-state)) + ;; build index list by taking the old-indexes starting from cur cycle + ;; and adding the new indexes to it. this way, the index is valid starting from the current cycle + (new-list (concat (default-to (list) (slice? old-indexes cur-cycle-index (len old-indexes))) + extended-reward-set-indexes))) + (unwrap-panic (as-max-len? new-list u12))))) + ;; update stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: none }) + + ;; return lock-up information + (ok { stacker: tx-sender, unlock-burn-height: new-unlock-ht }))))) + +;; As a delegator, increase an active Stacking lock, issuing a "partial commitment" for the +;; increased cycles. +;; *New in Stacks 2.1* +;; This method increases `stacker`'s current lockup and partially commits the additional +;; STX to `pox-addr` +(define-public (delegate-stack-increase + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (increase-by uint)) + (let ((stacker-info (stx-account stacker)) + (existing-lock (get locked stacker-info)) + (available-stx (get unlocked stacker-info)) + (unlock-height (get unlock-height stacker-info))) + + ;; must be called with positive `increase-by` + (asserts! (>= increase-by u1) + (err ERR_STACKING_INVALID_AMOUNT)) + + (let ((unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + (first-increase-cycle (+ cur-cycle u1)) + (last-increase-cycle (- unlock-in-cycle u1)) + (cycle-count (try! 
(if (<= first-increase-cycle last-increase-cycle) + (ok (+ u1 (- last-increase-cycle first-increase-cycle))) + (err ERR_STACKING_INVALID_LOCK_PERIOD)))) + (new-total-locked (+ increase-by existing-lock)) + (stacker-state + (unwrap! (map-get? stacking-state { stacker: stacker }) + (err ERR_STACK_INCREASE_NOT_LOCKED)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must be currently locked + (asserts! (> existing-lock u0) + (err ERR_STACK_INCREASE_NOT_LOCKED)) + + ;; stacker must have enough stx to lock + (asserts! (>= available-stx increase-by) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED))) + (delegated-to (get delegated-to delegation-info)) + (delegated-amount (get amount-ustx delegation-info)) + (delegated-pox-addr (get pox-addr delegation-info)) + (delegated-until (get until-burn-ht delegation-info))) + ;; must have delegated to tx-sender + (asserts! (is-eq delegated-to tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= delegated-amount new-total-locked) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match delegated-pox-addr + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! 
(match delegated-until + until-burn-ht + (>= until-burn-ht unlock-height) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; delegate stacking does minimal-can-stack-stx + (try! (minimal-can-stack-stx pox-addr new-total-locked first-increase-cycle (+ u1 (- last-increase-cycle first-increase-cycle)))) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! + (add-pox-partial-stacked pox-addr first-increase-cycle cycle-count increase-by) + + ;; stacking-state is unchanged, so no need to update + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, total-locked: new-total-locked})))) + +;; As a delegator, extend an active stacking lock, issuing a "partial commitment" for the +;; extended-to cycles. +;; *New in Stacks 2.1* +;; This method extends `stacker`'s current lockup for an additional `extend-count` +;; and partially commits those new cycles to `pox-addr` +(define-public (delegate-stack-extend + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (extend-count uint)) + (let ((stacker-info (stx-account stacker)) + ;; to extend, there must already be an entry in the stacking-state + (stacker-state (unwrap! (get-stacker-info stacker) (err ERR_STACK_EXTEND_NOT_LOCKED))) + (amount-ustx (get locked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked + (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) + (cur-first-reward-cycle (get first-reward-cycle stacker-state)) + (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) + + ;; must be called with positive extend-count + (asserts! 
(>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! (> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; check valid lock period + (asserts! (check-pox-lock-period lock-period) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; stacker must be currently locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED)))) + ;; must have delegated to tx-sender + (asserts! (is-eq (get delegated-to delegation-info) tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= (get amount-ustx delegation-info) amount-ustx) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match (get pox-addr delegation-info) + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! 
(match (get until-burn-ht delegation-info) + until-burn-ht (>= until-burn-ht + new-unlock-ht) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK)) + ) + + ;; delegate stacking does minimal-can-stack-stx + (try! (minimal-can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! + (add-pox-partial-stacked pox-addr first-extend-cycle extend-count amount-ustx) + + (map-set stacking-state + { stacker: stacker } + { pox-addr: pox-addr, + reward-set-indexes: (list), + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: (some tx-sender) }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, + unlock-burn-height: new-unlock-ht })))) + +;; Add an authorization for a signer key. +;; When an authorization is added, the `signer-sig` argument is not required +;; in the functions that use it as an argument. +;; The `allowed` flag can be used to either enable or disable the authorization. +;; Only the Stacks principal associated with `signer-key` can call this function. +;; +;; Refer to the documentation for `verify-signer-key-sig` for more information +;; regarding the parameters used in an authorization. When the authorization is used +;; in `stack-stx` and `stack-extend`, the `reward-cycle` refers to the reward cycle +;; where the transaction is confirmed, **not** the reward cycle where stacking begins. +;; The `period` parameter must match the exact lock period (or extend count) used +;; in the stacking transaction. The `max-amount` parameter specifies the maximum amount +;; of STX that can be locked in an individual stacking transaction. `auth-id` is a +;; random uint to prevent replays. 
+;; +;; *New in Stacks 3.0* +(define-public (set-signer-key-authorization (pox-addr { version: (buff 1), hashbytes: (buff 32)}) + (period uint) + (reward-cycle uint) + (topic (string-ascii 14)) + (signer-key (buff 33)) + (allowed bool) + (max-amount uint) + (auth-id uint)) + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_NOT_ALLOWED)) + ;; Validate that `tx-sender` has the same pubkey hash as `signer-key` + (asserts! (is-eq + (unwrap! (principal-construct? (if is-in-mainnet STACKS_ADDR_VERSION_MAINNET STACKS_ADDR_VERSION_TESTNET) (hash160 signer-key)) (err ERR_INVALID_SIGNER_KEY)) + tx-sender) (err ERR_NOT_ALLOWED)) + ;; Must be called with positive period + (asserts! (>= period u1) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; Must be current or future reward cycle + (asserts! (>= reward-cycle (current-pox-reward-cycle)) (err ERR_INVALID_REWARD_CYCLE)) + (map-set signer-key-authorizations { pox-addr: pox-addr, period: period, reward-cycle: reward-cycle, topic: topic, signer-key: signer-key, auth-id: auth-id, max-amount: max-amount } allowed) + (ok allowed))) + +;; Get the _current_ PoX stacking delegation information for a stacker. If the information +;; is expired, or if there's never been such a stacker, then returns none. +;; *New in Stacks 2.1* +(define-read-only (get-delegation-info (stacker principal)) + (get-check-delegation stacker) +) + +;; Get the burn height at which a particular contract is allowed to stack for a particular principal. +;; *New in Stacks 2.1* +;; Returns (some (some X)) if X is the burn height at which the allowance terminates +;; Returns (some none) if the caller is allowed indefinitely +;; Returns none if there is no allowance record +(define-read-only (get-allowance-contract-callers (sender principal) (calling-contract principal)) + (map-get? 
allowance-contract-callers { sender: sender, contract-caller: calling-contract }) +) + +;; How many PoX addresses in this reward cycle? +;; *New in Stacks 2.1* +(define-read-only (get-num-reward-set-pox-addresses (reward-cycle uint)) + (match (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle }) + num-addrs + (get len num-addrs) + u0 + ) +) + +;; How many uSTX have been locked up for this address so far, before the delegator commits them? +;; *New in Stacks 2.1* +(define-read-only (get-partial-stacked-by-cycle (pox-addr { version: (buff 1), hashbytes: (buff 32) }) (reward-cycle uint) (sender principal)) + (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, reward-cycle: reward-cycle, sender: sender }) +) diff --git a/contrib/boot-contracts-unit-tests/contracts/indirect.clar b/contrib/boot-contracts-unit-tests/contracts/indirect.clar new file mode 100644 index 0000000000..c9889b00d2 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/contracts/indirect.clar @@ -0,0 +1,112 @@ +(define-public (stack-stx (amount-ustx uint) + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (start-burn-ht uint) + (lock-period uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 stack-stx amount-ustx pox-addr start-burn-ht lock-period signer-sig signer-key max-amount auth-id) +) + +(define-public (stack-increase + (increase-by uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 stack-increase increase-by signer-sig signer-key max-amount auth-id) +) + +(define-public (stack-extend (extend-count uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 
'ST000000000000000000002AMW42H.pox-4 stack-extend extend-count pox-addr signer-sig signer-key max-amount auth-id) +) + +(define-public (delegate-stx (amount-ustx uint) + (delegate-to principal) + (until-burn-ht (optional uint)) + (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) }))) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 delegate-stx amount-ustx delegate-to until-burn-ht pox-addr) +) + +(define-public (revoke-delegate-stx) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 revoke-delegate-stx) +) + +(define-public (allow-contract-caller (caller principal) (until-burn-ht (optional uint))) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 allow-contract-caller caller until-burn-ht) +) + +(define-public (disallow-contract-caller (caller principal)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 disallow-contract-caller caller) +) + +(define-read-only (check-caller-allowed) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 check-caller-allowed) +) + +(define-public (delegate-stack-stx (stacker principal) + (amount-ustx uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (start-burn-ht uint) + (lock-period uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 delegate-stack-stx stacker amount-ustx pox-addr start-burn-ht lock-period) +) + +(define-public (stack-aggregation-commit-indexed (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 stack-aggregation-commit-indexed pox-addr reward-cycle signer-sig signer-key max-amount auth-id) +) + +(define-public (stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 
'ST000000000000000000002AMW42H.pox-4 stack-aggregation-commit pox-addr reward-cycle signer-sig signer-key max-amount auth-id) +) + +(define-public (stack-aggregation-increase (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (reward-cycle-index uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 stack-aggregation-increase pox-addr reward-cycle reward-cycle-index signer-sig signer-key max-amount auth-id) +) + +(define-public (delegate-stack-extend + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (extend-count uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 delegate-stack-extend stacker pox-addr extend-count) +) + +(define-public (delegate-stack-increase + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (increase-by uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 delegate-stack-increase stacker pox-addr increase-by) +) + +(define-public (set-signer-key-authorization (pox-addr { version: (buff 1), hashbytes: (buff 32)}) + (period uint) + (reward-cycle uint) + (topic (string-ascii 14)) + (signer-key (buff 33)) + (allowed bool) + (max-amount uint) + (auth-id uint)) + (contract-call? 
'ST000000000000000000002AMW42H.pox-4 set-signer-key-authorization pox-addr period reward-cycle topic signer-key allowed max-amount auth-id) +) diff --git a/contrib/boot-contracts-unit-tests/deployments/default.simnet-plan.yaml b/contrib/boot-contracts-unit-tests/deployments/default.simnet-plan.yaml new file mode 100644 index 0000000000..ab7335aae2 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/deployments/default.simnet-plan.yaml @@ -0,0 +1,57 @@ +--- +id: 0 +name: "Simulated deployment, used as a default for `clarinet console`, `clarinet test` and `clarinet check`" +network: simnet +genesis: + wallets: + - name: deployer + address: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM + balance: "100000000000000" + - name: faucet + address: STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6 + balance: "100000000000000" + - name: wallet_1 + address: ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5 + balance: "100000000000000" + - name: wallet_2 + address: ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG + balance: "100000000000000" + - name: wallet_3 + address: ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC + balance: "100000000000000" + - name: wallet_4 + address: ST2NEB84ASENDXKYGJPQW86YXQCEFEX2ZQPG87ND + balance: "100000000000000" + - name: wallet_5 + address: ST2REHHS5J3CERCRBEPMGH7921Q6PYKAADT7JP2VB + balance: "100000000000000" + - name: wallet_6 + address: ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0 + balance: "100000000000000" + - name: wallet_7 + address: ST3PF13W7Z0RRM42A8VZRVFQ75SV1K26RXEP8YGKJ + balance: "100000000000000" + - name: wallet_8 + address: ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP + balance: "100000000000000" + contracts: + - costs + - pox + - pox-2 + - pox-3 + - pox-4 + - lockup + - costs-2 + - costs-3 + - cost-voting + - bns +plan: + batches: + - id: 0 + transactions: + - emulated-contract-publish: + contract-name: indirect + emulated-sender: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM + path: contracts/indirect.clar + clarity-version: 2 + epoch: "2.4" diff --git 
a/contrib/boot-contracts-unit-tests/package-lock.json b/contrib/boot-contracts-unit-tests/package-lock.json new file mode 100644 index 0000000000..bee7c735ff --- /dev/null +++ b/contrib/boot-contracts-unit-tests/package-lock.json @@ -0,0 +1,2359 @@ +{ + "name": "boot-contracts-unit-tests-tests", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "boot-contracts-unit-tests-tests", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "@hirosystems/clarinet-sdk": "^2.6.0", + "@stacks/encryption": "^6.13.1", + "@stacks/network": "^6.13.0", + "@stacks/stacking": "^6.14.0", + "@stacks/transactions": "^6.13.1", + "chokidar-cli": "^3.0.0", + "typescript": "^5.3.3", + "vite": "^5.1.4", + "vitest": "^1.5.2", + "vitest-environment-clarinet": "^2.1.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.20.2.tgz", + "integrity": "sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.20.2.tgz", + "integrity": "sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.20.2.tgz", + "integrity": "sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { 
+ "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.20.2.tgz", + "integrity": "sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.20.2.tgz", + "integrity": "sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.20.2.tgz", + "integrity": "sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.20.2.tgz", + "integrity": "sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.20.2.tgz", + "integrity": "sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.20.2", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.20.2.tgz", + "integrity": "sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.20.2.tgz", + "integrity": "sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.20.2.tgz", + "integrity": "sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.20.2.tgz", + "integrity": "sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==", + "cpu": [ + "loong64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.20.2.tgz", + "integrity": "sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==", + "cpu": [ + "mips64el" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.20.2.tgz", + 
"integrity": "sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.20.2.tgz", + "integrity": "sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.20.2.tgz", + "integrity": "sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.20.2.tgz", + "integrity": "sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.20.2.tgz", + "integrity": "sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.20.2.tgz", + "integrity": 
"sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.20.2.tgz", + "integrity": "sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.20.2.tgz", + "integrity": "sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.20.2.tgz", + "integrity": "sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.20.2.tgz", + "integrity": "sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@hirosystems/clarinet-sdk": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk/-/clarinet-sdk-2.6.0.tgz", + "integrity": "sha512-8qyvpaeTmhn/Lrsg7zjNpIr9Ova1zVfzMNeBC4+y42tqxHX0j6MM58nr5m56bz5/0u+KPOvQpAhuVxGR27/NiA==", + 
"dependencies": { + "@hirosystems/clarinet-sdk-wasm": "^2.6.0", + "@stacks/encryption": "^6.13.0", + "@stacks/network": "^6.13.0", + "@stacks/stacking": "^6.13.0", + "@stacks/transactions": "^6.13.0", + "kolorist": "^1.8.0", + "prompts": "^2.4.2", + "vitest": "^1.0.4", + "yargs": "^17.7.2" + }, + "bin": { + "clarinet-sdk": "dist/cjs/bin/index.js" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@hirosystems/clarinet-sdk-wasm": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk-wasm/-/clarinet-sdk-wasm-2.6.0.tgz", + "integrity": "sha512-cUpYrnLX4VnpnumlYTCUNf1gFfl2kL18q63C1qFzUzkjFszffR+x0U2lxOQrz3EY3/U6eWeZvZPdKbOFO3zgqQ==" + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@noble/hashes": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.1.5.tgz", + "integrity": "sha512-LTMZiiLc+V4v1Yi16TD6aX2gmtKszNye0pQgbaLqkvhIqP7nVsSaJsWloGQjJfJ8offaoP5GtX3yY5swbcJxxQ==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@noble/secp256k1": { + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@noble/secp256k1/-/secp256k1-1.7.1.tgz", + "integrity": "sha512-hOUk6AyBFmqVrv7k5WAw/LpszxVbj9gGN4JRkIX52fdFAj1UA61KXmZDvqVEm+pOyec3+fIeZB02LYa/pWOArw==", + "funding": [ + { + "type": 
"individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.16.1.tgz", + "integrity": "sha512-92/y0TqNLRYOTXpm6Z7mnpvKAG9P7qmK7yJeRJSdzElNCUnsgbpAsGqerUboYRIQKzgfq4pWu9xVkgpWLfmNsw==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.16.1.tgz", + "integrity": "sha512-ttWB6ZCfRLuDIUiE0yiu5gcqOsYjA5F7kEV1ggHMj20FwLZ8A1FMeahZJFl/pnOmcnD2QL0z4AcDuo27utGU8A==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.16.1.tgz", + "integrity": "sha512-QLDvPLetbqjHojTGFw9+nuSP3YY/iz2k1cep6crYlr97sS+ZJ0W43b8Z0zC00+lnFZj6JSNxiA4DjboNQMuh1A==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.16.1.tgz", + "integrity": "sha512-TAUK/D8khRrRIa1KwRzo8JNKk3tcqaeXWdtsiLgA8zmACWwlWLjPCJ4DULGHQrMkeBjp1Cd3Yuwx04lZgFx5Vg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.16.1.tgz", + "integrity": "sha512-KO+WGZjrh6zyFTD1alIFkfdtxf8B4BC+hqd3kBZHscPLvE5FR/6QKsyuCT0JlERxxYBSUKNUQ/UHyX5uwO1x2A==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": 
"4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.16.1.tgz", + "integrity": "sha512-NqxbllzIB1WoAo4ThUXVtd21iiM5IHMTTXmXySKBLVcZvkU0HIZmatlP7hLzb5yQubcmdIeWmncd2NdsjocEiw==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.16.1.tgz", + "integrity": "sha512-snma5NvV8y7IECQ5rq0sr0f3UUu+92NVmG/913JXJMcXo84h9ak9TA5UI9Cl2XRM9j3m37QwDBtEYnJzRkSmxA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.16.1.tgz", + "integrity": "sha512-KOvqGprlD84ueivhCi2flvcUwDRD20mAsE3vxQNVEI2Di9tnPGAfEu6UcrSPZbM+jG2w1oSr43hrPo0RNg6GGg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.16.1.tgz", + "integrity": "sha512-/gsNwtiGLqYwN4vP+EIdUC6Q6LTlpupWqokqIndvZcjn9ig/5P01WyaYCU2wvfL/2Z82jp5kX8c1mDBOvCP3zg==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.16.1.tgz", + "integrity": "sha512-uU8zuGkQfGqfD9w6VRJZI4IuG4JIfNxxJgEmLMAmPVHREKGsxFVfgHy5c6CexQF2vOfgjB33OsET3Vdn2lln9A==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.16.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.16.1.tgz", + "integrity": "sha512-lsjLtDgtcGFEuBP6yrXwkRN5/wKlvUZtfbKZZu0yaoNpiBL4epgnO21osAALIspVRnl4qZgyLFd8xjCYYWgwfw==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.16.1.tgz", + "integrity": "sha512-N2ZizKhUryqqrMfdCnjhJhZRgv61C6gK+hwVtCIKC8ts8J+go+vqENnGexwg21nHIOvLN5mBM8a7DI2vlyIOPg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.16.1.tgz", + "integrity": "sha512-5ICeMxqg66FrOA2AbnBQ2TJVxfvZsKLxmof0ibvPLaYtbsJqnTUtJOofgWb46Gjd4uZcA4rdsp4JCxegzQPqCg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.16.1.tgz", + "integrity": "sha512-1vIP6Ce02L+qWD7uZYRiFiuAJo3m9kARatWmFSnss0gZnVj2Id7OPUU9gm49JPGasgcR3xMqiH3fqBJ8t00yVg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.16.1.tgz", + "integrity": "sha512-Y3M92DcVsT6LoP+wrKpoUWPaazaP1fzbNkp0a0ZSj5Y//+pQVfVe/tQdsYQQy7dwXR30ZfALUIc9PCh9Izir6w==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.16.1.tgz", + "integrity": 
"sha512-x0fvpHMuF7fK5r8oZxSi8VYXkrVmRgubXpO/wcf15Lk3xZ4Jvvh5oG+u7Su1776A7XzVKZhD2eRc4t7H50gL3w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@scure/base": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.1.6.tgz", + "integrity": "sha512-ok9AWwhcgYuGG3Zfhyqg+zwl+Wn5uE+dwC0NV/2qQkx4dABbb/bx96vWu8NSj+BNjjSjno+JRYRjle1jV08k3g==", + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@scure/bip39": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@scure/bip39/-/bip39-1.1.0.tgz", + "integrity": "sha512-pwrPOS16VeTKg98dYXQyIjJEcWfz7/1YJIwxUEPFfQPtc86Ym/1sVgQ2RLoD43AazMk2l/unK4ITySSpW2+82w==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "dependencies": { + "@noble/hashes": "~1.1.1", + "@scure/base": "~1.1.0" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + }, + "node_modules/@stacks/common": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/common/-/common-6.13.0.tgz", + "integrity": "sha512-wwzyihjaSdmL6NxKvDeayy3dqM0L0Q2sawmdNtzJDi0FnXuJGm5PeapJj7bEfcI9XwI7Bw5jZoC6mCn9nc5YIw==", + "dependencies": { + "@types/bn.js": "^5.1.0", + "@types/node": "^18.0.4" + } + }, + "node_modules/@stacks/encryption": { + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.13.1.tgz", + "integrity": "sha512-y5IFX3/nGI3fCk70gE0JwH70GpshD8RhUfvhMLcL96oNaec1cCdj1ZUiQupeicfYTHuraaVBYU9xLls4TRmypg==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@noble/secp256k1": "1.7.1", + "@scure/bip39": "1.1.0", + "@stacks/common": "^6.13.0", + "@types/node": "^18.0.4", + "base64-js": "^1.5.1", + "bs58": "^5.0.0", + "ripemd160-min": "^0.0.6", 
+ "varuint-bitcoin": "^1.1.2" + } + }, + "node_modules/@stacks/network": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/network/-/network-6.13.0.tgz", + "integrity": "sha512-Ss/Da4BNyPBBj1OieM981fJ7SkevKqLPkzoI1+Yo7cYR2df+0FipIN++Z4RfpJpc8ne60vgcx7nJZXQsiGhKBQ==", + "dependencies": { + "@stacks/common": "^6.13.0", + "cross-fetch": "^3.1.5" + } + }, + "node_modules/@stacks/stacking": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/@stacks/stacking/-/stacking-6.14.0.tgz", + "integrity": "sha512-P6ITXYpb5q4hgWMPimJW84mih3hQuQ0ko7AcnJ4SPy17nt1rxEz7/zgyRnqg1Lc18zt4HqfF9SKM7+Sqt/EMZA==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@scure/base": "1.1.1", + "@stacks/common": "^6.13.0", + "@stacks/encryption": "^6.13.1", + "@stacks/network": "^6.13.0", + "@stacks/stacks-blockchain-api-types": "^0.61.0", + "@stacks/transactions": "^6.13.1", + "bs58": "^5.0.0" + } + }, + "node_modules/@stacks/stacking/node_modules/@scure/base": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.1.1.tgz", + "integrity": "sha512-ZxOhsSyxYwLJj3pLZCefNitxsj093tb2vq90mp2txoYeBqbcjDjqFhyM8eUjq/uFm6zJ+mUuqxlS2FkuSY1MTA==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@stacks/stacks-blockchain-api-types": { + "version": "0.61.0", + "resolved": "https://registry.npmjs.org/@stacks/stacks-blockchain-api-types/-/stacks-blockchain-api-types-0.61.0.tgz", + "integrity": "sha512-yPOfTUboo5eA9BZL/hqMcM71GstrFs9YWzOrJFPeP4cOO1wgYvAcckgBRbgiE3NqeX0A7SLZLDAXLZbATuRq9w==" + }, + "node_modules/@stacks/transactions": { + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/transactions/-/transactions-6.13.1.tgz", + "integrity": "sha512-PWw2I+2Fj3CaFYQIoVcqQN6E2qGHNhFv03nuR0CxMq0sx8stPgYZbdzUlnlBcJQdsFiHrw3sPeqnXDZt+Hg5YQ==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@noble/secp256k1": "1.7.1", + "@stacks/common": "^6.13.0", + 
"@stacks/network": "^6.13.0", + "c32check": "^2.0.0", + "lodash.clonedeep": "^4.5.0" + } + }, + "node_modules/@types/bn.js": { + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-5.1.5.tgz", + "integrity": "sha512-V46N0zwKRF5Q00AZ6hWtN0T8gGmDUaUzLWQvHFo5yThtVwK/VCenFY3wXVbOvNfajEpsTfQM4IN9k/d6gUVX3A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + }, + "node_modules/@types/node": { + "version": "18.19.31", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.31.tgz", + "integrity": "sha512-ArgCD39YpyyrtFKIqMDvjz79jto5fcI/SVUs2HwB+f0dAzq68yqOdyaSivLiLugSziTpNXLQrVb7RZFmdZzbhA==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@vitest/expect": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-1.5.2.tgz", + "integrity": "sha512-rf7MTD1WCoDlN3FfYJ9Llfp0PbdtOMZ3FIF0AVkDnKbp3oiMW1c8AmvRZBcqbAhDUAvF52e9zx4WQM1r3oraVA==", + "dependencies": { + "@vitest/spy": "1.5.2", + "@vitest/utils": "1.5.2", + "chai": "^4.3.10" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-1.5.2.tgz", + "integrity": "sha512-7IJ7sJhMZrqx7HIEpv3WrMYcq8ZNz9L6alo81Y6f8hV5mIE6yVZsFoivLZmr0D777klm1ReqonE9LyChdcmw6g==", + "dependencies": { + "@vitest/utils": "1.5.2", + "p-limit": "^5.0.0", + "pathe": "^1.1.1" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-1.5.2.tgz", + "integrity": 
"sha512-CTEp/lTYos8fuCc9+Z55Ga5NVPKUgExritjF5VY7heRFUfheoAqBneUlvXSUJHUZPjnPmyZA96yLRJDP1QATFQ==", + "dependencies": { + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-1.5.2.tgz", + "integrity": "sha512-xCcPvI8JpCtgikT9nLpHPL1/81AYqZy1GCy4+MCHBE7xi8jgsYkULpW5hrx5PGLgOQjUpb6fd15lqcriJ40tfQ==", + "dependencies": { + "tinyspy": "^2.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-1.5.2.tgz", + "integrity": "sha512-sWOmyofuXLJ85VvXNsroZur7mOJGiQeM0JN3/0D1uU8U9bGFM69X1iqHaRXl6R8BwaLY6yPCogP257zxTzkUdA==", + "dependencies": { + "diff-sequences": "^29.6.3", + "estree-walker": "^3.0.3", + "loupe": "^2.3.7", + "pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/acorn": { + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.2.tgz", + "integrity": "sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { 
+ "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/assertion-error": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", + "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", + "engines": { + "node": "*" + } + }, + "node_modules/base-x": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/base-x/-/base-x-4.0.0.tgz", + "integrity": "sha512-FuwxlW4H5kh37X/oW59pwTzzTKRzfrrQwhmyspRM7swOEZcHtDZSCt45U6oKgtuFE+WYPblePMVIPR4RZrh/hw==" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "engines": { + 
"node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/bs58": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/bs58/-/bs58-5.0.0.tgz", + "integrity": "sha512-r+ihvQJvahgYT50JD05dyJNKlmmSlMoOGwn1lCcEzanPglg7TxYjioQUYehQ9mAR/+hOSd2jRc/Z2y5UxBymvQ==", + "dependencies": { + "base-x": "^4.0.0" + } + }, + "node_modules/c32check": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/c32check/-/c32check-2.0.0.tgz", + "integrity": "sha512-rpwfAcS/CMqo0oCqDf3r9eeLgScRE3l/xHDCXhM3UyrfvIn7PrLq63uHh7yYbv8NzaZn5MVsVhIRpQ+5GZ5HyA==", + "dependencies": { + "@noble/hashes": "^1.1.2", + "base-x": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/chai": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.4.1.tgz", + "integrity": "sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==", + "dependencies": { + "assertion-error": "^1.1.0", + "check-error": "^1.0.3", + "deep-eql": "^4.1.3", + "get-func-name": "^2.0.2", + "loupe": "^2.3.6", + "pathval": "^1.1.1", 
+ "type-detect": "^4.0.8" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/check-error": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.3.tgz", + "integrity": "sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==", + "dependencies": { + "get-func-name": "^2.0.2" + }, + "engines": { + "node": "*" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar-cli": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chokidar-cli/-/chokidar-cli-3.0.0.tgz", + "integrity": "sha512-xVW+Qeh7z15uZRxHOkP93Ux8A0xbPzwK4GaqD8dQOYc34TlkqUhVSS59fK36DOp5WdJlrRzlYSy02Ht99FjZqQ==", + "dependencies": { + "chokidar": "^3.5.2", + "lodash.debounce": "^4.0.8", + "lodash.throttle": "^4.1.1", + "yargs": "^13.3.0" + }, + "bin": { + "chokidar": "index.js" + }, + "engines": { + "node": ">= 8.10.0" + } + }, + "node_modules/chokidar-cli/node_modules/ansi-regex": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz", + "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==", + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": 
"sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/chokidar-cli/node_modules/cliui": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", + "dependencies": { + "string-width": "^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" + } + }, + "node_modules/chokidar-cli/node_modules/emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" + }, + "node_modules/chokidar-cli/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w==", + "engines": { + "node": ">=4" + } + }, + "node_modules/chokidar-cli/node_modules/string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dependencies": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dependencies": { + "ansi-regex": "^4.1.0" + }, + "engines": { + "node": ">=6" + } + }, + 
"node_modules/chokidar-cli/node_modules/wrap-ansi": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", + "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "dependencies": { + "ansi-styles": "^3.2.0", + "string-width": "^3.0.0", + "strip-ansi": "^5.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/y18n": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", + "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" + }, + "node_modules/chokidar-cli/node_modules/yargs": { + "version": "13.3.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", + "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", + "dependencies": { + "cliui": "^5.0.0", + "find-up": "^3.0.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^3.0.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^13.1.2" + } + }, + "node_modules/chokidar-cli/node_modules/yargs-parser": { + "version": "13.1.2", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", + "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", + "dependencies": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/color-convert": { + 
"version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/confbox": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.7.tgz", + "integrity": "sha512-uJcB/FKZtBMCJpK8MQji6bJHgu1tixKPxRLeGkNzBoOZzpnZUJm0jm2/sBDWcuBx1dYgxV4JU+g5hmNxCyAmdA==" + }, + "node_modules/cross-fetch": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", + "integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==", + "dependencies": { + "node-fetch": "^2.6.12" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": 
"sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/deep-eql": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.3.tgz", + "integrity": "sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==", + "dependencies": { + "type-detect": "^4.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/esbuild": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.20.2.tgz", + "integrity": "sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==", + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.20.2", + "@esbuild/android-arm": "0.20.2", + "@esbuild/android-arm64": "0.20.2", + "@esbuild/android-x64": "0.20.2", + "@esbuild/darwin-arm64": "0.20.2", + "@esbuild/darwin-x64": "0.20.2", + "@esbuild/freebsd-arm64": "0.20.2", + "@esbuild/freebsd-x64": "0.20.2", + "@esbuild/linux-arm": "0.20.2", + "@esbuild/linux-arm64": "0.20.2", + "@esbuild/linux-ia32": "0.20.2", + "@esbuild/linux-loong64": "0.20.2", + "@esbuild/linux-mips64el": "0.20.2", + "@esbuild/linux-ppc64": "0.20.2", + "@esbuild/linux-riscv64": "0.20.2", + "@esbuild/linux-s390x": 
"0.20.2", + "@esbuild/linux-x64": "0.20.2", + "@esbuild/netbsd-x64": "0.20.2", + "@esbuild/openbsd-x64": "0.20.2", + "@esbuild/sunos-x64": "0.20.2", + "@esbuild/win32-arm64": "0.20.2", + "@esbuild/win32-ia32": "0.20.2", + "@esbuild/win32-x64": "0.20.2" + } + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/execa": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz", + "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^8.0.1", + "human-signals": "^5.0.0", + "is-stream": "^3.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^5.1.0", + "onetime": "^6.0.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^3.0.0" + }, + "engines": { + "node": ">=16.17" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": 
"sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-func-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz", + "integrity": "sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==", + "engines": { + "node": "*" + } + }, + "node_modules/get-stream": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", + "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/human-signals": { + "version": "5.0.0", + "resolved": 
"https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz", + "integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==", + "engines": { + "node": ">=16.17.0" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", + "integrity": 
"sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/js-tokens": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.0.tgz", + "integrity": "sha512-WriZw1luRMlmV3LGJaR6QOJjWwgLUTf89OwT2lUOyjX2dJGBwgmIkbcz+7WFZjrZM635JOIR517++e/67CP9dQ==" + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/kolorist": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/kolorist/-/kolorist-1.8.0.tgz", + "integrity": "sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==" + }, + "node_modules/local-pkg": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.5.0.tgz", + "integrity": "sha512-ok6z3qlYyCDS4ZEU27HaU6x/xZa9Whf8jD4ptH5UZTQYZVYeb9bnZ3ojVhiJNLiXK1Hfc0GNbLXcmZ5plLDDBg==", + "dependencies": { + "mlly": "^1.4.2", + "pkg-types": "^1.0.3" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": 
">=6" + } + }, + "node_modules/lodash.clonedeep": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", + "integrity": "sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.throttle": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz", + "integrity": "sha512-wIkUCfVKpVsWo3JSZlc+8MB5it+2AN5W8J7YVMST30UrvcQNZ1Okbj+rbVniijTWE6FGYy4XJq/rHkas8qJMLQ==" + }, + "node_modules/loupe": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.7.tgz", + "integrity": "sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==", + "dependencies": { + "get-func-name": "^2.0.1" + } + }, + "node_modules/magic-string": { + "version": "0.30.10", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.10.tgz", + "integrity": "sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/mimic-fn": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", + "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mlly": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.6.1.tgz", + "integrity": "sha512-vLgaHvaeunuOXHSmEbZ9izxPx3USsk8KCQ8iC+aTlp5sKRSoZvwhHh5L9VbKSaVC6sJDqbyohIS76E2VmHIPAA==", + "dependencies": { + "acorn": "^8.11.3", + "pathe": "^1.1.2", + "pkg-types": "^1.0.3", + "ufo": "^1.3.2" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", + "integrity": 
"sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", + "dependencies": { + "path-key": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/onetime": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", + "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", + "dependencies": { + "mimic-fn": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-limit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-5.0.0.tgz", + "integrity": "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/p-locate/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": 
"sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/pathe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", + "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==" + }, + "node_modules/pathval": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", + "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", + "engines": { + "node": "*" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": 
"sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.1.0.tgz", + "integrity": "sha512-/RpmvKdxKf8uILTtoOhAgf30wYbP2Qw+L9p3Rvshx1JZVX+XQNZQFjlbmGHEGIm4CkVPlSn+NXmIM8+9oWQaSA==", + "dependencies": { + "confbox": "^0.1.7", + "mlly": "^1.6.1", + "pathe": "^1.1.2" + } + }, + "node_modules/postcss": { + "version": "8.4.38", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", + "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", + "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + 
"node_modules/react-is": { + "version": "18.3.0", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.0.tgz", + "integrity": "sha512-wRiUsea88TjKDc4FBEn+sLvIDesp6brMbGWnJGjew2waAc9evdhja/2LvePc898HJbHw0L+MTWy7NhpnELAvLQ==" + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" + }, + "node_modules/ripemd160-min": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/ripemd160-min/-/ripemd160-min-0.0.6.tgz", + "integrity": "sha512-+GcJgQivhs6S9qvLogusiTcS9kQUfgR75whKuy5jIhuiOfQuJ8fjqxV6EGD5duH1Y/FawFUMtMhyeq3Fbnib8A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/rollup": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.16.1.tgz", + "integrity": "sha512-5CaD3MPDlPKfhqzRvWXK96G6ELJfPZNb3LHiZxTHgDdC6jvwfGz2E8nY+9g1ONk4ttHsK1WaFP19Js4PSr1E3g==", + "dependencies": { + "@types/estree": "1.0.5" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.16.1", + "@rollup/rollup-android-arm64": "4.16.1", + 
"@rollup/rollup-darwin-arm64": "4.16.1", + "@rollup/rollup-darwin-x64": "4.16.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.16.1", + "@rollup/rollup-linux-arm-musleabihf": "4.16.1", + "@rollup/rollup-linux-arm64-gnu": "4.16.1", + "@rollup/rollup-linux-arm64-musl": "4.16.1", + "@rollup/rollup-linux-powerpc64le-gnu": "4.16.1", + "@rollup/rollup-linux-riscv64-gnu": "4.16.1", + "@rollup/rollup-linux-s390x-gnu": "4.16.1", + "@rollup/rollup-linux-x64-gnu": "4.16.1", + "@rollup/rollup-linux-x64-musl": "4.16.1", + "@rollup/rollup-win32-arm64-msvc": "4.16.1", + "@rollup/rollup-win32-ia32-msvc": "4.16.1", + "@rollup/rollup-win32-x64-msvc": "4.16.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": 
"sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==" + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==" + }, + "node_modules/std-env": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", + "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==" + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", + "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strip-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-2.1.0.tgz", + "integrity": "sha512-Op+UycaUt/8FbN/Z2TWPBLge3jWrP3xj10f3fnYxf052bKuS3EKs1ZQcVGjnEMdsNVAM+plXRdmjrZ/KgG3Skw==", + "dependencies": { + "js-tokens": "^9.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/tinybench": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.8.0.tgz", + "integrity": "sha512-1/eK7zUnIklz4JUUlL+658n58XO2hHLQfSk1Zf2LKieUjxidN16eKFEoDEfjHc3ohofSSqK3X5yO6VGb6iW8Lw==" + }, + "node_modules/tinypool": { + "version": "0.8.4", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.8.4.tgz", + "integrity": "sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-2.2.1.tgz", 
+ "integrity": "sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/typescript": { + "version": "5.4.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz", + "integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/ufo": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.5.3.tgz", + "integrity": "sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw==" + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/varuint-bitcoin": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/varuint-bitcoin/-/varuint-bitcoin-1.1.2.tgz", + "integrity": "sha512-4EVb+w4rx+YfVM32HQX42AbbT7/1f5zwAYhIujKXKk8NQK+JfRVl3pqT3hjNn/L+RstigmGGKVwHA/P0wgITZw==", + "dependencies": { + "safe-buffer": "^5.1.1" + } + }, + "node_modules/vite": { + "version": "5.2.10", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.10.tgz", + "integrity": "sha512-PAzgUZbP7msvQvqdSD+ErD5qGnSFiGOoWmV5yAKUEI0kdhjbH6nMWVyZQC/hSc4aXwc0oJ9aEdIiF9Oje0JFCw==", + "dependencies": { + "esbuild": "^0.20.1", + "postcss": "^8.4.38", + "rollup": "^4.13.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite-node": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-1.5.2.tgz", + "integrity": "sha512-Y8p91kz9zU+bWtF7HGt6DVw2JbhyuB2RlZix3FPYAYmUyZ3n7iTp8eSyLyY6sxtPegvxQtmlTMhfPhUfCUF93A==", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.3.4", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "vite": "^5.0.0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vitest": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-1.5.2.tgz", + "integrity": 
"sha512-l9gwIkq16ug3xY7BxHwcBQovLZG75zZL0PlsiYQbf76Rz6QGs54416UWMtC0jXeihvHvcHrf2ROEjkQRVpoZYw==", + "dependencies": { + "@vitest/expect": "1.5.2", + "@vitest/runner": "1.5.2", + "@vitest/snapshot": "1.5.2", + "@vitest/spy": "1.5.2", + "@vitest/utils": "1.5.2", + "acorn-walk": "^8.3.2", + "chai": "^4.3.10", + "debug": "^4.3.4", + "execa": "^8.0.1", + "local-pkg": "^0.5.0", + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "std-env": "^3.5.0", + "strip-literal": "^2.0.0", + "tinybench": "^2.5.1", + "tinypool": "^0.8.3", + "vite": "^5.0.0", + "vite-node": "1.5.2", + "why-is-node-running": "^2.2.2" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/node": "^18.0.0 || >=20.0.0", + "@vitest/browser": "1.5.2", + "@vitest/ui": "1.5.2", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/vitest-environment-clarinet": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/vitest-environment-clarinet/-/vitest-environment-clarinet-2.1.0.tgz", + "integrity": "sha512-1SA9XZh47qmbV724sGo2FyjVU+Ar3m5TOU4bLGSlWDb/x388IKUPrHbHWqIQNwY+gwEm9VBfXEAd1LOSUdemBw==", + "peerDependencies": { + "@hirosystems/clarinet-sdk": ">=2.6.0", + "vitest": "^1.5.2" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + 
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-module": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz", + "integrity": "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==" + }, + "node_modules/why-is-node-running": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.2.2.tgz", + "integrity": "sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": 
"sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/wrap-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + 
"version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/contrib/boot-contracts-unit-tests/package.json b/contrib/boot-contracts-unit-tests/package.json new file mode 100644 index 0000000000..ffd2108a07 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/package.json @@ -0,0 +1,26 @@ +{ + "name": "boot-contracts-unit-tests-tests", + "version": "1.0.0", + "description": "Run unit tests on this project.", + "type": "module", + "private": true, + "scripts": { + "test": "vitest run", + "test:report": "vitest run -- --coverage --costs", + "test:watch": "chokidar \"tests/**/*.ts\" \"contracts/**/*.clar\" -c \"npm run test:report\"" + }, + "author": "", + "license": "ISC", + "dependencies": { + "@hirosystems/clarinet-sdk": "^2.6.0", + "@stacks/encryption": "^6.13.1", + "@stacks/network": "^6.13.0", + "@stacks/stacking": "^6.14.0", + "@stacks/transactions": "^6.13.1", + "chokidar-cli": "^3.0.0", + "typescript": "^5.3.3", + "vite": "^5.1.4", + "vitest": "^1.5.2", + "vitest-environment-clarinet": "^2.1.0" + } +} diff --git a/contrib/boot-contracts-unit-tests/settings/Devnet.toml b/contrib/boot-contracts-unit-tests/settings/Devnet.toml new file mode 100644 index 0000000000..eb43b6be05 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/settings/Devnet.toml @@ -0,0 +1,151 @@ +[network] +name = "devnet" +deployment_fee_rate = 10 + +[accounts.deployer] +mnemonic = "twice kind fence tip hidden tilt action fragile skin nothing glory cousin green tomorrow spring wrist shed math olympic multiply hip blue scout claw" +balance = 100_000_000_000_000 +# secret_key: 753b7cc01a1a2e86221266a154af739463fce51219d97e4f856cd7200c3bd2a601 +# stx_address: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM +# 
btc_address: mqVnk6NPRdhntvfm4hh9vvjiRkFDUuSYsH + +[accounts.wallet_1] +mnemonic = "sell invite acquire kitten bamboo drastic jelly vivid peace spawn twice guilt pave pen trash pretty park cube fragile unaware remain midnight betray rebuild" +balance = 100_000_000_000_000 +# secret_key: 7287ba251d44a4d3fd9276c88ce34c5c52a038955511cccaf77e61068649c17801 +# stx_address: ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5 +# btc_address: mr1iPkD9N3RJZZxXRk7xF9d36gffa6exNC + +[accounts.wallet_2] +mnemonic = "hold excess usual excess ring elephant install account glad dry fragile donkey gaze humble truck breeze nation gasp vacuum limb head keep delay hospital" +balance = 100_000_000_000_000 +# secret_key: 530d9f61984c888536871c6573073bdfc0058896dc1adfe9a6a10dfacadc209101 +# stx_address: ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG +# btc_address: muYdXKmX9bByAueDe6KFfHd5Ff1gdN9ErG + +[accounts.wallet_3] +mnemonic = "cycle puppy glare enroll cost improve round trend wrist mushroom scorpion tower claim oppose clever elephant dinosaur eight problem before frozen dune wagon high" +balance = 100_000_000_000_000 +# secret_key: d655b2523bcd65e34889725c73064feb17ceb796831c0e111ba1a552b0f31b3901 +# stx_address: ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC +# btc_address: mvZtbibDAAA3WLpY7zXXFqRa3T4XSknBX7 + +[accounts.wallet_4] +mnemonic = "board list obtain sugar hour worth raven scout denial thunder horse logic fury scorpion fold genuine phrase wealth news aim below celery when cabin" +balance = 100_000_000_000_000 +# secret_key: f9d7206a47f14d2870c163ebab4bf3e70d18f5d14ce1031f3902fbbc894fe4c701 +# stx_address: ST2NEB84ASENDXKYGJPQW86YXQCEFEX2ZQPG87ND +# btc_address: mg1C76bNTutiCDV3t9nWhZs3Dc8LzUufj8 + +[accounts.wallet_5] +mnemonic = "hurry aunt blame peanut heavy update captain human rice crime juice adult scale device promote vast project quiz unit note reform update climb purchase" +balance = 100_000_000_000_000 +# secret_key: 
3eccc5dac8056590432db6a35d52b9896876a3d5cbdea53b72400bc9c2099fe801 +# stx_address: ST2REHHS5J3CERCRBEPMGH7921Q6PYKAADT7JP2VB +# btc_address: mweN5WVqadScHdA81aATSdcVr4B6dNokqx + +[accounts.wallet_6] +mnemonic = "area desk dutch sign gold cricket dawn toward giggle vibrant indoor bench warfare wagon number tiny universe sand talk dilemma pottery bone trap buddy" +balance = 100_000_000_000_000 +# secret_key: 7036b29cb5e235e5fd9b09ae3e8eec4404e44906814d5d01cbca968a60ed4bfb01 +# stx_address: ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0 +# btc_address: mzxXgV6e4BZSsz8zVHm3TmqbECt7mbuErt + +[accounts.wallet_7] +mnemonic = "prevent gallery kind limb income control noise together echo rival record wedding sense uncover school version force bleak nuclear include danger skirt enact arrow" +balance = 100_000_000_000_000 +# secret_key: b463f0df6c05d2f156393eee73f8016c5372caa0e9e29a901bb7171d90dc4f1401 +# stx_address: ST3PF13W7Z0RRM42A8VZRVFQ75SV1K26RXEP8YGKJ +# btc_address: n37mwmru2oaVosgfuvzBwgV2ysCQRrLko7 + +[accounts.wallet_8] +mnemonic = "female adjust gallery certain visit token during great side clown fitness like hurt clip knife warm bench start reunion globe detail dream depend fortune" +balance = 100_000_000_000_000 +# secret_key: 6a1a754ba863d7bab14adbbc3f8ebb090af9e871ace621d3e5ab634e1422885e01 +# stx_address: ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP +# btc_address: n2v875jbJ4RjBnTjgbfikDfnwsDV5iUByw + +[accounts.faucet] +mnemonic = "shadow private easily thought say logic fault paddle word top book during ignore notable orange flight clock image wealth health outside kitten belt reform" +balance = 100_000_000_000_000 +# secret_key: de433bdfa14ec43aa1098d5be594c8ffb20a31485ff9de2923b2689471c401b801 +# stx_address: STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6 +# btc_address: mjSrB3wS4xab3kYqFktwBzfTdPg367ZJ2d + +[devnet] +disable_stacks_explorer = false +disable_stacks_api = false +# disable_subnet_api = false +# disable_bitcoin_explorer = true +# working_dir = 
"tmp/devnet" +# stacks_node_events_observers = ["host.docker.internal:8002"] +# miner_mnemonic = "fragile loan twenty basic net assault jazz absorb diet talk art shock innocent float punch travel gadget embrace caught blossom hockey surround initial reduce" +# miner_derivation_path = "m/44'/5757'/0'/0/0" +# faucet_mnemonic = "shadow private easily thought say logic fault paddle word top book during ignore notable orange flight clock image wealth health outside kitten belt reform" +# faucet_derivation_path = "m/44'/5757'/0'/0/0" +# orchestrator_port = 20445 +# bitcoin_node_p2p_port = 18444 +# bitcoin_node_rpc_port = 18443 +# bitcoin_node_username = "devnet" +# bitcoin_node_password = "devnet" +# bitcoin_controller_block_time = 30_000 +# stacks_node_rpc_port = 20443 +# stacks_node_p2p_port = 20444 +# stacks_api_port = 3999 +# stacks_api_events_port = 3700 +# bitcoin_explorer_port = 8001 +# stacks_explorer_port = 8000 +# postgres_port = 5432 +# postgres_username = "postgres" +# postgres_password = "postgres" +# postgres_database = "postgres" +# bitcoin_node_image_url = "quay.io/hirosystems/bitcoind:26.0" +# stacks_node_image_url = "quay.io/hirosystems/stacks-node:devnet-2.5" +# stacks_signer_image_url = "quay.io/hirosystems/stacks-node:devnet-2.5" +# stacks_api_image_url = "hirosystems/stacks-blockchain-api:master" +# stacks_explorer_image_url = "hirosystems/explorer:latest" +# bitcoin_explorer_image_url = "quay.io/hirosystems/bitcoin-explorer:devnet" +# postgres_image_url = "postgres:alpine" +# enable_subnet_node = true +# subnet_node_image_url = "hirosystems/stacks-subnets:0.8.1" +# subnet_leader_mnemonic = "twice kind fence tip hidden tilt action fragile skin nothing glory cousin green tomorrow spring wrist shed math olympic multiply hip blue scout claw" +# subnet_leader_derivation_path = "m/44'/5757'/0'/0/0" +# subnet_contract_id = "ST173JK7NZBA4BS05ZRATQH1K89YJMTGEH1Z5J52E.subnet-v3-0-1" +# subnet_node_rpc_port = 30443 +# subnet_node_p2p_port = 30444 +# 
subnet_events_ingestion_port = 30445 +# subnet_node_events_observers = ["host.docker.internal:8002"] +# subnet_api_image_url = "hirosystems/stacks-blockchain-api:master" +# subnet_api_postgres_database = "subnet_api" + +# For testing in epoch 2.1 / using Clarity2 +# epoch_2_0 = 100 +# epoch_2_05 = 100 +# epoch_2_1 = 101 +# epoch_2_2 = 102 +# epoch_2_3 = 103 +# epoch_2_4 = 104 +# epoch_2_5 = 108 + + +# Send some stacking orders +[[devnet.pox_stacking_orders]] +start_at_cycle = 1 +duration = 12 +wallet = "wallet_1" +slots = 2 +btc_address = "mr1iPkD9N3RJZZxXRk7xF9d36gffa6exNC" + +[[devnet.pox_stacking_orders]] +start_at_cycle = 1 +duration = 12 +wallet = "wallet_2" +slots = 1 +btc_address = "muYdXKmX9bByAueDe6KFfHd5Ff1gdN9ErG" + +[[devnet.pox_stacking_orders]] +start_at_cycle = 1 +duration = 12 +wallet = "wallet_3" +slots = 1 +btc_address = "mvZtbibDAAA3WLpY7zXXFqRa3T4XSknBX7" diff --git a/contrib/boot-contracts-unit-tests/tests/helpers.ts b/contrib/boot-contracts-unit-tests/tests/helpers.ts new file mode 100644 index 0000000000..9fb55187b2 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/helpers.ts @@ -0,0 +1,548 @@ +import { ClarityEvent } from "@hirosystems/clarinet-sdk"; +import { + getPublicKeyFromPrivate, + publicKeyToBtcAddress, +} from "@stacks/encryption"; +import { StacksDevnet } from "@stacks/network"; +import { + Pox4SignatureTopic, + StackingClient, + poxAddressToTuple, +} from "@stacks/stacking"; +import { + Cl, + ResponseOkCV, + StacksPrivateKey, + TransactionVersion, + TupleCV, + UIntCV, + createStacksPrivateKey, + getAddressFromPrivateKey, +} from "@stacks/transactions"; +import { expect } from "vitest"; + +export const POX_DEPLOYER = "ST000000000000000000002AMW42H"; +export const POX_CONTRACT = `${POX_DEPLOYER}.pox-4`; + +// Error codes from the contract +export const ERRORS = { + ERR_STACKING_UNREACHABLE: 255, + ERR_STACKING_CORRUPTED_STATE: 254, + ERR_STACKING_INSUFFICIENT_FUNDS: 1, + ERR_STACKING_INVALID_LOCK_PERIOD: 2, + 
ERR_STACKING_ALREADY_STACKED: 3, + ERR_STACKING_NO_SUCH_PRINCIPAL: 4, + ERR_STACKING_EXPIRED: 5, + ERR_STACKING_STX_LOCKED: 6, + ERR_STACKING_PERMISSION_DENIED: 9, + ERR_STACKING_THRESHOLD_NOT_MET: 11, + ERR_STACKING_POX_ADDRESS_IN_USE: 12, + ERR_STACKING_INVALID_POX_ADDRESS: 13, + ERR_STACKING_INVALID_AMOUNT: 18, + ERR_NOT_ALLOWED: 19, + ERR_STACKING_ALREADY_DELEGATED: 20, + ERR_DELEGATION_EXPIRES_DURING_LOCK: 21, + ERR_DELEGATION_TOO_MUCH_LOCKED: 22, + ERR_DELEGATION_POX_ADDR_REQUIRED: 23, + ERR_INVALID_START_BURN_HEIGHT: 24, + ERR_NOT_CURRENT_STACKER: 25, + ERR_STACK_EXTEND_NOT_LOCKED: 26, + ERR_STACK_INCREASE_NOT_LOCKED: 27, + ERR_DELEGATION_NO_REWARD_SLOT: 28, + ERR_DELEGATION_WRONG_REWARD_SLOT: 29, + ERR_STACKING_IS_DELEGATED: 30, + ERR_STACKING_NOT_DELEGATED: 31, + ERR_INVALID_SIGNER_KEY: 32, + ERR_REUSED_SIGNER_KEY: 33, + ERR_DELEGATION_ALREADY_REVOKED: 34, + ERR_INVALID_SIGNATURE_PUBKEY: 35, + ERR_INVALID_SIGNATURE_RECOVER: 36, + ERR_INVALID_REWARD_CYCLE: 37, + ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH: 38, + ERR_SIGNER_AUTH_USED: 39, + ERR_INVALID_INCREASE: 40, +}; + +// Keys to use for stacking +// wallet_1, wallet_2, wallet_3 private keys +const stackingKeys = [ + "7287ba251d44a4d3fd9276c88ce34c5c52a038955511cccaf77e61068649c17801", + "530d9f61984c888536871c6573073bdfc0058896dc1adfe9a6a10dfacadc209101", + "d655b2523bcd65e34889725c73064feb17ceb796831c0e111ba1a552b0f31b3901", +]; + +export type StackerInfo = { + authId: number; + privKey: string; + pubKey: string; + stxAddress: string; + btcAddr: string; + signerPrivKey: StacksPrivateKey; + signerPubKey: string; + client: StackingClient; +}; + +export const stackers = Object.freeze( + stackingKeys.map((privKey, i) => { + const network = new StacksDevnet(); + + const pubKey = getPublicKeyFromPrivate(privKey); + const stxAddress = getAddressFromPrivateKey( + privKey, + TransactionVersion.Testnet + ); + const signerPrivKey = createStacksPrivateKey(privKey); + const signerPubKey = 
getPublicKeyFromPrivate(signerPrivKey.data); + + const info: StackerInfo = { + authId: i, + privKey, + pubKey, + stxAddress, + btcAddr: publicKeyToBtcAddress(pubKey), + signerPrivKey: signerPrivKey, + signerPubKey: signerPubKey, + client: new StackingClient(stxAddress, network), + }; + return info; + }) +); + +export const getPoxInfo = () => { + const poxInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-pox-info", + [], + simnet.deployer + ); + // @ts-ignore + const data = poxInfo.result.value.data; + const typedPoxInfo = { + firstBurnchainBlockHeight: data["first-burnchain-block-height"] + .value as bigint, + minAmountUstx: data["min-amount-ustx"].value as bigint, + prepareCycleLength: data["prepare-cycle-length"].value as bigint, + rewardCycleId: data["reward-cycle-id"].value as bigint, + rewardCycleLength: data["reward-cycle-length"].value as bigint, + totalLiquidSupplyUstx: data["total-liquid-supply-ustx"].value as bigint, + }; + + return typedPoxInfo; +}; + +export const getStackingMinimum = () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacking-minimum", + [], + simnet.deployer + ); + return (response.result as UIntCV).value; +}; + +export const burnHeightToRewardCycle = (burnHeight: bigint | number) => { + const poxInfo = getPoxInfo(); + return Number( + (BigInt(burnHeight) - poxInfo.firstBurnchainBlockHeight) / + poxInfo.rewardCycleLength + ); +}; + +export const stackStx = ( + stacker: StackerInfo, + amount: bigint | number, + startBurnHeight: bigint | number, + lockPeriod: bigint | number, + maxAmount: bigint | number, + authId: bigint | number, + sender: string +) => { + const rewardCycle = burnHeightToRewardCycle(startBurnHeight); + const sigArgs = { + authId: authId, + maxAmount: maxAmount, + rewardCycle, + period: Number(lockPeriod), + topic: Pox4SignatureTopic.StackStx, + poxAddress: stacker.btcAddr, + signerPrivateKey: stacker.signerPrivKey, + }; + const signerSignature = stacker.client.signPoxSignature(sigArgs); + 
const signerKey = Cl.bufferFromHex(stacker.signerPubKey); + + const stackStxArgs = [ + Cl.uint(amount), + poxAddressToTuple(stacker.btcAddr), + Cl.uint(startBurnHeight), + Cl.uint(lockPeriod), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + return simnet.callPublicFn(POX_CONTRACT, "stack-stx", stackStxArgs, sender); +}; + +export const stackIncrease = ( + stacker: StackerInfo, + increaseBy: bigint | number, + lockPeriod: bigint | number, + maxAmount: bigint | number, + authId: bigint | number, + sender: string +) => { + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period: Number(lockPeriod), + topic: Pox4SignatureTopic.StackIncrease, + poxAddress: stacker.btcAddr, + signerPrivateKey: stacker.signerPrivKey, + }; + const signerSignature = stacker.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(stacker.signerPubKey); + + const stackIncreaseArgs = [ + Cl.uint(increaseBy), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + return simnet.callPublicFn( + POX_CONTRACT, + "stack-increase", + stackIncreaseArgs, + sender + ); +}; + +export const stackExtend = ( + stacker: StackerInfo, + extendCount: bigint | number, + maxAmount: bigint | number, + authId: bigint | number, + sender: string +) => { + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period: Number(extendCount), + topic: Pox4SignatureTopic.StackExtend, + poxAddress: stacker.btcAddr, + signerPrivateKey: stacker.signerPrivKey, + }; + const signerSignature = stacker.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(stacker.signerPubKey); + + const stackExtendArgs = [ + Cl.uint(extendCount), + poxAddressToTuple(stacker.btcAddr), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + 
Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + return simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + stackExtendArgs, + sender + ); +}; + +export const delegateStx = ( + amount: bigint | number, + delegateTo: string, + untilBurnHeight: bigint | number | null, + poxAddr: string | null, + sender: string +) => { + const delegateStxArgs = [ + Cl.uint(amount), + Cl.principal(delegateTo), + untilBurnHeight ? Cl.some(Cl.uint(untilBurnHeight)) : Cl.none(), + poxAddr ? Cl.some(poxAddressToTuple(poxAddr)) : Cl.none(), + ]; + + return simnet.callPublicFn( + POX_CONTRACT, + "delegate-stx", + delegateStxArgs, + sender + ); +}; + +export const revokeDelegateStx = (sender: string) => { + return simnet.callPublicFn(POX_CONTRACT, "revoke-delegate-stx", [], sender); +}; + +export const delegateStackStx = ( + stacker: string, + amount: bigint | number, + poxAddr: string, + startBurnHeight: bigint | number, + lockPeriod: bigint | number, + sender: string +) => { + const delegateStackStxArgs = [ + Cl.principal(stacker), + Cl.uint(amount), + poxAddressToTuple(poxAddr), + Cl.uint(startBurnHeight), + Cl.uint(lockPeriod), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "delegate-stack-stx", + delegateStackStxArgs, + sender + ); +}; + +export const delegateStackExtend = ( + stacker: string, + poxAddr: string, + extendCount: bigint | number, + sender: string +) => { + const delegateStackExtendArgs = [ + Cl.principal(stacker), + poxAddressToTuple(poxAddr), + Cl.uint(extendCount), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "delegate-stack-extend", + delegateStackExtendArgs, + sender + ); +}; + +export const delegateStackIncrease = ( + stacker: string, + poxAddr: string, + increaseBy: bigint | number, + sender: string +) => { + const delegateStackIncreaseArgs = [ + Cl.principal(stacker), + poxAddressToTuple(poxAddr), + Cl.uint(increaseBy), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "delegate-stack-increase", + delegateStackIncreaseArgs, + sender + ); +}; + 
+export const allowContractCaller = ( + caller: string, + untilBurnHeight: bigint | number | null, + sender: string +) => { + const args = [ + Cl.principal(caller), + untilBurnHeight ? Cl.some(Cl.uint(untilBurnHeight)) : Cl.none(), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "allow-contract-caller", + args, + sender + ); +}; + +export const disallowContractCaller = (caller: string, sender: string) => { + const args = [Cl.principal(caller)]; + return simnet.callPublicFn( + POX_CONTRACT, + "disallow-contract-caller", + args, + sender + ); +}; + +export const stackAggregationCommitIndexed = ( + stacker: StackerInfo, + rewardCycle: bigint | number, + maxAmount: bigint | number, + authId: bigint | number, + sender: string +) => { + const period = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: stacker.btcAddr, + signerPrivateKey: stacker.signerPrivKey, + }; + const signerSignature = stacker.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(stacker.signerPubKey); + + const args = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit-indexed", + args, + sender + ); +}; + +export const stackAggregationIncrease = ( + stacker: StackerInfo, + rewardCycle: bigint | number, + rewardCycleIndex: bigint | number, + maxAmount: bigint | number, + authId: bigint | number, + sender: string +) => { + const period = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateIncrease, + poxAddress: stacker.btcAddr, + signerPrivateKey: stacker.signerPrivKey, + }; + const signerSignature = stacker.client.signPoxSignature(sigArgs); + const signerKey = 
Cl.bufferFromHex(stacker.signerPubKey); + + const args = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(rewardCycle), + Cl.uint(rewardCycleIndex), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-increase", + args, + sender + ); +}; + +export const setSignerKeyAuthorization = ( + stacker: StackerInfo, + period: bigint | number, + rewardCycle: bigint | number, + topic: Pox4SignatureTopic, + allowed: boolean, + maxAmount: bigint | number, + authId: bigint | number +) => { + const args = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(period), + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.bufferFromHex(stacker.signerPubKey), + Cl.bool(allowed), + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "set-signer-key-authorization", + args, + stacker.stxAddress + ); +}; + +// Validate a pox-4 event and return the value of the event. +export const checkPox4Event = (event: ClarityEvent): TupleCV => { + expect(event.event).toEqual("print_event"); + expect(event.data.contract_identifier).toEqual(POX_CONTRACT); + expect(event.data.topic).toEqual("print"); + const value = (event.data.value! as ResponseOkCV).value; + return value as TupleCV; +}; + +// Validate the event that should be generated for a stack-* function, +// a delegate-stack-* function, or a delegate-stx function. +const checkStackOrDelegateEvent = ( + value: TupleCV, + name: string, + stacker: string, + balance: bigint, + locked: bigint, + burnchainUnlockHeight: bigint +) => { + const tuple = value.data; + expect(tuple["name"]).toBeAscii(name); + expect(tuple["stacker"]).toBePrincipal(stacker); + expect(tuple["balance"]).toBeUint(balance); + expect(tuple["locked"]).toBeUint(locked); + expect(tuple["burnchain-unlock-height"]).toBeUint(burnchainUnlockHeight); +}; + +// Validate the event that should be generated for a delegate-stx function. 
+export const checkDelegateStxEvent = ( + event: ClarityEvent, + stacker: string, + balance: bigint, + locked: bigint, + burnchainUnlockHeight: bigint, + amountUstx: bigint, + delegateTo: string, + poxAddr: string, + unlockBurnHeight: bigint +) => { + let value = checkPox4Event(event); + checkStackOrDelegateEvent( + value, + "delegate-stx", + stacker, + balance, + locked, + burnchainUnlockHeight + ); + const tuple = value.data; + const data = (tuple["data"] as TupleCV).data; + expect(data["amount-ustx"]).toBeUint(amountUstx); + expect(data["delegate-to"]).toBePrincipal(delegateTo); + if (poxAddr) { + expect(data["pox-addr"]).toBeSome(poxAddressToTuple(poxAddr)); + } else { + expect(data["pox-addr"]).toBeNone(); + } + if (unlockBurnHeight) { + expect(data["unlock-burn-height"]).toBeSome(Cl.uint(unlockBurnHeight)); + } else { + expect(data["unlock-burn-height"]).toBeNone(); + } +}; + +// Get the stacking state for a stacker. +export const getStackerInfo = (stacker: string) => { + return simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacker-info", + [Cl.principal(stacker)], + simnet.deployer + ); +}; diff --git a/contrib/boot-contracts-unit-tests/tests/misc.test.ts b/contrib/boot-contracts-unit-tests/tests/misc.test.ts new file mode 100644 index 0000000000..d50f2ef6d3 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/misc.test.ts @@ -0,0 +1,1725 @@ +import { beforeEach, describe, expect, it } from "vitest"; +import { Cl, ClarityType } from "@stacks/transactions"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { + ERRORS, + POX_CONTRACT, + StackerInfo, + allowContractCaller, + delegateStackStx, + delegateStx, + getStackingMinimum, + setSignerKeyAuthorization, + stackAggregationCommitIndexed, + stackStx, + stackers, +} from "./helpers"; + +const accounts = simnet.getAccounts(); +const deployer = accounts.get("deployer")!; +const address1 = accounts.get("wallet_1")!; +const address3 = accounts.get("wallet_3")!; + 
+beforeEach(() => { + simnet.setEpoch("3.0"); +}); + +describe("test `set-burnchain-parameters`", () => { + it("sets the parameters correctly", () => { + const response = simnet.callPublicFn( + POX_CONTRACT, + "set-burnchain-parameters", + [Cl.uint(100), Cl.uint(5), Cl.uint(20), Cl.uint(6)], + address1 + ); + expect(response.result).toBeOk(Cl.bool(true)); + + const fbbh = simnet.getDataVar( + POX_CONTRACT, + "first-burnchain-block-height" + ); + expect(fbbh).toBeUint(100); + + const ppcl = simnet.getDataVar(POX_CONTRACT, "pox-prepare-cycle-length"); + expect(ppcl).toBeUint(5); + + const prcl = simnet.getDataVar(POX_CONTRACT, "pox-reward-cycle-length"); + expect(prcl).toBeUint(20); + + const configured = simnet.getDataVar(POX_CONTRACT, "configured"); + expect(configured).toBeBool(true); + }); + + it("cannot be called twice", () => { + simnet.callPublicFn( + POX_CONTRACT, + "set-burnchain-parameters", + [Cl.uint(100), Cl.uint(5), Cl.uint(20), Cl.uint(6)], + address1 + ); + const response = simnet.callPublicFn( + POX_CONTRACT, + "set-burnchain-parameters", + [Cl.uint(101), Cl.uint(6), Cl.uint(21), Cl.uint(7)], + address1 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); +}); + +describe("test `burn-height-to-reward-cycle`", () => { + it("returns the correct reward cycle", () => { + let response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(0); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(2099)], + address1 + ); + expect(response.result).toBeUint(1); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(2100)], + address1 + ); + expect(response.result).toBeUint(2); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(2101)], + address1 + ); + expect(response.result).toBeUint(2); + }); + + it("returns the 
correct reward cycle with modified configuration", () => { + simnet.callPublicFn( + POX_CONTRACT, + "set-burnchain-parameters", + [Cl.uint(100), Cl.uint(5), Cl.uint(20), Cl.uint(6)], + address1 + ); + + expect(() => + simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(1)], + address1 + ) + ).toThrowError(); + + expect(() => + simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(99)], + address1 + ) + ).toThrowError(); + + let response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(100)], + address1 + ); + expect(response.result).toBeUint(0); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(101)], + address1 + ); + expect(response.result).toBeUint(0); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(119)], + address1 + ); + expect(response.result).toBeUint(0); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(120)], + address1 + ); + expect(response.result).toBeUint(1); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(121)], + address1 + ); + expect(response.result).toBeUint(1); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(140)], + address1 + ); + expect(response.result).toBeUint(2); + }); +}); + +describe("test `reward-cycle-to-burn-height`", () => { + it("returns the correct burn height", () => { + let response = simnet.callReadOnlyFn( + POX_CONTRACT, + "reward-cycle-to-burn-height", + [Cl.uint(0)], + address1 + ); + expect(response.result).toBeUint(0); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "reward-cycle-to-burn-height", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(1050); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "reward-cycle-to-burn-height", + [Cl.uint(2)], + address1 
+ ); + expect(response.result).toBeUint(2100); + + expect(() => + simnet.callReadOnlyFn( + POX_CONTRACT, + "reward-cycle-to-burn-height", + [Cl.uint(340282366920938463463374607431768211455n)], + address1 + ) + ).toThrowError(); + }); +}); + +describe("test `current-pox-reward-cycle`", () => { + it("returns the correct reward cycle", () => { + let response = simnet.callReadOnlyFn( + POX_CONTRACT, + "current-pox-reward-cycle", + [], + address1 + ); + expect(response.result).toBeUint(0); + + simnet.mineEmptyBlocks(2099); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "current-pox-reward-cycle", + [], + address1 + ); + expect(response.result).toBeUint(1); + + simnet.mineEmptyBlock(); + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "current-pox-reward-cycle", + [], + address1 + ); + expect(response.result).toBeUint(2); + }); +}); + +describe("test `get-stacker-info`", () => { + it("returns none when principal is not stacked", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacker-info", + [Cl.principal(address1)], + address1 + ); + expect(response.result).toBeNone(); + }); + + it("returns info before stacked", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() * 2n; + let stackResponse = stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + address1 + ); + expect(stackResponse.result.type).toBe(ClarityType.ResponseOk); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacker-info", + [Cl.principal(stacker.stxAddress)], + address1 + ); + expect(response.result).toBeSome( + Cl.tuple({ + "delegated-to": Cl.none(), + "first-reward-cycle": Cl.uint(1), + "lock-period": Cl.uint(6), + "pox-addr": poxAddressToTuple(stacker.btcAddr), + "reward-set-indexes": Cl.list([ + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + ]), + }) + ); + }); + + it("returns info while stacked", () => { + const stacker = stackers[0]; + const amount = 
getStackingMinimum() * 2n; + let stackResponse = stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + address1 + ); + expect(stackResponse.result.type).toBe(ClarityType.ResponseOk); + simnet.mineEmptyBlocks(2100); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacker-info", + [Cl.principal(stacker.stxAddress)], + address1 + ); + expect(response.result).toBeSome( + Cl.tuple({ + "delegated-to": Cl.none(), + "first-reward-cycle": Cl.uint(1), + "lock-period": Cl.uint(6), + "pox-addr": poxAddressToTuple(stacker.btcAddr), + "reward-set-indexes": Cl.list([ + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + ]), + }) + ); + }); + + it("returns none after stacking expired", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() * 2n; + let stackResponse = stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + address1 + ); + expect(stackResponse.result.type).toBe(ClarityType.ResponseOk); + simnet.mineEmptyBlocks(7350); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacker-info", + [Cl.principal(stacker.stxAddress)], + address1 + ); + expect(response.result).toBeNone(); + }); +}); + +describe("test `check-caller-allowed`", () => { + it("returns true when called directly", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-caller-allowed", + [], + address1 + ); + expect(response.result).toBeBool(true); + }); + + it("returns false when called indirectly by unapproved caller", () => { + const response = simnet.callReadOnlyFn( + "indirect", + "check-caller-allowed", + [], + address1 + ); + + expect(response.result).toBeBool(false); + }); + + it("returns true when called indirectly by approved caller", () => { + allowContractCaller(`${deployer}.indirect`, null, address1); + const response = simnet.callReadOnlyFn( + "indirect", + "check-caller-allowed", + [], + address1 + ); + + expect(response.result).toBeBool(true); + }); + 
+ it("returns false when called indirectly by approved caller which has expired", () => { + allowContractCaller(`${deployer}.indirect`, 10n, address1); + + let response = simnet.callReadOnlyFn( + "indirect", + "check-caller-allowed", + [], + address1 + ); + + expect(response.result).toBeBool(true); + + // mine 11 blocks to expire the caller + simnet.mineEmptyBlocks(11); + + response = simnet.callReadOnlyFn( + "indirect", + "check-caller-allowed", + [], + address1 + ); + + expect(response.result).toBeBool(false); + }); +}); + +describe("test `get-reward-set-size`", () => { + it("returns 0 when no stacking has occurred", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-size", + [Cl.uint(0)], + address1 + ); + expect(response.result).toBeUint(0); + }); + + it("returns number of stackers", () => { + const amount = getStackingMinimum() * 2n; + + stackers.forEach((stacker) => { + const { result } = stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + expect(result).toHaveClarityType(ClarityType.ResponseOk); + }); + + const responseCycle1 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-size", + [Cl.uint(1)], + address1 + ); + expect(responseCycle1.result).toBeUint(3); + + const responseCycle7 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-size", + [Cl.uint(7)], + address1 + ); + expect(responseCycle7.result).toBeUint(0); + }); + + it("returns number of uniq pox address", () => { + const amount = getStackingMinimum() * 2n; + + stackers.forEach((_stacker) => { + const stacker: StackerInfo = { + ..._stacker, + btcAddr: stackers[0].btcAddr, + }; + const { result } = stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + expect(result).toHaveClarityType(ClarityType.ResponseOk); + }); + + const responseCycle1 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-size", + [Cl.uint(1)], + address1 + ); + 
expect(responseCycle1.result).toBeUint(3); // should it be 1? + }); +}); + +describe("test `get-total-ustx-stacked`", () => { + it("returns 0 when no stacking has occurred", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(0)], + address1 + ); + expect(response.result).toBeUint(0); + }); + + it("returns total amount stacked", () => { + const amount = getStackingMinimum() * 2n; + + stackers.forEach((stacker) => { + const { result } = stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + expect(result).toHaveClarityType(ClarityType.ResponseOk); + }); + + const responseCycle1 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(1)], + address1 + ); + expect(responseCycle1.result).toBeUint(amount * 3n); + }); + + it("returns 0 in the cycle before stacking starts", () => { + const amount = getStackingMinimum() * 2n; + + // stacking txs sent in cycle 0, so stackers will be start in cycle 1 + stackers.forEach((stacker) => { + stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + }); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(0)], + address1 + ); + expect(response.result).toBeUint(0); + }); + + it("returns total amount stacked", () => { + const amount = getStackingMinimum() * 2n; + + stackers.forEach((stacker) => { + stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + }); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(amount * 3n); + }); + + it("expires stacking after the stacking duration has finsihed", () => { + const amount = getStackingMinimum() * 2n; + + stackers.forEach((stacker, i) => { + const { result } = stackStx( + stacker, + amount, + 1000, + // wallet_1 will expire after 2 cycles, 
wallet_2 after 4, wallet_3 after 6 + (i + 1) * 2, + amount, + stacker.authId, + stacker.stxAddress + ); + expect(result).toHaveClarityType(ClarityType.ResponseOk); + }); + + const responseCycle3 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(3)], + address1 + ); + expect(responseCycle3.result).toBeUint(amount * 2n); + + const responseCycle5 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(5)], + address1 + ); + expect(responseCycle5.result).toBeUint(amount * 1n); + + const responseCycle7 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(7)], + address1 + ); + expect(responseCycle7.result).toBeUint(0); + }); +}); + +describe("test `get-reward-set-pox-address`", () => { + it("returns none when there is no stacker", () => { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(0), Cl.uint(0)], + address1 + ); + expect(result).toBeNone(); + }); + + it("returns pox address for a stacker", () => { + const amount = getStackingMinimum() * 2n; + stackers.forEach((stacker) => { + stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + }); + + const responseStacker0 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(1), Cl.uint(0)], + address1 + ); + expect(responseStacker0.result).toBeSome( + Cl.tuple({ + "pox-addr": poxAddressToTuple(stackers[0].btcAddr), + signer: Cl.bufferFromHex(stackers[0].signerPubKey), + stacker: Cl.some(Cl.principal(stackers[0].stxAddress)), + "total-ustx": Cl.uint(amount), + }) + ); + const responseStacker1 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(1), Cl.uint(1)], + address1 + ); + expect(responseStacker1.result).toBeSome( + Cl.tuple({ + "pox-addr": poxAddressToTuple(stackers[1].btcAddr), + signer: Cl.bufferFromHex(stackers[1].signerPubKey), + stacker: 
Cl.some(Cl.principal(stackers[1].stxAddress)), + "total-ustx": Cl.uint(amount), + }) + ); + }); +}); + +describe("test `get-stacking-minimum`", () => { + it("returns the correct minimum amount", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacking-minimum", + [], + address1 + ); + expect(response.result).toBeUint(125000000000); + }); +}); + +describe("test `check-pox-addr-version`", () => { + it("returns true for a valid version", () => { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-version", + [poxAddressToTuple(stackers[0].btcAddr).data.version], + address1 + ); + expect(result).toBeBool(true); + }); + + it("returns false for an invalid version (> 6)", () => { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-version", + [Cl.buffer(Buffer.from([7]))], + address1 + ); + expect(result).toBeBool(false); + }); +}); + +describe("test `check-pox-addr-hashbytes`", () => { + it("returns true for a valid address", () => { + const segwitAddress = poxAddressToTuple( + "36op6KLxdjBeBXnkNPi59UDTT2yZZGBYDm" + ); + + const segwitCheck = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [segwitAddress.data.version, segwitAddress.data.hashbytes], + address1 + ); + expect(segwitCheck.result).toBeBool(true); + + const taprootAddress = poxAddressToTuple( + "bc1q82mfyran6u3y8r877vgkje45wlmvh85c7su3ljww9jv762znmrasn5ce59" + ); + + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [taprootAddress.data.version, taprootAddress.data.hashbytes], + address1 + ); + expect(result).toBeBool(true); + }); +}); + +describe("test `check-pox-lock-period`", () => { + it("returns true for a valid lock period", () => { + for (let i = 1; i <= 12; i++) { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-lock-period", + [Cl.uint(i)], + address1 + ); + expect(result).toBeBool(true); + } + }); + + it("returns false lock period of 0", 
() => { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-lock-period", + [Cl.uint(0)], + address1 + ); + expect(result).toBeBool(false); + }); + + it("returns false lock period of 13", () => { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-lock-period", + [Cl.uint(13)], + address1 + ); + expect(result).toBeBool(false); + }); +}); + +describe("test `can-stack-stx` and `minimal-can-stack-stx`", () => { + it("returns true for a valid stacker", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() * 2n; + const canStackArgs = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(amount), + Cl.uint(1), // first reward cycle + Cl.uint(6), // lock period + ]; + + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "can-stack-stx", + canStackArgs, + address1 + ); + expect(result).toBeOk(Cl.bool(true)); + }); + + it("returns error if amount is too low", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() / 2n; + const canStackArgs = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(amount), + Cl.uint(1), // first reward cycle + Cl.uint(6), // lock period + ]; + + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "can-stack-stx", + canStackArgs, + address1 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_THRESHOLD_NOT_MET)); + }); + + it("returns error if period is too low or to high", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() * 2n; + const canStackArgsTooLow = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(amount), + Cl.uint(1), // first reward cycle + Cl.uint(0), // lock period + ]; + + const { result: resultTooLow } = simnet.callReadOnlyFn( + POX_CONTRACT, + "can-stack-stx", + canStackArgsTooLow, + address1 + ); + expect(resultTooLow).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + + const canStackArgsTooHigh = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(amount), + Cl.uint(1), // first reward 
cycle + Cl.uint(13), // lock period + ]; + + const { result: resultTooHigh } = simnet.callReadOnlyFn( + POX_CONTRACT, + "can-stack-stx", + canStackArgsTooHigh, + address1 + ); + expect(resultTooHigh).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + }); + + it("returns error if pox address is invalid", () => { + const addressTupleWrongVersion = Cl.tuple({ + hashbytes: Cl.buffer( + Buffer.from("j89046x7zv6pm4n00qgqp505nvljnfp6xfznyw") + ), + version: Cl.buffer(Buffer.from([7])), + }); + const amount = getStackingMinimum() * 2n; + const canStackArgs = [ + addressTupleWrongVersion, + Cl.uint(amount), + Cl.uint(1), // first reward cycle + Cl.uint(6), // lock period + ]; + const { result: resultWrongVersion } = simnet.callReadOnlyFn( + POX_CONTRACT, + "can-stack-stx", + canStackArgs, + address1 + ); + expect(resultWrongVersion).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_POX_ADDRESS) + ); + }); +}); + +describe("test `check-pox-addr-hashbytes`", () => { + it("returns true for a valid address", () => { + let poxAddr = poxAddressToTuple(stackers[0].btcAddr); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [poxAddr.data.version, poxAddr.data.hashbytes], + address1 + ); + + expect(response.result).toBeBool(true); + }); + + it("returns false when a 20 byte hash is too short", () => { + let version = Cl.bufferFromHex("01"); + let hashbytes = Cl.bufferFromHex("deadbeef"); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [version, hashbytes], + address1 + ); + + expect(response.result).toBeBool(false); + }); + + it("returns false when a 20 byte hash is too long", () => { + let version = Cl.bufferFromHex("04"); + let hashbytes = Cl.bufferFromHex("deadbeefdeadbeefdeadbeef"); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [version, hashbytes], + address1 + ); + + expect(response.result).toBeBool(false); + }); + + it("returns false when a 
32 byte hash is too short", () => { + let version = Cl.bufferFromHex("05"); + let hashbytes = Cl.bufferFromHex("deadbeefdeadbeefdead"); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [version, hashbytes], + address1 + ); + + expect(response.result).toBeBool(false); + }); + + it("returns false when a 32 byte hash is too long", () => { + let version = Cl.bufferFromHex("06"); + let hashbytes = Cl.bufferFromHex("deadbeefdeadbeefdeadbeefdeadbeef01"); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [version, hashbytes], + address1 + ); + + expect(response.result).toBeBool(false); + }); + + it("returns false when the version is too high", () => { + let version = Cl.bufferFromHex("07"); + let hashbytes = Cl.bufferFromHex("deadbeefdeadbeefdeadbeefdeadbeef"); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [version, hashbytes], + address1 + ); + + expect(response.result).toBeBool(false); + }); +}); + +describe("test `minimal-can-stack-stx`", () => { + it("returns true for valid args", () => { + const poxAddr = poxAddressToTuple(stackers[0].btcAddr); + const amount = 1000n; + const firstCycle = 1n; + const lockPeriod = 6n; + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "minimal-can-stack-stx", + [poxAddr, Cl.uint(amount), Cl.uint(firstCycle), Cl.uint(lockPeriod)], + address1 + ); + + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns false for a 0 amount", () => { + const poxAddr = poxAddressToTuple(stackers[0].btcAddr); + const amount = 0n; + const firstCycle = 1n; + const lockPeriod = 6n; + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "minimal-can-stack-stx", + [poxAddr, Cl.uint(amount), Cl.uint(firstCycle), Cl.uint(lockPeriod)], + address1 + ); + + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_AMOUNT)); + }); + + it("returns false for an invalid lock period", () => { + const poxAddr = 
poxAddressToTuple(stackers[0].btcAddr); + const amount = 1000n; + const firstCycle = 1n; + const lockPeriod = 13n; + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "minimal-can-stack-stx", + [poxAddr, Cl.uint(amount), Cl.uint(firstCycle), Cl.uint(lockPeriod)], + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + }); + + it("returns false for a bad address version", () => { + const poxAddr = poxAddressToTuple(stackers[0].btcAddr); + poxAddr.data["version"] = Cl.bufferFromHex("0a"); + const amount = 1000n; + const firstCycle = 1n; + const lockPeriod = 6n; + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "minimal-can-stack-stx", + [poxAddr, Cl.uint(amount), Cl.uint(firstCycle), Cl.uint(lockPeriod)], + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_POX_ADDRESS) + ); + }); + + it("returns false for a bad address hashbytes", () => { + const poxAddr = poxAddressToTuple(stackers[0].btcAddr); + poxAddr.data["hashbytes"] = Cl.bufferFromHex("deadbeef"); + const amount = 1000n; + const firstCycle = 1n; + const lockPeriod = 6n; + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "minimal-can-stack-stx", + [poxAddr, Cl.uint(amount), Cl.uint(firstCycle), Cl.uint(lockPeriod)], + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_POX_ADDRESS) + ); + }); +}); + +describe("test `verify-signer-key-sig`", () => { + it("returns `(ok true)` for a valid signature", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = 
account.client.signPoxSignature(sigArgs); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns `(ok true)` for a valid prior authorization", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + + simnet.callPublicFn( + POX_CONTRACT, + "set-signer-key-authorization", + [ + poxAddr, + Cl.uint(period), + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.bufferFromHex(account.signerPubKey), + Cl.bool(true), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + account.stxAddress + ); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.none(), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns an error if the amount is too high", () => { + const account = stackers[0]; + const maxAmount = getStackingMinimum(); + const amount = maxAmount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + 
+ const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH) + ); + }); + + it("returns an error for a used authorization", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + + simnet.callPrivateFn( + POX_CONTRACT, + "consume-signer-key-authorization", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_SIGNER_AUTH_USED)); + }); + + it("returns an error for an invalid signature", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const 
period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const invalidSignature = signerSignature.slice(0, -2); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(invalidSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_INVALID_SIGNATURE_RECOVER) + ); + }); + + it("returns an error for a signature that does not match", () => { + const account = stackers[0]; + const account2 = stackers[1]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account2.signerPrivKey, + }; + const signerSignature = account2.client.signPoxSignature(sigArgs); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_INVALID_SIGNATURE_PUBKEY) + ); + }); + + it("returns an error if not signature is passed and there is no prior authorization", () => { + const account = stackers[0]; + const amount = 
getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.none(), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); +}); + +describe("test `consume-signer-key-authorization`", () => { + it("returns `(ok true)` for a valid signature", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + + const response = simnet.callPrivateFn( + POX_CONTRACT, + "consume-signer-key-authorization", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns an error for a used authorization", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = 
{ + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + + simnet.callPrivateFn( + POX_CONTRACT, + "consume-signer-key-authorization", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + + const response = simnet.callPrivateFn( + POX_CONTRACT, + "consume-signer-key-authorization", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_SIGNER_AUTH_USED)); + }); +}); + +describe("test `set-signer-key-authorization`", () => { + it("returns `(ok true)` for a valid authorization", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = true; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + const response = setSignerKeyAuthorization( + stacker, + period, + rewardCycle, + topic, + allowed, + maxAmount, + authId + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns `(ok false)` for a valid deauthorization", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = false; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + const response = setSignerKeyAuthorization( + stacker, + period, + rewardCycle, + topic, + allowed, + maxAmount, + authId + ); + expect(response.result).toBeOk(Cl.bool(false)); + }); + + it("cannot be called 
indirectly by an unauthorized caller", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = false; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + const args = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(period), + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.bufferFromHex(stacker.signerPubKey), + Cl.bool(allowed), + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + "indirect", + "set-signer-key-authorization", + args, + stacker.stxAddress + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); + + it("can be called indirectly by an authorized caller", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = true; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + allowContractCaller(`${deployer}.indirect`, null, stacker.stxAddress); + + const args = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(period), + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.bufferFromHex(stacker.signerPubKey), + Cl.bool(allowed), + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + "indirect", + "set-signer-key-authorization", + args, + stacker.stxAddress + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("cannot be called by a different principal", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = true; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + const args = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(period), + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.bufferFromHex(stacker.signerPubKey), + Cl.bool(allowed), + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = 
simnet.callPublicFn( + POX_CONTRACT, + "set-signer-key-authorization", + args, + address3 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); + + it("returns an error for a period of 0", () => { + const stacker = stackers[0]; + const period = 0; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = true; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + const response = setSignerKeyAuthorization( + stacker, + period, + rewardCycle, + topic, + allowed, + maxAmount, + authId + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + }); + + it("returns an error for a reward cycle in the past", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = true; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + simnet.mineEmptyBlocks(1050 * 2); + + const response = setSignerKeyAuthorization( + stacker, + period, + rewardCycle, + topic, + allowed, + maxAmount, + authId + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_INVALID_REWARD_CYCLE)); + }); +}); + +describe("test `get-num-reward-set-pox-addresses`", () => { + it("returns 0 when there are no stackers", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-num-reward-set-pox-addresses", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(0); + }); + + it("returns the number of stackers", () => { + const amount = getStackingMinimum() * 2n; + stackers.forEach((stacker) => { + stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + }); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-num-reward-set-pox-addresses", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(stackers.length); + }); + + it("returns the number of stackers for a specific reward cycle", () => { + 
const amount = getStackingMinimum() * 2n; + stackers.forEach((stacker) => { + stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + }); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-num-reward-set-pox-addresses", + [Cl.uint(2)], + address1 + ); + expect(response.result).toBeUint(stackers.length); + }); + + it("returns 0 when there are expired stackers", () => { + const amount = getStackingMinimum() * 2n; + stackers.forEach((stacker) => { + stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + }); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-num-reward-set-pox-addresses", + [Cl.uint(8)], + address1 + ); + expect(response.result).toBeUint(0); + }); + + it("handles delegated stacking", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address3, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address3 + ); + + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address3 + ); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-num-reward-set-pox-addresses", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(1); + }); +}); diff --git a/contrib/boot-contracts-unit-tests/tests/pool-delegate.test.ts b/contrib/boot-contracts-unit-tests/tests/pool-delegate.test.ts new file mode 100644 index 0000000000..52ea0ae95c --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/pool-delegate.test.ts @@ -0,0 +1,2867 @@ +import { assert, beforeEach, describe, expect, it } from "vitest"; + +import { + Cl, + ClarityType, + ResponseCV, + SomeCV, + TupleCV, + UIntCV, + 
cvToString, +} from "@stacks/transactions"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { + ERRORS, + POX_CONTRACT, + allowContractCaller, + delegateStackExtend, + delegateStackIncrease, + delegateStackStx, + delegateStx, + getPoxInfo, + getStackerInfo, + getStackingMinimum, + stackAggregationCommitIndexed, + stackAggregationIncrease, + stackStx, + stackers, +} from "./helpers"; + +const accounts = simnet.getAccounts(); +const deployer = accounts.get("deployer")!; +const address1 = accounts.get("wallet_1")!; +const address2 = accounts.get("wallet_2")!; +const address3 = accounts.get("wallet_3")!; + +beforeEach(() => { + simnet.setEpoch("3.0"); +}); + +describe("test `get-check-delegation`", () => { + it("returns none when principal is not delegated", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-check-delegation", + [Cl.principal(address1)], + address1 + ); + expect(response.result).toBeNone(); + }); + + it("returns info after delegation", () => { + const amount = getStackingMinimum() * 2n; + + const untilBurnHeight = 10; + const delegateResponse = delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.events).toHaveLength(1); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-check-delegation", + [Cl.principal(address1)], + address1 + ); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal( + "ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG" + ), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ); + }); + + it("does not expire if no burn height limit is set", () => { + const amount = getStackingMinimum() * 2n; + + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + + const delegateInfo = simnet.callReadOnlyFn( + 
POX_CONTRACT, + "get-check-delegation", + [Cl.principal(address1)], + address1 + ); + + simnet.mineEmptyBlocks(10_000); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal( + "ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG" + ), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ); + }); + + it("returns none after burn height expiration", () => { + const amount = getStackingMinimum() * 2n; + simnet.mineEmptyBlock(); + + const untilBurnHeight = 10; + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + + simnet.mineEmptyBlocks(2 + untilBurnHeight - simnet.blockHeight); + // a stacks block height of 12 means a burnchain block height of 11 + assert(simnet.blockHeight === 12); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-check-delegation", + [Cl.principal(address1)], + address1 + ); + expect(delegateInfo.result).toBeNone(); + }); +}); + +describe("test `get-delegation-info`", () => { + it("returns none when principal is not delegated", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-delegation-info", + [Cl.principal(address1)], + address1 + ); + expect(response.result).toBeNone(); + }); + + it("returns info after delegation", () => { + const amount = getStackingMinimum() * 2n; + + const untilBurnHeight = 10; + const delegateResponse = delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.events).toHaveLength(1); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-delegation-info", + [Cl.principal(address1)], + address1 + ); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal( + "ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG" + ), + "pox-addr": 
Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ); + }); + + it("does not expire if no burn height limit is set", () => { + const amount = getStackingMinimum() * 2n; + + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-delegation-info", + [Cl.principal(address1)], + address1 + ); + + simnet.mineEmptyBlocks(10_000); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal( + "ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG" + ), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ); + }); + + it("returns none after burn height expiration", () => { + const amount = getStackingMinimum() * 2n; + simnet.mineEmptyBlock(); + + const untilBurnHeight = 10; + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + + simnet.mineEmptyBlocks(2 + untilBurnHeight - simnet.blockHeight); + // a stacks block height of 12 means a burnchain block height of 11 + assert(simnet.blockHeight === 12); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-delegation-info", + [Cl.principal(address1)], + address1 + ); + expect(delegateInfo.result).toBeNone(); + }); +}); + +describe("test `get-allowance-contract-callers`", () => { + it("returns `none` when not allowed", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-allowance-contract-callers", + [Cl.principal(address1), Cl.contractPrincipal(deployer, "indirect")], + address1 + ); + expect(response.result).toBeNone(); + }); + + it("returns `(some none)` when allowed indefinitely", () => { + allowContractCaller(`${deployer}.indirect`, null, address1); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-allowance-contract-callers", + [Cl.principal(address1), Cl.contractPrincipal(deployer, 
"indirect")], + address1 + ); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "until-burn-ht": Cl.none(), + }) + ); + }); + + it("returns `(some (some X))` when allowed until burn height X", () => { + const untilBurnHeight = 10; + allowContractCaller(`${deployer}.indirect`, untilBurnHeight, address1); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-allowance-contract-callers", + [Cl.principal(address1), Cl.contractPrincipal(deployer, "indirect")], + address1 + ); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ); + }); + + it("returns `none` when a different caller is allowed", () => { + allowContractCaller(`${deployer}.not-indirect`, null, address1); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-allowance-contract-callers", + [Cl.principal(address1), Cl.contractPrincipal(deployer, "indirect")], + address1 + ); + expect(response.result).toBeNone(); + }); +}); + +describe("test `delegate-stack-stx`", () => { + it("does not delegate if principal is not delegated", () => { + const amount = getStackingMinimum() * 2n; + const { result } = delegateStackStx( + address2, + amount, + stackers[0].btcAddr, + 1000, + 6, + address1 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED)); + }); + + it("can call delegate-stack-stx", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(7350), + }) + ); + }); + + it("returns an error for stacking too early", () => { + const amount = getStackingMinimum() * 2n; + const startBurnHeight = 3000; + const lockPeriod = 6; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); 
+ const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_INVALID_START_BURN_HEIGHT)); + }); + + it("cannot be called indirectly by an unapproved caller", () => { + const amount = getStackingMinimum() * 2n; + const startBurnHeight = 1000; + const lockPeriod = 6; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + + const response = simnet.callPublicFn( + "indirect", + "delegate-stack-stx", + [ + Cl.principal(address1), + Cl.uint(amount), + poxAddressToTuple(stackers[0].btcAddr), + Cl.uint(startBurnHeight), + Cl.uint(lockPeriod), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly by an approved caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const startBurnHeight = 1000; + const lockPeriod = 6; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + allowContractCaller(`${deployer}.indirect`, null, address2); + + const response = simnet.callPublicFn( + "indirect", + "delegate-stack-stx", + [ + Cl.principal(address1), + Cl.uint(amount), + poxAddressToTuple(account.btcAddr), + Cl.uint(startBurnHeight), + Cl.uint(lockPeriod), + ], + address2 + ); + expect(response.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(7350), + }) + ); + }); + + it("returns an error if not delegated", () => { + const amount = getStackingMinimum() * 2n; + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED)); + }); + + it("returns an error if delegated to someone else", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, null, stackers[0].btcAddr, 
address1); + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + 1000, + 6, + address3 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED)); + }); + + it("returns an error if stacking more than delegated", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount + 1n, + stackers[0].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_DELEGATION_TOO_MUCH_LOCKED)); + }); + + it("returns an error if stacking to a different pox address", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stackers[1].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_DELEGATION_POX_ADDR_REQUIRED)); + }); + + it("can call delegate-stack-stx when no pox address was set", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, null, null, address1); + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(7350), + }) + ); + }); + + it("returns an error if stacking beyond the delegation height", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, 2000, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_DELEGATION_EXPIRES_DURING_LOCK)); + }); + + it("returns an error if stacker is already stacked", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() * 2n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + 
delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + delegateStackStx( + address1, + amount, + stacker.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + const { result } = delegateStackStx( + address1, + amount, + stacker.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_ALREADY_STACKED)); + }); + + it("returns an error if stacker does not have enough unlocked stacks", () => { + const stacker = stackers[0]; + const amount = + simnet.getAssetsMap().get("STX")?.get(stacker.stxAddress)! + 10n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stacker.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS)); + }); + + it("returns an error if amount is 0", () => { + const stacker = stackers[0]; + const amount = 0; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stacker.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_AMOUNT)); + }); +}); + +describe("test `stack-aggregation-commit-indexed`", () => { + it("returns `(ok uint)` on success", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + const { result } = delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(7350), + }) + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + 
const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeOk(Cl.uint(0)); + }); + + it("returns an error when there is no partially stacked STX", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL) + ); + }); + + it("returns an error when called by an unauthorized caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + 
delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + "indirect", + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly by an authorized caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + allowContractCaller(`${deployer}.indirect`, null, address2); + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + "indirect", + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + 
expect(response.result).toBeOk(Cl.uint(0)); + }); + + it("returns an error when called with no signature or prior authorization", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const authId = 1; + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.none(), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); + + it("returns an error when the stacking threshold is not met", () => { + const account = stackers[0]; + const amount = getStackingMinimum() / 2n; + const maxAmount = amount * 4n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + let response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + 
Cl.int(ERRORS.ERR_STACKING_THRESHOLD_NOT_MET) + ); + }); +}); + +describe("test `stack-aggregation-commit`", () => { + it("returns `(ok uint)` on success", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + const { result } = delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(7350), + }) + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns an error when there is no partially stacked STX", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = 
account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL) + ); + }); + + it("returns an error when called by an unauthorized caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + "indirect", + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly by an authorized caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + allowContractCaller(`${deployer}.indirect`, null, address2); + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + 
const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + "indirect", + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns an error when called with no signature or prior authorization", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const authId = 1; + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.none(), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); + + it("returns an error when the stacking threshold is not met", () => { + const account = stackers[0]; + const amount = getStackingMinimum() / 2n; + const maxAmount = amount * 4n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + 
const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + let response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_THRESHOLD_NOT_MET) + ); + }); +}); + +describe("test `delegate-stack-increase`", () => { + it("returns `(ok )` on success", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount), + }) + ); + }); + + it("can be called after committing", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = 
stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount), + }) + ); + + // the amount in the reward set should not update until after + // the delegator calls `stack-aggregation-increase` + let info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + let tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + + response = stackAggregationIncrease( + account, + rewardCycle, + index, + maxAmount, + authId, + address2 + ); + expect(response.result).toBeOk(Cl.bool(true)); + + // check that the amount was increased + info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(maxAmount); + }); + + it("cannot be called if not delegated", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + + // Arithmetic underflow is not caught gracefully, so this triggers a runtime error. + // Preferably, it would return a `ERR_STACKING_NOT_DELEGATED` error. 
+ expect(() => + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ) + ).toThrow(); + }); + + it("cannot be called if not stacked", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + + // Arithmetic underflow is not caught gracefully, so this triggers a runtime error. + // Preferably, it would return a `ERR_STACKING_NOT_DELEGATED` error. + expect(() => + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ) + ).toThrow(); + }); + + it("cannot be called in last cycle of delegation", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + // mine enough blocks to reach the last cycle of the delegation + simnet.mineEmptyBlocks(6 * cycleLength); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + }); + + it("cannot be called after delegation has expired", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + delegateStx(maxAmount, address2, null, account.btcAddr, 
account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + // mine enough blocks to end the delegation + simnet.mineEmptyBlocks(7 * cycleLength); + + // Arithmetic underflow is not caught gracefully, so this triggers a runtime error. + // Preferably, it would return a `ERR_STACKING_NOT_DELEGATED` error. + expect(() => + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ) + ).toThrow(); + }); + + it("requires a positive increase amount", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + 0, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_AMOUNT)); + }); + + it("cannot be called indirectly by an unauthorized caller", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + const delegateStackIncreaseArgs = [ + Cl.principal(account.stxAddress), + poxAddressToTuple(account.btcAddr), + Cl.uint(maxAmount - amount), + ]; + let response = simnet.callPublicFn( + "indirect", + "delegate-stack-increase", + delegateStackIncreaseArgs, + address2 + ); + expect(response.result).toBeErr( + 
Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly by an authorized caller", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + allowContractCaller(`${deployer}.indirect`, null, address2); + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + const delegateStackIncreaseArgs = [ + Cl.principal(account.stxAddress), + poxAddressToTuple(account.btcAddr), + Cl.uint(maxAmount - amount), + ]; + let response = simnet.callPublicFn( + "indirect", + "delegate-stack-increase", + delegateStackIncreaseArgs, + address2 + ); + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount), + }) + ); + }); + + it("cannot be called for a solo stacker", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const authId = 1; + + stackStx( + account, + amount, + startBurnHeight, + lockPeriod, + maxAmount, + authId, + account.stxAddress + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACKING_NOT_DELEGATED)); + }); + + it("can only be called by the delegate", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + 
account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address3 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can increase to the total account balance", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const balance = simnet.getAssetsMap().get("STX")?.get(account.stxAddress)!; + + delegateStx(balance, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + balance - amount, + address2 + ); + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(balance), + }) + ); + }); + + it("cannot increase to more than the total account balance", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const balance = simnet.getAssetsMap().get("STX")?.get(account.stxAddress)!; + + delegateStx(balance, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + balance - amount + 1n, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS) + ); + }); + + it("cannot increase to more than the delegated amount", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const 
startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_TOO_MUCH_LOCKED) + ); + }); +}); + +describe("test `stack-aggregation-increase`", () => { + it("returns `(ok uint)` and increases stacked amount on success", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + // check the amount in the reward set + let info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + let tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + + response = stackAggregationIncrease( + account, + rewardCycle, + index, + maxAmount, + authId, + address2 + ); + expect(response.result).toBeOk(Cl.bool(true)); + + // check that the amount was increased + info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + tuple = (info.result as SomeCV).value as 
TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(maxAmount); + }); + + it("cannot be called indirectly from unauthorized caller", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateIncrease, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const args = [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.uint(index), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + response = simnet.callPublicFn( + "indirect", + "stack-aggregation-increase", + args, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + // check that the amount was not increased + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + }); + + it("can be called indirectly from an 
authorized caller", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + allowContractCaller(`${deployer}.indirect`, null, address2); + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateIncrease, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const args = [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.uint(index), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + response = simnet.callPublicFn( + "indirect", + "stack-aggregation-increase", + args, + address2 + ); + expect(response.result).toBeOk(Cl.bool(true)); + + // check that the amount was increased + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(maxAmount); + }); + + it("returns an error for current reward cycle", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + 
const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + simnet.mineEmptyBlocks(1100); + + response = stackAggregationIncrease( + account, + rewardCycle, + index, + maxAmount, + authId, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + + // check that the amount was not increased + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + }); + + it("returns an error for switching pox address", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + 
const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateIncrease, + poxAddress: stackers[1].btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const args = [ + poxAddressToTuple(stackers[1].btcAddr), + Cl.uint(rewardCycle), + Cl.uint(index), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-increase", + args, + address2 + ); + // Note: I don't think it is possible to reach the `ERR_DELEGATION_WRONG_REWARD_SLOT` error + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL) + ); + + // check that the amount was not increased + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + }); + + it("cannot increase more than the authorized amount", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const authAmount = minAmount * 3n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + authAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + // 
check the amount in the reward set + let info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + let tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + + response = stackAggregationIncrease( + account, + rewardCycle, + index, + authAmount, + authId, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH) + ); + + // check that the amount was not increased + info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + }); + + it("cannot change signers", () => { + const account = stackers[0]; + const account1 = stackers[1]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const authAmount = minAmount * 3n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + authAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateIncrease, + poxAddress: account.btcAddr, + signerPrivateKey: account1.signerPrivKey, + }; + const signerSignature = account1.client.signPoxSignature(sigArgs); + const signerKey = 
Cl.bufferFromHex(account1.signerPubKey); + + const args = [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.uint(index), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-increase", + args, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_INVALID_SIGNER_KEY)); + + // check that the amount was not increased + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + }); +}); + +describe("test `delegate-stack-extend`", () => { + it("returns `(ok )` on success", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + // unlock height should be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(8400), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(7); + }); + + it("can extend after commit", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + 
account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + // unlock height should be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(8400), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(7); + }); + + it("can extend after lock has started", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + + simnet.mineEmptyBlocks(1100); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + // unlock height should be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(8400), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(7); + }); + + it("can extend multiple times", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 
2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 2, + address2 + ); + // unlock height should be cycle 4: 4 * 1050 = 4200 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(4200), + }) + ); + + response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 3, + address2 + ); + // unlock height should be cycle 7: 7 * 1050 = 7350 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(7350), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(6); + }); + + it("can extend multiple times while locked", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + simnet.mineEmptyBlocks(1100); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 2, + address2 + ); + // unlock height should be cycle 4: 4 * 1050 = 4200 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(4200), + }) + ); + + simnet.mineEmptyBlocks(3000); + + response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 3, + address2 + ); + // unlock height should be cycle 7: 7 * 1050 = 7350 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: 
Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(7350), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(3); + expect(tuple.data["lock-period"]).toBeUint(4); + }); + + it("cannot extend 0 cycles", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 0, + address2 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("cannot extend beyond 12 cycles", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 12, + address2 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("cannot 
be called indirectly by an unauthorized caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + const delegateStackExtendArgs = [ + Cl.principal(account.stxAddress), + poxAddressToTuple(account.btcAddr), + Cl.uint(6), + ]; + let response = simnet.callPublicFn( + "indirect", + "delegate-stack-extend", + delegateStackExtendArgs, + address2 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("can be called indirectly by an authorized caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + allowContractCaller(`${deployer}.indirect`, null, address2); + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + const delegateStackExtendArgs = [ + Cl.principal(account.stxAddress), + poxAddressToTuple(account.btcAddr), + Cl.uint(6), + ]; + let response = simnet.callPublicFn( + "indirect", + "delegate-stack-extend", + delegateStackExtendArgs, + address2 + ); + + // unlock height should be cycle 8: 8 * 1050 = 84000 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(8400), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value 
as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(7); + }); + + it("cannot extend if not locked", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACK_EXTEND_NOT_LOCKED)); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalNone); + }); + + it("cannot extend after lock has expired", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + period, + address2 + ); + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + + simnet.mineEmptyBlocks(2200); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACK_EXTEND_NOT_LOCKED)); + + const info = getStackerInfo(account.stxAddress); + expect(info.result).toBeNone(); + }); + + it("cannot extend at unlock height", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + period, + address2 + ); + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + + // mine 
until the unlock height + simnet.mineEmptyBlocks(2100 - simnet.blockHeight); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACK_EXTEND_NOT_LOCKED)); + + const info = getStackerInfo(account.stxAddress); + expect(info.result).toBeNone(); + }); + + it("cannot extend a solo-stacked stacker", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const period = 1; + const authId = 1; + + stackStx( + account, + amount, + 1000, + period, + maxAmount, + authId, + account.stxAddress + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + account.stxAddress + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACKING_NOT_DELEGATED)); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("cannot extend a stacker not delegated to the caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address3 + ); + // unlock height should be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + 
expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("cannot extend to a different pox addr", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + stackers[1].btcAddr, + 6, + address2 + ); + // unlock height should be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_POX_ADDR_REQUIRED) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("can extend to a different pox addr if one was not specified", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, null, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + stackers[1].btcAddr, + 6, + address2 + ); + // unlock height should be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(8400), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(7); + }); + + it("can extend within the delegation window", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; 
+ const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, 5250, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 3, + address2 + ); + // unlock height should be cycle 5: 5 * 1050 = 5250 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(5250), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(4); + }); + + it("cannot extend outside the delegation window", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, 5249, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 3, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_EXPIRES_DURING_LOCK) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); +}); + +describe("test `get-partial-stacked-by-cycle`", () => { + it("returns the correct amount", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + 
account.btcAddr, + 1000, + 1, + address2 + ); + + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-partial-stacked-by-cycle", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.principal(address2), + ], + address2 + ); + expect(info.result).toBeSome( + Cl.tuple({ + "stacked-amount": Cl.uint(amount), + }) + ); + }); + + it("returns `none` when there are no partially stacked STX", () => { + const account = stackers[0]; + const rewardCycle = 1; + + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-partial-stacked-by-cycle", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.principal(address2), + ], + address2 + ); + expect(info.result).toBeNone(); + }); + + it("returns `none` after fully stacked", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-partial-stacked-by-cycle", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.principal(address2), + ], + address2 + ); + expect(info.result).toBeSome( + Cl.tuple({ + "stacked-amount": Cl.uint(amount), + }) + ); + + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + + info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-partial-stacked-by-cycle", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.principal(address2), + ], + address2 + ); + expect(info.result).toBeNone(); + }); + + it("returns the correct amount for multiple cycles", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 4; + + delegateStx(maxAmount, address2, null, account.btcAddr, 
account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-partial-stacked-by-cycle", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.principal(address2), + ], + address2 + ); + expect(info.result).toBeSome( + Cl.tuple({ + "stacked-amount": Cl.uint(amount), + }) + ); + }); +}); diff --git a/contrib/boot-contracts-unit-tests/tests/pool-stacker.test.ts b/contrib/boot-contracts-unit-tests/tests/pool-stacker.test.ts new file mode 100644 index 0000000000..eb609ca0ad --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/pool-stacker.test.ts @@ -0,0 +1,377 @@ +import { beforeEach, describe, expect, it } from "vitest"; +import { + ERRORS, + POX_CONTRACT, + allowContractCaller, + checkDelegateStxEvent, + delegateStx, + disallowContractCaller, + revokeDelegateStx, + stackers, +} from "./helpers"; +import { Cl } from "@stacks/transactions"; +import { poxAddressToTuple } from "@stacks/stacking"; + +const accounts = simnet.getAccounts(); +const deployer = accounts.get("deployer")!; +const address1 = accounts.get("wallet_1")!; +const address2 = accounts.get("wallet_2")!; +const address3 = accounts.get("wallet_3")!; +const initial_balance = 100000000000000n; + +beforeEach(() => { + simnet.setEpoch("3.0"); +}); + +describe("delegate-stx", () => { + const amount = 1000000; + const untilBurnHeight = 1000; + + it("returns `(ok true)` on success", () => { + const delegateResponse = delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + }); + + it("can omit the `until-burn-ht`", () => { + const delegateResponse = delegateStx( + amount, + address2, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + }); + + it("can omit the `pox-addr`", () => { + const delegateResponse = delegateStx( + 
amount, + address2, + null, + null, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + }); + + it("emits the correct event on success", () => { + const delegateResponse = delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.events).toHaveLength(1); + let event = delegateResponse.events[0]; + checkDelegateStxEvent( + event, + address1, + initial_balance, + 0n, + 0n, + BigInt(amount), + address2, + stackers[0].btcAddr, + BigInt(untilBurnHeight) + ); + }); + + it("fails if the account is already delegated", () => { + let delegateResponse = delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + delegateResponse = delegateStx( + amount, + address3, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_ALREADY_DELEGATED) + ); + }); + + it("fails if called indirectly through an unapproved contract", () => { + const delegateStxArgs = [ + Cl.uint(amount), + Cl.principal(address2), + Cl.none(), + Cl.none(), + ]; + + const delegateResponse = simnet.callPublicFn( + "indirect", + "delegate-stx", + delegateStxArgs, + address1 + ); + + expect(delegateResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly through an approved contract", () => { + allowContractCaller(`${deployer}.indirect`, null, address1); + + const delegateStxArgs = [ + Cl.uint(amount), + Cl.principal(address2), + Cl.none(), + Cl.none(), + ]; + + const delegateResponse = simnet.callPublicFn( + "indirect", + "delegate-stx", + delegateStxArgs, + address1 + ); + + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + }); + + it("fails if the pox address version is invalid", () => { + let poxAddr = poxAddressToTuple(stackers[0].btcAddr); + poxAddr.data["version"] = Cl.bufferFromHex("0a"); + const delegateStxArgs = [ + Cl.uint(amount), + 
Cl.principal(address2), + Cl.none(), + Cl.some(poxAddr), + ]; + + const delegateResponse = simnet.callPublicFn( + POX_CONTRACT, + "delegate-stx", + delegateStxArgs, + address1 + ); + + expect(delegateResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_POX_ADDRESS) + ); + }); + + it("fails if the pox address hashbytes is invalid", () => { + let poxAddr = poxAddressToTuple(stackers[0].btcAddr); + poxAddr.data["hashbytes"] = Cl.bufferFromHex("deadbeef"); + const delegateStxArgs = [ + Cl.uint(amount), + Cl.principal(address2), + Cl.none(), + Cl.some(poxAddr), + ]; + + const delegateResponse = simnet.callPublicFn( + POX_CONTRACT, + "delegate-stx", + delegateStxArgs, + address1 + ); + + expect(delegateResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_POX_ADDRESS) + ); + }); +}); + +describe("revoke-delegate-stx", () => { + it("returns prior state on success", () => { + const amount = 1000000; + const untilBurnHeight = 123; + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ) + ); + }); + + it("fails if the account is not delegated", () => { + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_ALREADY_REVOKED) + ); + }); + + it("fails if the delegation was already revoked", () => { + const amount = 1000000; + const untilBurnHeight = 123; + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + + // First revoke passes + let revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": 
Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ) + ); + + // Second revoke fails + revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_ALREADY_REVOKED) + ); + }); + + it("fails if the delegation has expired", () => { + const amount = 1000000; + const untilBurnHeight = 3; + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + while (simnet.blockHeight <= untilBurnHeight) { + simnet.mineEmptyBlock(); + } + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_ALREADY_REVOKED) + ); + }); + + it("fails when called by unapproved caller", () => { + const revokeResponse = simnet.callPublicFn( + "indirect", + "revoke-delegate-stx", + [], + address1 + ); + + expect(revokeResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("passes when called by approved caller", () => { + const amount = 1000000; + const untilBurnHeight = 123; + + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + allowContractCaller(`${deployer}.indirect`, null, address1); + + const revokeResponse = simnet.callPublicFn( + "indirect", + "revoke-delegate-stx", + [], + address1 + ); + + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ) + ); + }); +}); + +describe("allow-contract-caller", () => { + it("returns `(ok true)` on success", () => { + const response = allowContractCaller( + `${deployer}.indirect`, + null, + address1 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("cannot be called indirectly", () => { + const 
response = simnet.callPublicFn( + "indirect", + "allow-contract-caller", + [Cl.principal(`${deployer}.indirect`), Cl.none()], + address1 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); +}); + +describe("disallow-contract-caller", () => { + it("returns `(ok true)` on success", () => { + allowContractCaller(`${deployer}.indirect`, null, address1); + const response = disallowContractCaller(`${deployer}.indirect`, address1); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("cannot be called indirectly", () => { + const response = simnet.callPublicFn( + "indirect", + "disallow-contract-caller", + [Cl.principal(`${deployer}.indirect`)], + address1 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("cannot be called indirectly, even by an approved caller", () => { + allowContractCaller(`${deployer}.indirect`, null, address1); + const response = simnet.callPublicFn( + "indirect", + "disallow-contract-caller", + [Cl.principal(`${deployer}.indirect`)], + address1 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("returns `(ok false)` if the caller was not allowed", () => { + const response = disallowContractCaller(`${deployer}.indirect`, address1); + expect(response.result).toBeOk(Cl.bool(false)); + }); +}); diff --git a/contrib/boot-contracts-unit-tests/tests/scenarios.test.ts b/contrib/boot-contracts-unit-tests/tests/scenarios.test.ts new file mode 100644 index 0000000000..a5ae2fe5b2 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/scenarios.test.ts @@ -0,0 +1,345 @@ +import { beforeEach, describe, expect, it } from "vitest"; +import { + ERRORS, + delegateStackExtend, + delegateStackIncrease, + delegateStackStx, + delegateStx, + getPoxInfo, + getStackingMinimum, + revokeDelegateStx, + stackers, +} from "./helpers"; +import { Cl } from "@stacks/transactions"; +import { poxAddressToTuple } 
from "@stacks/stacking"; + +const accounts = simnet.getAccounts(); +const address1 = accounts.get("wallet_1")!; +const address2 = accounts.get("wallet_2")!; +const address3 = accounts.get("wallet_3")!; + +beforeEach(() => { + simnet.setEpoch("3.0"); +}); + +describe("switching delegates`", () => { + it("is allowed while stacked", () => { + const amount = getStackingMinimum() * 2n; + + // Delegate to address2 + let delegateResponse = delegateStx( + amount, + address2, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address2 stacks + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + simnet.blockHeight, + 4, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(5250), + }) + ); + + // Revoke delegation to address2 + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ) + ); + + // Delegate to address3 + delegateResponse = delegateStx( + amount, + address3, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + }); + + it("revoked delegate cannot extend or increase", () => { + const stackingMinimum = getStackingMinimum(); + const amount = stackingMinimum * 2n; + + // Delegate to address2 + let delegateResponse = delegateStx( + amount, + address2, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address2 stacks + const { result } = delegateStackStx( + address1, + stackingMinimum, + stackers[0].btcAddr, + simnet.blockHeight, + 2, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(stackingMinimum), + 
stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(3150), + }) + ); + + // Revoke delegation to address2 + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ) + ); + + // Delegate to address3 + delegateResponse = delegateStx( + amount, + address3, + null, + stackers[1].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address2 tries to extend + let extendResponse = delegateStackExtend( + address1, + stackers[0].btcAddr, + 1n, + address2 + ); + expect(extendResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + // Address2 tries to increase + let increaseResponse = delegateStackIncrease( + address1, + stackers[0].btcAddr, + 100n, + address2 + ); + expect(increaseResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("new delegate cannot lock before previous delegation unlocks", () => { + const stackingMinimum = getStackingMinimum(); + const amount = stackingMinimum * 2n; + const poxInfo = getPoxInfo(); + let unlockHeight = poxInfo.rewardCycleLength * 3n; + + // Delegate to address2 + let delegateResponse = delegateStx( + amount, + address2, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address2 stacks + let delegateStackStxResponse = delegateStackStx( + address1, + stackingMinimum, + stackers[0].btcAddr, + simnet.blockHeight, + 2, + address2 + ); + expect(delegateStackStxResponse.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(stackingMinimum), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(unlockHeight), + }) + ); + + // Revoke delegation to address2 + const revokeResponse = revokeDelegateStx(address1); + 
expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ) + ); + + // Delegate to address3 + delegateResponse = delegateStx( + amount, + address3, + null, + stackers[1].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address3 tries to re-stack + delegateStackStxResponse = delegateStackStx( + address1, + stackingMinimum, + stackers[1].btcAddr, + simnet.blockHeight, + 2, + address3 + ); + expect(delegateStackStxResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_ALREADY_STACKED) + ); + + // Address3 can stack after unlock + simnet.mineEmptyBlocks(Number(unlockHeight) - simnet.blockHeight + 1); + unlockHeight = poxInfo.rewardCycleLength * 6n; + + delegateStackStxResponse = delegateStackStx( + address1, + stackingMinimum + 2n, + stackers[1].btcAddr, + simnet.blockHeight, + 2, + address3 + ); + expect(delegateStackStxResponse.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(stackingMinimum + 2n), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(unlockHeight), + }) + ); + }); + + it("New delegate cannot extend or increase", () => { + const stackingMinimum = getStackingMinimum(); + const amount = stackingMinimum * 2n; + + // Delegate to address2 + let delegateResponse = delegateStx( + amount, + address2, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address2 stacks + const { result } = delegateStackStx( + address1, + stackingMinimum, + stackers[0].btcAddr, + simnet.blockHeight, + 2, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(stackingMinimum), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(3150), + }) + ); + + // Revoke delegation to address2 + const revokeResponse = revokeDelegateStx(address1); + 
expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ) + ); + + // Delegate to address3 + delegateResponse = delegateStx( + amount, + address3, + null, + stackers[1].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address3 tries to extend to same pox address + let extendResponse = delegateStackExtend( + address1, + stackers[0].btcAddr, + 1n, + address3 + ); + expect(extendResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + // Address3 tries to extend to new pox address + extendResponse = delegateStackExtend( + address1, + stackers[1].btcAddr, + 1n, + address3 + ); + expect(extendResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + // Address3 tries to increase with same pox address + let increaseResponse = delegateStackIncrease( + address1, + stackers[0].btcAddr, + 100n, + address3 + ); + expect(increaseResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + // Address3 tries to increase with new pox address + increaseResponse = delegateStackIncrease( + address1, + stackers[1].btcAddr, + 100n, + address3 + ); + expect(increaseResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); +}); diff --git a/contrib/boot-contracts-unit-tests/tests/solo-stacker.test.ts b/contrib/boot-contracts-unit-tests/tests/solo-stacker.test.ts new file mode 100644 index 0000000000..b3531593b4 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/solo-stacker.test.ts @@ -0,0 +1,1464 @@ +import { Cl, ClarityType, isClarityType } from "@stacks/transactions"; +import { describe, expect, it, beforeEach, assert } from "vitest"; + +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { Simnet } from 
"@hirosystems/clarinet-sdk"; +import { + ERRORS, + POX_CONTRACT, + allowContractCaller, + burnHeightToRewardCycle, + delegateStackStx, + delegateStx, + getPoxInfo, + stackExtend, + stackIncrease, + stackStx, + stackers, +} from "./helpers"; + +const accounts = simnet.getAccounts(); +const deployer = accounts.get("deployer")!; +const address1 = accounts.get("wallet_1")!; +const address2 = accounts.get("wallet_2")!; + +const initialSTXBalance = 100_000_000 * 1e6; + +const maxAmount = 20960000000000; + +const getTotalStacked = ( + simnet: Simnet, + poxContract: string, + cycleId: number | bigint +) => { + const totalStacked = simnet.callReadOnlyFn( + poxContract, + "get-total-ustx-stacked", + [Cl.uint(cycleId)], + address1 + ); + // @ts-ignore + return totalStacked.result.value as bigint; +}; + +const stackingThreshold = 125000000000; + +describe("pox-4", () => { + beforeEach(async () => { + simnet.setEpoch("3.0"); + }); + + it("can call get-pox-info", async () => { + const poxInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-pox-info", + [], + address1 + ); + assert(isClarityType(poxInfo.result, ClarityType.ResponseOk)); + }); + + /* + (stack-stx (amount-ustx uint) + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (start-burn-ht uint) + (lock-period uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + */ + + describe("stack-stx", () => { + it("can stack stxs", async () => { + const account = stackers[0]; + const rewardCycle = 0; + const burnBlockHeight = 1; + const period = 10; + const authId = 1; + + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + 
Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + address1 + ); + + expect(response.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(187500000000), + "signer-key": Cl.bufferFromHex(account.signerPubKey), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(11550), + }) + ); + + const stxAccount = simnet.runSnippet(`(stx-account '${address1})`); + expect(stxAccount).toBeTuple({ + locked: Cl.uint(ustxAmount), + unlocked: Cl.uint(initialSTXBalance - ustxAmount), + "unlock-height": Cl.uint(11550), + }); + }); + + it("unlocks stxs after period is ended", async () => { + const account = stackers[0]; + const rewardCycle = 0; + const burnBlockHeight = 1; + const period = 2; + const authId = 1; + + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = initialSTXBalance * 0.2; // lock 20% of total balance + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + address1 + ); + expect(response.result).toHaveClarityType(ClarityType.ResponseOk); + + // try to transfer 90% of balance (should fail because 20% is locked) + const { result: resultErr } = simnet.transferSTX( + initialSTXBalance * 0.9, + address2, + address1 + ); + expect(resultErr).toBeErr(Cl.uint(1)); + + 
simnet.mineEmptyBlocks(4000); + + const stxAccount = simnet.runSnippet(`(stx-account '${address1})`); + expect(stxAccount).toBeTuple({ + locked: Cl.uint(0), + unlocked: Cl.uint(initialSTXBalance), + "unlock-height": Cl.uint(0), + }); + + // try to transfer 90% of balance (should succeed because period is ended) + const { result: resultOk } = simnet.transferSTX( + initialSTXBalance * 0.9, + address2, + address1 + ); + expect(resultOk).toBeOk(Cl.bool(true)); + }); + + it("can stack stxs from multiple accounts with the same key", () => { + const signerAccount = stackers[0]; + const rewardCycle = 0; + const burnBlockHeight = 0; + const period = 10; + + const signerAccountKey = Cl.bufferFromHex(signerAccount.signerPubKey); + + let i = 0; + for (const account of stackers) { + const authId = i; + i++; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: signerAccount.signerPrivKey, + }; + const signerSignature = signerAccount.client.signPoxSignature(sigArgs); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerAccountKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + account.stxAddress + ); + + expect(response.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(187500000000), + "signer-key": Cl.bufferFromHex(signerAccount.signerPubKey), + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(11550), + }) + ); + + const stxAccount = simnet.runSnippet( + `(stx-account '${account.stxAddress})` + ); + expect(stxAccount).toBeTuple({ + locked: Cl.uint(ustxAmount), + unlocked: Cl.uint(initialSTXBalance - ustxAmount), + "unlock-height": Cl.uint(11550), + }); + } + }); + + 
it("returns an error for an invalid start height", async () => { + const account = stackers[0]; + const burnBlockHeight = 2000; + const period = 10; + const authId = 1; + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const response = stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + ustxAmount, + authId, + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_INVALID_START_BURN_HEIGHT) + ); + }); + + it("cannot be called indirectly by an unapproved caller", async () => { + const account = stackers[0]; + const rewardCycle = 0; + const burnBlockHeight = 1; + const period = 10; + const authId = 1; + + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const response = simnet.callPublicFn( + "indirect", + "stack-stx", + stackStxArgs, + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly by an approved caller", async () => { + const account = stackers[0]; + const rewardCycle = 0; + const burnBlockHeight = 1; + const period = 10; + const authId = 1; + + allowContractCaller(`${deployer}.indirect`, null, address1); + + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = 
Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const response = simnet.callPublicFn( + "indirect", + "stack-stx", + stackStxArgs, + address1 + ); + + expect(response.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(187500000000), + "signer-key": Cl.bufferFromHex(account.signerPubKey), + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(11550), + }) + ); + + const stxAccount = simnet.runSnippet( + `(stx-account '${account.stxAddress})` + ); + expect(stxAccount).toBeTuple({ + locked: Cl.uint(ustxAmount), + unlocked: Cl.uint(initialSTXBalance - ustxAmount), + "unlock-height": Cl.uint(11550), + }); + }); + + it("returns an error if the stacker is already stacked", async () => { + const account = stackers[0]; + const burnBlockHeight = 0; + const period = 10; + const authId = 1; + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + ustxAmount, + authId, + address1 + ); + + const response = stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + ustxAmount, + authId, + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_ALREADY_STACKED) + ); + }); + + it("returns an error if the stacker is already delegated", async () => { + const account = stackers[0]; + const burnBlockHeight = 0; + const period = 10; + const authId = 1; + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + delegateStx( + ustxAmount, + address2, + burnBlockHeight, + account.btcAddr, + address1 + ); + + const response = stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + ustxAmount, + authId, + address1 + ); + + expect(response.result).toBeErr( + 
Cl.int(ERRORS.ERR_STACKING_ALREADY_DELEGATED) + ); + }); + + it("returns an error if the stacker has an insufficient balance", async () => { + const account = stackers[0]; + const burnBlockHeight = 0; + const period = 10; + const authId = 1; + const ustxAmount = simnet.getAssetsMap().get("STX")?.get(address1)! + 10n; + + const response = stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + ustxAmount, + authId, + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS) + ); + }); + + it("returns an error if the signature is already used", async () => { + const account = stackers[0]; + const burnBlockHeight = 0; + const period = 10; + const authId = 1; + const ustxAmount = Math.floor(stackingThreshold * 1.5); + const rewardCycle = 0; + + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + + simnet.callPrivateFn( + POX_CONTRACT, + "consume-signer-key-authorization", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.stringAscii(Pox4SignatureTopic.StackStx), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(ustxAmount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + + const response = stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + address1 + ); + + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_SIGNER_AUTH_USED)); + }); + }); + + describe("stack-extend", () => { + it("can extend stacking during the last stacking cycle", () => { + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + const stackSignature = account.client.signPoxSignature({ + 
authId, + maxAmount, + rewardCycle: 0, + period: 2, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(2), + Cl.some(Cl.bufferFromHex(stackSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + address1 + ); + expect(response.result).toHaveClarityType(ClarityType.ResponseOk); + + // advance to cycle 1 + simnet.mineEmptyBlocks(cycleLength); + + // advance to cycle 2 + simnet.mineEmptyBlocks(cycleLength); + // call stack-extend for 2 more cycles + const extendSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 2, + period: 2, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const extendArgs = [ + Cl.uint(2), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(extendSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + extendArgs, + address1 + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(cycleLength * 5), + }) + ); + + // advance to cycle 3 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle3 = getTotalStacked(simnet, POX_CONTRACT, 3); + expect(totalCycle3).toBe(BigInt(ustxAmount)); + + // advance to cycle 4 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle4 = getTotalStacked(simnet, POX_CONTRACT, 4); + expect(totalCycle4).toBe(BigInt(ustxAmount)); + + // advance to cycle 5 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle5 = getTotalStacked(simnet, POX_CONTRACT, 
5); + expect(totalCycle5).toBe(0n); + }); + + it("can extend stacking up to 11 cycles", () => { + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + const stackSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 0, + period: 2, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(2), + Cl.some(Cl.bufferFromHex(stackSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + simnet.callPublicFn(POX_CONTRACT, "stack-stx", stackStxArgs, address1); + + // advance to cycle 1 + simnet.mineEmptyBlocks(cycleLength * 2); + + // call stack-extend for 11 more cycles + const extendSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 2, + period: 11, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const extendArgs = [ + Cl.uint(11), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(extendSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + extendArgs, + address1 + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(cycleLength * (11 + 2 + 1)), + }) + ); + }); + + it("can not extend stacking for more than 11 cycles", () => { + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + const stackSignature 
= account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 0, + period: 2, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(2), + Cl.some(Cl.bufferFromHex(stackSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + simnet.callPublicFn(POX_CONTRACT, "stack-stx", stackStxArgs, address1); + + // advance to cycle 1 + simnet.mineEmptyBlocks(cycleLength * 2); + + // call stack-extend for 12 more cycles + const extendSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 2, + period: 12, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const extendArgs = [ + Cl.uint(12), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(extendSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + extendArgs, + address1 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD)); + }); + + it("can extend stacking during any stacking cycle", () => { + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + const stackSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 0, + period: 2, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), 
+ poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(2), + Cl.some(Cl.bufferFromHex(stackSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + address1 + ); + expect(response.result).toHaveClarityType(ClarityType.ResponseOk); + + // advance to cycle 1 + simnet.mineEmptyBlocks(cycleLength); + // call stack-extend for 2 more cycles + const extendSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 1, + period: 2, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const extendArgs = [ + Cl.uint(2), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(extendSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + extendArgs, + address1 + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(cycleLength * 5), + }) + ); + + // advance to cycle 2 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle2 = getTotalStacked(simnet, POX_CONTRACT, 2); + expect(totalCycle2).toBe(BigInt(ustxAmount)); + + // advance to cycle 3 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle3 = getTotalStacked(simnet, POX_CONTRACT, 3); + expect(totalCycle3).toBe(BigInt(ustxAmount)); + + // advance to cycle 4 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle4 = getTotalStacked(simnet, POX_CONTRACT, 4); + expect(totalCycle4).toBe(BigInt(ustxAmount)); + + // advance to cycle 5 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle5 = getTotalStacked(simnet, POX_CONTRACT, 5); + expect(totalCycle5).toBe(0n); + }); + + it("can not extend stacking after stacking end", () => { + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + const account = stackers[0]; + 
const burnBlockHeight = 1; + const authId = account.authId; + + const stackSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 0, + period: 2, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(2), + Cl.some(Cl.bufferFromHex(stackSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + address1 + ); + expect(response.result).toHaveClarityType(ClarityType.ResponseOk); + + // advance to cycle 3 + simnet.mineEmptyBlocks(cycleLength * 3); + + const extendSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 3, + period: 2, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const extendArgs = [ + Cl.uint(2), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(extendSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + extendArgs, + address1 + ); + expect(result).toBeErr(Cl.int(26)); + }); + + it("cannot be called indirectly from an unauthorized caller", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 2; + + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: 
account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const stackExtendArgs = [ + Cl.uint(2), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + "indirect", + "stack-extend", + stackExtendArgs, + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly from an authorized caller", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 2; + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + allowContractCaller(`${deployer}.indirect`, null, address1); + + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const stackExtendArgs = [ + Cl.uint(2), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + "indirect", + "stack-extend", + stackExtendArgs, + address1 + ); + + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(5 * cycleLength), + }) + ); + }); + + it("cannot extend for 0 cycles", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 2; + + 
stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + const { result } = stackExtend( + account, + 0, + maxAmount, + authId, + account.stxAddress + ); + + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD)); + }); + + it("errors if not directly stacking", () => { + const account = stackers[0]; + const delegateAccount = stackers[1]; + const authId = account.authId; + const period = 6; + + delegateStx( + maxAmount, + delegateAccount.stxAddress, + null, + null, + account.stxAddress + ); + delegateStackStx( + address1, + maxAmount, + delegateAccount.btcAddr, + 1000, + period, + address2 + ); + + const { result } = stackExtend( + account, + 4, + maxAmount, + authId, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_IS_DELEGATED)); + }); + + it("can change the pox address", () => { + const account = stackers[0]; + const account1 = stackers[1]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 6; + const extendPeriod = 5; + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period: extendPeriod, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account1.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const stackExtendArgs = [ + Cl.uint(extendPeriod), + poxAddressToTuple(account1.btcAddr), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + stackExtendArgs, + address1 + ); + + 
expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(12 * cycleLength), + }) + ); + }); + }); + + describe("stack-increase", () => { + it("can increase stacked amount before locked", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + stackStx( + account, + maxAmount, + burnBlockHeight, + 2, + maxAmount, + authId, + account.stxAddress + ); + + const { result } = stackIncrease( + account, + maxAmount, + 2, + maxAmount * 2, + authId, + account.stxAddress + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount * 2), + }) + ); + }); + + it("can increase stacked amount after locked", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + stackStx( + account, + maxAmount, + burnBlockHeight, + 2, + maxAmount, + authId, + account.stxAddress + ); + + simnet.mineEmptyBlocks(cycleLength); + + const { result } = stackIncrease( + account, + maxAmount, + 2, + maxAmount * 2, + authId, + account.stxAddress + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount * 2), + }) + ); + }); + + it("cannot increase when not stacked", () => { + const account = stackers[0]; + const authId = account.authId; + + const { result } = stackIncrease( + account, + maxAmount, + 2, + maxAmount * 2, + authId, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACK_INCREASE_NOT_LOCKED)); + }); + + it("errors if increase-by amount is 0", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + stackStx( + account, + maxAmount, + burnBlockHeight, + 2, + maxAmount, + authId, + account.stxAddress + ); + + const { result } = stackIncrease( + account, + 0, + 2, + 
maxAmount, + authId, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_AMOUNT)); + }); + + it("can stack the entire balance", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const balance = simnet + .getAssetsMap() + .get("STX") + ?.get(account.stxAddress)!; + + stackStx( + account, + maxAmount, + burnBlockHeight, + 2, + maxAmount, + authId, + account.stxAddress + ); + + const { result } = stackIncrease( + account, + balance - BigInt(maxAmount), + 2, + 2n ** 128n - 1n, + authId, + account.stxAddress + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(balance), + }) + ); + }); + + it("errors on insufficient funds", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const balance = simnet + .getAssetsMap() + .get("STX") + ?.get(account.stxAddress)!; + + stackStx( + account, + maxAmount, + burnBlockHeight, + 2, + maxAmount, + authId, + account.stxAddress + ); + + const { result } = stackIncrease( + account, + balance - BigInt(maxAmount) + 1n, + 2, + 2n ** 128n - 1n, + authId, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS)); + }); + + it("cannot be called indirectly from an unauthorized caller", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 2; + + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackIncrease, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const 
stackIncreaseArgs = [ + Cl.uint(maxAmount), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount * 2), + Cl.uint(authId), + ]; + + const { result } = simnet.callPublicFn( + "indirect", + "stack-increase", + stackIncreaseArgs, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED)); + }); + + it("can be called indirectly from an authorized caller", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 2; + + allowContractCaller(`${deployer}.indirect`, null, account.stxAddress); + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount: maxAmount * 2, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackIncrease, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const stackIncreaseArgs = [ + Cl.uint(maxAmount), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount * 2), + Cl.uint(authId), + ]; + + const { result } = simnet.callPublicFn( + "indirect", + "stack-increase", + stackIncreaseArgs, + account.stxAddress + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount * 2), + }) + ); + }); + + it("errors if not directly stacking", () => { + const account = stackers[0]; + const delegateAccount = stackers[1]; + const authId = account.authId; + const period = 6; + + delegateStx( + maxAmount * 2, + delegateAccount.stxAddress, + null, + null, + account.stxAddress + ); + delegateStackStx( + address1, + maxAmount, + delegateAccount.btcAddr, + 1000, + period, + address2 + ); + + const { result } = stackIncrease( + account, + 
maxAmount, + period, + maxAmount * 2, + authId, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_IS_DELEGATED)); + }); + + it("cannot change the pox address", () => { + const account = stackers[0]; + const account1 = stackers[1]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 6; + + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount: maxAmount * 2, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackIncrease, + poxAddress: account1.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const stackIncreaseArgs = [ + Cl.uint(maxAmount), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount * 2), + Cl.uint(authId), + ]; + + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-increase", + stackIncreaseArgs, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_INVALID_SIGNATURE_PUBKEY)); + }); + }); +}); diff --git a/contrib/boot-contracts-unit-tests/tsconfig.json b/contrib/boot-contracts-unit-tests/tsconfig.json new file mode 100644 index 0000000000..1bdaf36c46 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tsconfig.json @@ -0,0 +1,26 @@ + +{ + "compilerOptions": { + "target": "ESNext", + "useDefineForClassFields": true, + "module": "ESNext", + "lib": ["ESNext"], + "skipLibCheck": true, + + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + + "strict": true, + "noImplicitAny": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": [ + 
"node_modules/@hirosystems/clarinet-sdk/vitest-helpers/src", + "tests" + ] +} diff --git a/contrib/boot-contracts-unit-tests/vitest.config.js b/contrib/boot-contracts-unit-tests/vitest.config.js new file mode 100644 index 0000000000..e7945ebe02 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/vitest.config.js @@ -0,0 +1,44 @@ +/// + +import { defineConfig } from "vite"; +import { + vitestSetupFilePath, + getClarinetVitestsArgv, +} from "@hirosystems/clarinet-sdk/vitest"; + +/* + In this file, Vitest is configured so that it works seamlessly with Clarinet and the Simnet. + + The `vitest-environment-clarinet` will initialise the clarinet-sdk + and make the `simnet` object available globally in the test files. + + `vitestSetupFilePath` points to a file in the `@hirosystems/clarinet-sdk` package that does two things: + - run `before` hooks to initialize the simnet and `after` hooks to collect costs and coverage reports. + - load custom vitest matchers to work with Clarity values (such as `expect(...).toBeUint()`) + + The `getClarinetVitestsArgv()` will parse options passed to the command `vitest run --` + - vitest run -- --manifest ./Clarinet.toml # pass a custom path + - vitest run -- --coverage --costs # collect coverage and cost reports +*/ + +export default defineConfig({ + test: { + environment: "clarinet", // use vitest-environment-clarinet + pool: "forks", + poolOptions: { + forks: { singleFork: true }, + }, + setupFiles: [ + vitestSetupFilePath, + // custom setup files can be added here + ], + environmentOptions: { + clarinet: { + ...getClarinetVitestsArgv(), + includeBootContracts: true, + bootContractsPath: `${process.cwd()}/boot_contracts`, + // add or override options + }, + }, + }, +}); diff --git a/contrib/core-contract-tests/package-lock.json b/contrib/core-contract-tests/package-lock.json index 50ded82d38..f15153b12e 100644 --- a/contrib/core-contract-tests/package-lock.json +++ b/contrib/core-contract-tests/package-lock.json @@ -11,7 +11,8 @@ 
"dependencies": { "@hirosystems/clarinet-sdk": "^2.4.1", "@stacks/clarunit": "0.0.1", - "@stacks/transactions": "^6.12.0", + "@stacks/stacking": "^6.13.2", + "@stacks/transactions": "^6.13.0", "chokidar-cli": "^3.0.0", "fast-check": "^3.15.1", "typescript": "^5.4.2", @@ -448,9 +449,9 @@ } }, "node_modules/@hirosystems/clarinet-sdk-wasm": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk-wasm/-/clarinet-sdk-wasm-2.4.0.tgz", - "integrity": "sha512-qApXWsnWRtQcj5BsqoKd+AsEtDURA5CJQcRxgCAVjyRSjkbGJXxNgrW9oRnIkfIIKJ6D5mV7JGrr8CQ8BSJ/tg==" + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk-wasm/-/clarinet-sdk-wasm-2.4.2.tgz", + "integrity": "sha512-85RrDiqrfup/h7XLqysdm/J4csmimCRTXHnCiD+4HyKHVhgr7HWL7sGEGpGfThjPxukjV8A+b2GF2x9Rufpz9g==" }, "node_modules/@humanwhocodes/config-array": { "version": "0.11.14", @@ -1219,23 +1220,23 @@ } }, "node_modules/@stacks/common": { - "version": "6.10.0", - "resolved": "https://registry.npmjs.org/@stacks/common/-/common-6.10.0.tgz", - "integrity": "sha512-6x5Z7AKd9/kj3+DYE9xIDIkFLHihBH614i2wqrZIjN02WxVo063hWSjIlUxlx8P4gl6olVzlOy5LzhLJD9OP0A==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/common/-/common-6.13.0.tgz", + "integrity": "sha512-wwzyihjaSdmL6NxKvDeayy3dqM0L0Q2sawmdNtzJDi0FnXuJGm5PeapJj7bEfcI9XwI7Bw5jZoC6mCn9nc5YIw==", "dependencies": { "@types/bn.js": "^5.1.0", "@types/node": "^18.0.4" } }, "node_modules/@stacks/encryption": { - "version": "6.12.0", - "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.12.0.tgz", - "integrity": "sha512-CubE51pHrcxx3yA+xapevPgA9UDleIoEaUZ06/9uD91B42yvTg37HyS8t06rzukU9q+X7Cv2I/+vbuf4nJIo8g==", + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.13.1.tgz", + "integrity": "sha512-y5IFX3/nGI3fCk70gE0JwH70GpshD8RhUfvhMLcL96oNaec1cCdj1ZUiQupeicfYTHuraaVBYU9xLls4TRmypg==", "dependencies": { "@noble/hashes": "1.1.5", 
"@noble/secp256k1": "1.7.1", "@scure/bip39": "1.1.0", - "@stacks/common": "^6.10.0", + "@stacks/common": "^6.13.0", "@types/node": "^18.0.4", "base64-js": "^1.5.1", "bs58": "^5.0.0", @@ -1244,25 +1245,26 @@ } }, "node_modules/@stacks/network": { - "version": "6.11.3", - "resolved": "https://registry.npmjs.org/@stacks/network/-/network-6.11.3.tgz", - "integrity": "sha512-c4ClCU/QUwuu8NbHtDKPJNa0M5YxauLN3vYaR0+S4awbhVIKFQSxirm9Q9ckV1WBh7FtD6u2S0x+tDQGAODjNg==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/network/-/network-6.13.0.tgz", + "integrity": "sha512-Ss/Da4BNyPBBj1OieM981fJ7SkevKqLPkzoI1+Yo7cYR2df+0FipIN++Z4RfpJpc8ne60vgcx7nJZXQsiGhKBQ==", "dependencies": { - "@stacks/common": "^6.10.0", + "@stacks/common": "^6.13.0", "cross-fetch": "^3.1.5" } }, "node_modules/@stacks/stacking": { - "version": "6.12.0", - "resolved": "https://registry.npmjs.org/@stacks/stacking/-/stacking-6.12.0.tgz", - "integrity": "sha512-XBxwbaCGRPnjpjspb3CBXrlZl6xR+gghLMz9PQNPdpuIbBDFa0SGeHgqjtpVU+2DVL4UyBx8PVsAWtlssyVGng==", + "version": "6.13.2", + "resolved": "https://registry.npmjs.org/@stacks/stacking/-/stacking-6.13.2.tgz", + "integrity": "sha512-4h1UQuL2+Xdra9zMqzUElvKG9X9fenuNE7hD9sIqyxyLFxeQ7gRqczmTYPsmaj4wY5004JNj+efzGJ0VmpOcAA==", "dependencies": { + "@noble/hashes": "1.1.5", "@scure/base": "1.1.1", - "@stacks/common": "^6.10.0", - "@stacks/encryption": "^6.12.0", - "@stacks/network": "^6.11.3", + "@stacks/common": "^6.13.0", + "@stacks/encryption": "^6.13.1", + "@stacks/network": "^6.13.0", "@stacks/stacks-blockchain-api-types": "^0.61.0", - "@stacks/transactions": "^6.12.0", + "@stacks/transactions": "^6.13.1", "bs58": "^5.0.0" } }, @@ -1283,14 +1285,14 @@ "integrity": "sha512-yPOfTUboo5eA9BZL/hqMcM71GstrFs9YWzOrJFPeP4cOO1wgYvAcckgBRbgiE3NqeX0A7SLZLDAXLZbATuRq9w==" }, "node_modules/@stacks/transactions": { - "version": "6.12.0", - "resolved": "https://registry.npmjs.org/@stacks/transactions/-/transactions-6.12.0.tgz", - "integrity": 
"sha512-gRP3SfTaAIoTdjMvOiLrMZb/senqB8JQlT5Y4C3/CiHhiprYwTx7TbOCSa7WsNOU99H4aNfHvatmymuggXQVkA==", + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/transactions/-/transactions-6.13.1.tgz", + "integrity": "sha512-PWw2I+2Fj3CaFYQIoVcqQN6E2qGHNhFv03nuR0CxMq0sx8stPgYZbdzUlnlBcJQdsFiHrw3sPeqnXDZt+Hg5YQ==", "dependencies": { "@noble/hashes": "1.1.5", "@noble/secp256k1": "1.7.1", - "@stacks/common": "^6.10.0", - "@stacks/network": "^6.11.3", + "@stacks/common": "^6.13.0", + "@stacks/network": "^6.13.0", "c32check": "^2.0.0", "lodash.clonedeep": "^4.5.0" } diff --git a/contrib/core-contract-tests/package.json b/contrib/core-contract-tests/package.json index 7ba3ba62e2..fe3dee2eb5 100644 --- a/contrib/core-contract-tests/package.json +++ b/contrib/core-contract-tests/package.json @@ -13,7 +13,8 @@ "dependencies": { "@hirosystems/clarinet-sdk": "^2.4.1", "@stacks/clarunit": "0.0.1", - "@stacks/transactions": "^6.12.0", + "@stacks/stacking": "^6.13.2", + "@stacks/transactions": "^6.13.0", "chokidar-cli": "^3.0.0", "fast-check": "^3.15.1", "typescript": "^5.4.2", diff --git a/docs/mining.md b/docs/mining.md index 8b824924f7..891358af03 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -3,7 +3,7 @@ Stacks tokens (STX) are mined by transferring BTC via PoX. To run as a miner, you should make sure to add the following config fields to your config file: -``` +```toml [node] # Run as a miner miner = True @@ -25,6 +25,8 @@ first_attempt_time_ms = 1000 subsequent_attempt_time_ms = 60000 # Time to spend mining a microblock, in milliseconds. microblock_attempt_time_ms = 30000 +# Time to spend mining a Nakamoto block, in milliseconds. 
+nakamoto_attempt_time_ms = 10000 ``` You can verify that your node is operating as a miner by checking its log output @@ -40,7 +42,7 @@ INFO [1630127492.062652] [testnet/stacks-node/src/run_loop/neon.rs:164] [main] U Fee and cost estimators can be configured via the config section `[fee_estimation]`: -``` +```toml [fee_estimation] cost_estimator = naive_pessimistic fee_estimator = fuzzed_weighted_median_fee_rate diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index e04dcbbdc1..7da9801674 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -18,8 +18,10 @@ path = "./src/libsigner.rs" [dependencies] clarity = { path = "../clarity" } hashbrown = { workspace = true } +lazy_static = "1.4.0" libc = "0.2" libstackerdb = { path = "../libstackerdb" } +prometheus = { version = "0.9", optional = true } serde = "1" serde_derive = "1" serde_stacker = "0.1" @@ -33,6 +35,7 @@ tiny_http = "0.12" wsts = { workspace = true } [dev-dependencies] +mutants = "0.0.3" rand_core = { workspace = true } rand = { workspace = true } @@ -49,3 +52,6 @@ sha2 = { version = "0.10", features = ["asm"] } [target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), any(target_os = "windows")))'.dependencies] sha2 = { version = "0.10" } + +[features] +monitoring_prom = ["prometheus"] \ No newline at end of file diff --git a/libsigner/src/error.rs b/libsigner/src/error.rs index 101a1b35e9..7c4deadf1b 100644 --- a/libsigner/src/error.rs +++ b/libsigner/src/error.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -71,4 +71,7 @@ pub enum EventError { /// Unrecognized stacker DB contract error #[error("Unrecognized StackerDB contract: {0}")] 
UnrecognizedStackerDBContract(QualifiedContractIdentifier), + /// Empty chunks event + #[error("Empty chunks event")] + EmptyChunksEvent, } diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 2d156559ff..c603db7f0b 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::fmt::Debug; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpListener, TcpStream}; use std::sync::atomic::{AtomicBool, Ordering}; @@ -51,11 +52,19 @@ use wsts::net::{ use wsts::state_machine::signer; use crate::http::{decode_http_body, decode_http_request}; -use crate::{EventError, SignerMessage}; +use crate::EventError; + +/// Define the trait for the event processor +pub trait SignerEventTrait: + StacksMessageCodec + Clone + Debug + Send +{ +} + +impl SignerEventTrait for T {} #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] /// BlockProposal sent to signers -pub struct BlockProposalSigners { +pub struct BlockProposal { /// The block itself pub block: NakamotoBlock, /// The burn height the block is mined during @@ -64,30 +73,7 @@ pub struct BlockProposalSigners { pub reward_cycle: u64, } -/// Event enum for newly-arrived signer subscribed events -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub enum SignerEvent { - /// A miner sent a message over .miners - /// The `Vec` will contain any block proposals made by the miner during this StackerDB event. - /// The `Vec` will contain any signer WSTS messages made by the miner while acting as a coordinator. 
- /// The `Option` will contain the message sender's public key if either of the vecs is non-empty. - MinerMessages( - Vec, - Vec, - Option, - ), - /// The signer messages for other signers and miners to observe - /// The u32 is the signer set to which the message belongs (either 0 or 1) - SignerMessages(u32, Vec), - /// A new block proposal validation response from the node - BlockValidationResponse(BlockValidateResponse), - /// Status endpoint request - StatusCheck, - /// A new burn block event was received with the given burnchain block height - NewBurnBlock(u64), -} - -impl StacksMessageCodec for BlockProposalSigners { +impl StacksMessageCodec for BlockProposal { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { self.block.consensus_serialize(fd)?; self.burn_height.consensus_serialize(fd)?; @@ -99,7 +85,7 @@ impl StacksMessageCodec for BlockProposalSigners { let block = NakamotoBlock::consensus_deserialize(fd)?; let burn_height = u64::consensus_deserialize(fd)?; let reward_cycle = u64::consensus_deserialize(fd)?; - Ok(BlockProposalSigners { + Ok(BlockProposal { block, burn_height, reward_cycle, @@ -107,6 +93,24 @@ impl StacksMessageCodec for BlockProposalSigners { } } +/// Event enum for newly-arrived signer subscribed events +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub enum SignerEvent { + /// A miner sent a message over .miners + /// The `Vec` will contain any signer messages made by the miner. + /// The `StacksPublicKey` is the message sender's public key. 
+ MinerMessages(Vec, StacksPublicKey), + /// The signer messages for other signers and miners to observe + /// The u32 is the signer set to which the message belongs (either 0 or 1) + SignerMessages(u32, Vec), + /// A new block proposal validation response from the node + BlockValidationResponse(BlockValidateResponse), + /// Status endpoint request + StatusCheck, + /// A new burn block event was received with the given burnchain block height + NewBurnBlock(u64), +} + /// Trait to implement a stop-signaler for the event receiver thread. /// The caller calls `send()` and the event receiver loop (which lives in a separate thread) will /// terminate. @@ -116,7 +120,7 @@ pub trait EventStopSignaler { } /// Trait to implement to handle signer specific events sent by the Stacks node -pub trait EventReceiver { +pub trait EventReceiver { /// The implementation of ST will ensure that a call to ST::send() will cause /// the call to `is_stopped()` below to return true. type ST: EventStopSignaler + Send + Sync; @@ -124,11 +128,11 @@ pub trait EventReceiver { /// Open a server socket to the given socket address. 
fn bind(&mut self, listener: SocketAddr) -> Result; /// Return the next event - fn next_event(&mut self) -> Result; + fn next_event(&mut self) -> Result, EventError>; /// Add a downstream event consumer - fn add_consumer(&mut self, event_out: Sender); + fn add_consumer(&mut self, event_out: Sender>); /// Forward the event to downstream consumers - fn forward_event(&mut self, ev: SignerEvent) -> bool; + fn forward_event(&mut self, ev: SignerEvent) -> bool; /// Determine if the receiver should hang up fn is_stopped(&self) -> bool; /// Get a stop signal instance that, when sent, will cause this receiver to stop accepting new @@ -169,23 +173,23 @@ pub trait EventReceiver { } /// Event receiver for Signer events -pub struct SignerEventReceiver { +pub struct SignerEventReceiver { /// Address we bind to local_addr: Option, /// server socket that listens for HTTP POSTs from the node http_server: Option, /// channel into which to write newly-discovered data - out_channels: Vec>, + out_channels: Vec>>, /// inter-thread stop variable -- if set to true, then the `main_loop` will exit stop_signal: Arc, /// Whether the receiver is running on mainnet is_mainnet: bool, } -impl SignerEventReceiver { +impl SignerEventReceiver { /// Make a new Signer event receiver, and return both the receiver and the read end of a /// channel into which node-received data can be obtained. 
- pub fn new(is_mainnet: bool) -> SignerEventReceiver { + pub fn new(is_mainnet: bool) -> SignerEventReceiver { SignerEventReceiver { http_server: None, local_addr: None, @@ -198,7 +202,7 @@ impl SignerEventReceiver { /// Do something with the socket pub fn with_server(&mut self, todo: F) -> Result where - F: FnOnce(&SignerEventReceiver, &mut HttpServer, bool) -> R, + F: FnOnce(&SignerEventReceiver, &mut HttpServer, bool) -> R, { let mut server = if let Some(s) = self.http_server.take() { s @@ -230,6 +234,7 @@ impl SignerStopSignaler { } impl EventStopSignaler for SignerStopSignaler { + #[cfg_attr(test, mutants::skip)] fn send(&mut self) { self.stop_signal.store(true, Ordering::SeqCst); // wake up the thread so the atomicbool can be checked @@ -243,15 +248,14 @@ impl EventStopSignaler for SignerStopSignaler { body.len(), body ); - match stream.write_all(req.as_bytes()) { - Err(e) => error!("Failed to send shutdown request: {}", e), - _ => (), - }; + if let Err(e) = stream.write_all(req.as_bytes()) { + error!("Failed to send shutdown request: {}", e); + } } } } -impl EventReceiver for SignerEventReceiver { +impl EventReceiver for SignerEventReceiver { type ST = SignerStopSignaler; /// Start listening on the given socket address. @@ -266,7 +270,7 @@ impl EventReceiver for SignerEventReceiver { /// Wait for the node to post something, and then return it. /// Errors are recoverable -- the caller should call this method again even if it returns an /// error. - fn next_event(&mut self) -> Result { + fn next_event(&mut self) -> Result, EventError> { self.with_server(|event_receiver, http_server, _is_mainnet| { // were we asked to terminate? if event_receiver.is_stopped() { @@ -323,7 +327,7 @@ impl EventReceiver for SignerEventReceiver { /// Forward an event /// Return true on success; false on error. /// Returning false terminates the event receiver. 
- fn forward_event(&mut self, ev: SignerEvent) -> bool { + fn forward_event(&mut self, ev: SignerEvent) -> bool { if self.out_channels.is_empty() { // nothing to do error!("No channels connected to event receiver"); @@ -347,7 +351,7 @@ impl EventReceiver for SignerEventReceiver { } /// Add an event consumer. A received event will be forwarded to this Sender. - fn add_consumer(&mut self, out_channel: Sender) { + fn add_consumer(&mut self, out_channel: Sender>) { self.out_channels.push(out_channel); } @@ -372,10 +376,10 @@ fn ack_dispatcher(request: HttpRequest) { } /// Process a stackerdb event from the node -fn process_stackerdb_event( +fn process_stackerdb_event( local_addr: Option, mut request: HttpRequest, -) -> Result { +) -> Result, EventError> { debug!("Got stackerdb_chunks event"); let mut body = String::new(); if let Err(e) = request.as_reader().read_to_string(&mut body) { @@ -400,7 +404,7 @@ fn process_stackerdb_event( event_contract_id ); ack_dispatcher(request); - return Err(e.into()); + return Err(e); } Ok(x) => x, }; @@ -410,44 +414,28 @@ fn process_stackerdb_event( Ok(signer_event) } -impl TryFrom for SignerEvent { +impl TryFrom for SignerEvent { type Error = EventError; fn try_from(event: StackerDBChunksEvent) -> Result { let signer_event = if event.contract_id.name.as_str() == MINERS_NAME && event.contract_id.is_boot() { - let mut blocks = vec![]; let mut messages = vec![]; let mut miner_pk = None; for chunk in event.modified_slots { + let Ok(msg) = T::consensus_deserialize(&mut chunk.data.as_slice()) else { + continue; + }; + miner_pk = Some(chunk.recover_pk().map_err(|e| { EventError::MalformedRequest(format!( "Failed to recover PK from StackerDB chunk: {e}" )) })?); - if chunk.slot_id % MINER_SLOT_COUNT == 0 { - // block - let Ok(block) = - BlockProposalSigners::consensus_deserialize(&mut chunk.data.as_slice()) - else { - continue; - }; - blocks.push(block); - } else if chunk.slot_id % MINER_SLOT_COUNT == 1 { - // message - let Ok(msg) = 
SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - else { - continue; - }; - messages.push(msg); - } else { - return Err(EventError::UnrecognizedEvent( - "Unrecognized slot_id for miners contract".into(), - )); - }; + messages.push(msg); } - SignerEvent::MinerMessages(blocks, messages, miner_pk) + SignerEvent::MinerMessages(messages, miner_pk.ok_or(EventError::EmptyChunksEvent)?) } else if event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot() { let Some((signer_set, _)) = get_signers_db_signer_set_message_id(event.contract_id.name.as_str()) @@ -455,10 +443,10 @@ impl TryFrom for SignerEvent { return Err(EventError::UnrecognizedStackerDBContract(event.contract_id)); }; // signer-XXX-YYY boot contract - let signer_messages: Vec = event + let signer_messages: Vec = event .modified_slots .iter() - .filter_map(|chunk| read_next::(&mut &chunk.data[..]).ok()) + .filter_map(|chunk| read_next::(&mut &chunk.data[..]).ok()) .collect(); SignerEvent::SignerMessages(signer_set, signer_messages) } else { @@ -469,7 +457,9 @@ impl TryFrom for SignerEvent { } /// Process a proposal response from the node -fn process_proposal_response(mut request: HttpRequest) -> Result { +fn process_proposal_response( + mut request: HttpRequest, +) -> Result, EventError> { debug!("Got proposal_response event"); let mut body = String::new(); if let Err(e) = request.as_reader().read_to_string(&mut body) { @@ -495,7 +485,9 @@ fn process_proposal_response(mut request: HttpRequest) -> Result Result { +fn process_new_burn_block_event( + mut request: HttpRequest, +) -> Result, EventError> { debug!("Got burn_block event"); let mut body = String::new(); if let Err(e) = request.as_reader().read_to_string(&mut body) { diff --git a/libsigner/src/http.rs b/libsigner/src/http.rs index fe841415a9..adb1f509f1 100644 --- a/libsigner/src/http.rs +++ b/libsigner/src/http.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright 
(C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by diff --git a/libsigner/src/libsigner.rs b/libsigner/src/libsigner.rs index 0b16e97e19..874ebad1f4 100644 --- a/libsigner/src/libsigner.rs +++ b/libsigner/src/libsigner.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -39,18 +39,18 @@ mod tests; mod error; mod events; mod http; -mod messages; mod runloop; mod session; mod signer_set; +/// v0 signer related code +pub mod v0; +/// v1 signer related code +pub mod v1; pub use crate::error::{EventError, RPCError}; pub use crate::events::{ - BlockProposalSigners, EventReceiver, EventStopSignaler, SignerEvent, SignerEventReceiver, - SignerStopSignaler, -}; -pub use crate::messages::{ - BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, + BlockProposal, EventReceiver, EventStopSignaler, SignerEvent, SignerEventReceiver, + SignerEventTrait, SignerStopSignaler, }; pub use crate::runloop::{RunningSigner, Signer, SignerRunLoop}; pub use crate::session::{SignerSession, StackerDBSession}; diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index 0b7eb2dbcf..b0f026f35f 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the 
GNU General Public License as published by @@ -24,11 +24,12 @@ use std::thread; use std::thread::JoinHandle; use std::time::Duration; +use clarity::codec::StacksMessageCodec; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; use crate::error::EventError; -use crate::events::{EventReceiver, EventStopSignaler, SignerEvent}; +use crate::events::{EventReceiver, EventStopSignaler, SignerEvent, SignerEventTrait}; /// Some libcs, like musl, have a very small stack size. /// Make sure it's big enough. @@ -40,7 +41,7 @@ const STDERR: i32 = 2; /// Trait describing the needful components of a top-level runloop. /// This is where the signer business logic would go. /// Implement this, and you get all the multithreaded setup for free. -pub trait SignerRunLoop { +pub trait SignerRunLoop { /// Hint to set how long to wait for new events fn set_event_timeout(&mut self, timeout: Duration); /// Getter for the event poll timeout @@ -50,7 +51,7 @@ pub trait SignerRunLoop { /// Returns None to keep running. fn run_one_pass( &mut self, - event: Option, + event: Option>, cmd: Option, res: Sender, ) -> Option; @@ -64,7 +65,7 @@ pub trait SignerRunLoop { /// This would run in a separate thread from the event receiver. 
fn main_loop( &mut self, - event_recv: Receiver, + event_recv: Receiver>, command_recv: Receiver, result_send: Sender, mut event_stop_signaler: EVST, @@ -93,7 +94,7 @@ pub trait SignerRunLoop { } /// The top-level signer implementation -pub struct Signer { +pub struct Signer { /// the runloop itself signer_loop: Option, /// the event receiver to use @@ -102,10 +103,12 @@ pub struct Signer { command_receiver: Option>, /// the result sender to use result_sender: Option>, + /// phantom data for the codec + phantom_data: PhantomData, } /// The running signer implementation -pub struct RunningSigner { +pub struct RunningSigner, R, T: SignerEventTrait> { /// join handle for signer runloop signer_join: JoinHandle>, /// join handle for event receiver @@ -114,7 +117,7 @@ pub struct RunningSigner { stop_signal: EV::ST, } -impl RunningSigner { +impl, R, T: SignerEventTrait> RunningSigner { /// Stop the signer, and get the final state pub fn stop(mut self) -> Option { // kill event receiver @@ -189,19 +192,20 @@ pub fn set_runloop_signal_handler(mut st }).expect("FATAL: failed to set signal handler"); } -impl Signer { +impl Signer { /// Create a new signer with the given runloop and event receiver. pub fn new( runloop: SL, event_receiver: EV, command_receiver: Receiver, result_sender: Sender, - ) -> Signer { + ) -> Signer { Signer { signer_loop: Some(runloop), event_receiver: Some(event_receiver), command_receiver: Some(command_receiver), result_sender: Some(result_sender), + phantom_data: PhantomData, } } } @@ -209,9 +213,10 @@ impl Signer { impl< CMD: Send + 'static, R: Send + 'static, - SL: SignerRunLoop + Send + 'static, - EV: EventReceiver + Send + 'static, - > Signer + T: SignerEventTrait + 'static, + SL: SignerRunLoop + Send + 'static, + EV: EventReceiver + Send + 'static, + > Signer { /// This is a helper function to spawn both the runloop and event receiver in their own /// threads. 
Advanced signers may not need this method, and instead opt to run the receiver @@ -223,7 +228,7 @@ impl< /// /// On success, this method consumes the Signer and returns a RunningSigner with the relevant /// inter-thread communication primitives for the caller to shut down the system. - pub fn spawn(&mut self, bind_addr: SocketAddr) -> Result, EventError> { + pub fn spawn(&mut self, bind_addr: SocketAddr) -> Result, EventError> { let mut event_receiver = self .event_receiver .take() diff --git a/libsigner/src/session.rs b/libsigner/src/session.rs index 30966f897b..c13621392b 100644 --- a/libsigner/src/session.rs +++ b/libsigner/src/session.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -20,7 +20,8 @@ use std::str; use clarity::vm::types::QualifiedContractIdentifier; use libstackerdb::{ stackerdb_get_chunk_path, stackerdb_get_metadata_path, stackerdb_post_chunk_path, SlotMetadata, - StackerDBChunkAckData, StackerDBChunkData, + StackerDBChunkAckData, StackerDBChunkData, SIGNERS_STACKERDB_CHUNK_SIZE, + STACKERDB_MAX_CHUNK_SIZE, }; use stacks_common::codec::StacksMessageCodec; @@ -94,6 +95,7 @@ pub trait SignerSession { } /// signer session for a stackerdb instance +#[derive(Debug)] pub struct StackerDBSession { /// host we're talking to pub host: String, @@ -214,10 +216,23 @@ impl SignerSession for StackerDBSession { /// query the replica for zero or more latest chunks fn get_latest_chunks(&mut self, slot_ids: &[u32]) -> Result>>, RPCError> { let mut payloads = vec![]; + let limit = if self.stackerdb_contract_id.name.starts_with("signer") { + SIGNERS_STACKERDB_CHUNK_SIZE + } else { + usize::try_from(STACKERDB_MAX_CHUNK_SIZE) + .expect("infallible: StackerDB 
chunk size exceeds usize::MAX") + }; for slot_id in slot_ids.iter() { let path = stackerdb_get_chunk_path(self.stackerdb_contract_id.clone(), *slot_id, None); let chunk = match self.rpc_request("GET", &path, None, &[]) { - Ok(body_bytes) => Some(body_bytes), + Ok(body_bytes) => { + // Verify that the chunk is not too large + if body_bytes.len() > limit { + None + } else { + Some(body_bytes) + } + } Err(RPCError::HttpError(code)) => { if code != 404 { return Err(RPCError::HttpError(code)); diff --git a/libsigner/src/signer_set.rs b/libsigner/src/signer_set.rs index 119873fd1e..fdcb857faf 100644 --- a/libsigner/src/signer_set.rs +++ b/libsigner/src/signer_set.rs @@ -86,7 +86,7 @@ impl SignerEntries { weight_end = weight_start + entry.weight; let key_ids: HashSet = (weight_start..weight_end).collect(); for key_id in key_ids.iter() { - wsts_key_ids.insert(*key_id, ecdsa_pk.clone()); + wsts_key_ids.insert(*key_id, ecdsa_pk); } signer_key_ids.insert(signer_id, (weight_start..weight_end).collect()); coordinator_key_ids.insert(signer_id, key_ids); diff --git a/libsigner/src/tests/http.rs b/libsigner/src/tests/http.rs index d2b052fae9..d0f3887b45 100644 --- a/libsigner/src/tests/http.rs +++ b/libsigner/src/tests/http.rs @@ -264,7 +264,7 @@ fn test_run_http_request_with_body() { let result_chunked = run_http_request( &mut msock_chunked, - &"127.0.0.1:20443", + "127.0.0.1:20443", verb, path, content_type, @@ -275,7 +275,7 @@ fn test_run_http_request_with_body() { let result_plain = run_http_request( &mut msock_plain, - &"127.0.0.1:20443", + "127.0.0.1:20443", verb, path, content_type, @@ -321,7 +321,7 @@ fn test_run_http_request_no_body() { let result_chunked = run_http_request( &mut msock_chunked, - &"127.0.0.1:20443", + "127.0.0.1:20443", verb, path, content_type, @@ -330,7 +330,7 @@ fn test_run_http_request_no_body() { .unwrap(); let result_plain = run_http_request( &mut msock_plain, - &"127.0.0.1:20443", + "127.0.0.1:20443", verb, path, content_type, diff --git 
a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index 1d3e1f3cc0..c584572ba7 100644 --- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -16,6 +16,7 @@ mod http; +use std::fmt::Debug; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; use std::sync::mpsc::{channel, Receiver, Sender}; @@ -36,20 +37,20 @@ use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; use wsts::net::{DkgBegin, Packet}; -use crate::events::SignerEvent; -use crate::messages::SignerMessage; +use crate::events::{SignerEvent, SignerEventTrait}; +use crate::v1::messages::SignerMessage; use crate::{Signer, SignerEventReceiver, SignerRunLoop}; /// Simple runloop implementation. It receives `max_events` events and returns `events` from the /// last call to `run_one_pass` as its final state. -struct SimpleRunLoop { +struct SimpleRunLoop { poll_timeout: Duration, - events: Vec, + events: Vec>, max_events: usize, } -impl SimpleRunLoop { - pub fn new(max_events: usize) -> SimpleRunLoop { +impl SimpleRunLoop { + pub fn new(max_events: usize) -> SimpleRunLoop { SimpleRunLoop { poll_timeout: Duration::from_millis(100), events: vec![], @@ -62,7 +63,7 @@ enum Command { Empty, } -impl SignerRunLoop, Command> for SimpleRunLoop { +impl SignerRunLoop>, Command, T> for SimpleRunLoop { fn set_event_timeout(&mut self, timeout: Duration) { self.poll_timeout = timeout; } @@ -73,10 +74,10 @@ impl SignerRunLoop, Command> for SimpleRunLoop { fn run_one_pass( &mut self, - event: Option, + event: Option>, _cmd: Option, - _res: Sender>, - ) -> Option> { + _res: Sender>>, + ) -> Option>> { debug!("Got event: {:?}", &event); if let Some(event) = event { self.events.push(event); @@ -161,7 +162,7 @@ fn test_simple_signer() { .unwrap() }); - let sent_events: Vec = chunks + let sent_events: Vec> = chunks .iter() .map(|chunk| { let msg = chunk.modified_slots[0].data.clone(); @@ -211,7 +212,7 @@ fn test_status_endpoint() { sleep_ms(3000); 
let accepted_events = running_signer.stop().unwrap(); - let sent_events: Vec = vec![SignerEvent::StatusCheck]; + let sent_events: Vec> = vec![SignerEvent::StatusCheck]; assert_eq!(sent_events, accepted_events); mock_stacks_node.join().unwrap(); diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs new file mode 100644 index 0000000000..4b9cd74dca --- /dev/null +++ b/libsigner/src/v0/messages.rs @@ -0,0 +1,572 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Messages in the signer-miner interaction have a multi-level hierarchy. +//! Signers send messages to each other through Packet messages. These messages, +//! as well as `BlockResponse`, `Transactions`, and `DkgResults` messages are stored +//! StackerDBs based on the `MessageSlotID` for the particular message type. This is a +//! shared identifier space between the four message kinds and their subtypes. +//! +//! These four message kinds are differentiated with a `SignerMessageTypePrefix` +//! and the `SignerMessage` enum. 
+ +use std::fmt::{Debug, Display}; +use std::io::{Read, Write}; +use std::net::{SocketAddr, TcpListener, TcpStream}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::mpsc::Sender; +use std::sync::Arc; + +use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; +use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; +use blockstack_lib::chainstate::stacks::StacksTransaction; +use blockstack_lib::net::api::postblock_proposal::{ + BlockValidateReject, BlockValidateResponse, ValidateRejectCode, +}; +use blockstack_lib::util_lib::boot::boot_code_id; +use clarity::util::retry::BoundReader; +use clarity::util::secp256k1::MessageSignature; +use clarity::vm::types::serialization::SerializationError; +use clarity::vm::types::QualifiedContractIdentifier; +use hashbrown::{HashMap, HashSet}; +use serde::{Deserialize, Serialize}; +use stacks_common::codec::{ + read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, + StacksMessageCodec, +}; +use stacks_common::consts::SIGNER_SLOTS_PER_USER; +use stacks_common::util::hash::Sha512Trunc256Sum; +use tiny_http::{ + Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, +}; + +use crate::http::{decode_http_body, decode_http_request}; +use crate::{BlockProposal, EventError}; + +define_u8_enum!( +/// Enum representing the stackerdb message identifier: this is +/// the contract index in the signers contracts (i.e., X in signers-0-X) +MessageSlotID { + /// Block Proposal message from miners + BlockProposal = 0, + /// Block Response message from signers + BlockResponse = 1 +}); + +define_u8_enum!( +/// Enum representing the SignerMessage type prefix +SignerMessageTypePrefix { + /// Block Proposal message from miners + BlockProposal = 0, + /// Block Response message from signers + BlockResponse = 1 +}); + +#[cfg_attr(test, mutants::skip)] +impl MessageSlotID { + /// Return the 
StackerDB contract corresponding to messages of this type + pub fn stacker_db_contract( + &self, + mainnet: bool, + reward_cycle: u64, + ) -> QualifiedContractIdentifier { + NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) + } + + /// Return the u32 identifier for the message slot (used to index the contract that stores it) + pub fn to_u32(self) -> u32 { + self.to_u8().into() + } +} + +#[cfg_attr(test, mutants::skip)] +impl Display for MessageSlotID { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}({})", self, self.to_u8()) + } +} + +impl TryFrom for SignerMessageTypePrefix { + type Error = CodecError; + fn try_from(value: u8) -> Result { + Self::from_u8(value).ok_or_else(|| { + CodecError::DeserializeError(format!("Unknown signer message type prefix: {value}")) + }) + } +} + +impl From<&SignerMessage> for SignerMessageTypePrefix { + #[cfg_attr(test, mutants::skip)] + fn from(message: &SignerMessage) -> Self { + match message { + SignerMessage::BlockProposal(_) => SignerMessageTypePrefix::BlockProposal, + SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, + } + } +} + +/// The messages being sent through the stacker db contracts +#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub enum SignerMessage { + /// The block proposal from miners for signers to observe and sign + BlockProposal(BlockProposal), + /// The block response from signers for miners to observe + BlockResponse(BlockResponse), +} + +impl Debug for SignerMessage { + #[cfg_attr(test, mutants::skip)] + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::BlockProposal(b) => Debug::fmt(b, f), + Self::BlockResponse(b) => Debug::fmt(b, f), + } + } +} + +impl SignerMessage { + /// Helper function to determine the slot ID for the provided stacker-db writer id + #[cfg_attr(test, mutants::skip)] + pub fn msg_id(&self) -> MessageSlotID { + match self { + 
Self::BlockProposal(_) => MessageSlotID::BlockProposal, + Self::BlockResponse(_) => MessageSlotID::BlockResponse, + } + } +} + +impl StacksMessageCodec for SignerMessage { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &(SignerMessageTypePrefix::from(self) as u8))?; + match self { + SignerMessage::BlockProposal(block_proposal) => { + write_next(fd, block_proposal)?; + } + SignerMessage::BlockResponse(block_response) => { + write_next(fd, block_response)?; + } + }; + Ok(()) + } + + #[cfg_attr(test, mutants::skip)] + fn consensus_deserialize(fd: &mut R) -> Result { + let type_prefix_byte = read_next::(fd)?; + let type_prefix = SignerMessageTypePrefix::try_from(type_prefix_byte)?; + let message = match type_prefix { + SignerMessageTypePrefix::BlockProposal => { + let block_proposal = read_next::(fd)?; + SignerMessage::BlockProposal(block_proposal) + } + SignerMessageTypePrefix::BlockResponse => { + let block_response = read_next::(fd)?; + SignerMessage::BlockResponse(block_response) + } + }; + Ok(message) + } +} + +/// Work around for the fact that a lot of the structs being desierialized are not defined in messages.rs +pub trait StacksMessageCodecExtensions: Sized { + /// Serialize the struct to the provided writer + fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError>; + /// Deserialize the struct from the provided reader + fn inner_consensus_deserialize(fd: &mut R) -> Result; +} + +define_u8_enum!( +/// Enum representing the reject code type prefix +RejectCodeTypePrefix { + /// The block was rejected due to validation issues + ValidationFailed = 0, + /// The block was rejected due to connectivity issues with the signer + ConnectivityIssues = 1 +}); + +impl TryFrom for RejectCodeTypePrefix { + type Error = CodecError; + fn try_from(value: u8) -> Result { + Self::from_u8(value).ok_or_else(|| { + CodecError::DeserializeError(format!("Unknown reject code type prefix: {value}")) + }) + } +} + +impl 
From<&RejectCode> for RejectCodeTypePrefix { + fn from(reject_code: &RejectCode) -> Self { + match reject_code { + RejectCode::ValidationFailed(_) => RejectCodeTypePrefix::ValidationFailed, + RejectCode::ConnectivityIssues => RejectCodeTypePrefix::ConnectivityIssues, + } + } +} + +/// This enum is used to supply a `reason_code` for block rejections +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum RejectCode { + /// RPC endpoint Validation failed + ValidationFailed(ValidateRejectCode), + /// The block was rejected due to connectivity issues with the signer + ConnectivityIssues, +} + +define_u8_enum!( +/// Enum representing the BlockResponse type prefix +BlockResponseTypePrefix { + /// An accepted block response + Accepted = 0, + /// A rejected block response + Rejected = 1 +}); + +impl TryFrom for BlockResponseTypePrefix { + type Error = CodecError; + fn try_from(value: u8) -> Result { + Self::from_u8(value).ok_or_else(|| { + CodecError::DeserializeError(format!("Unknown block response type prefix: {value}")) + }) + } +} + +impl From<&BlockResponse> for BlockResponseTypePrefix { + fn from(block_response: &BlockResponse) -> Self { + match block_response { + BlockResponse::Accepted(_) => BlockResponseTypePrefix::Accepted, + BlockResponse::Rejected(_) => BlockResponseTypePrefix::Rejected, + } + } +} + +/// The response that a signer sends back to observing miners +/// either accepting or rejecting a Nakamoto block with the corresponding reason +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub enum BlockResponse { + /// The Nakamoto block was accepted and therefore signed + Accepted((Sha512Trunc256Sum, MessageSignature)), + /// The Nakamoto block was rejected and therefore not signed + Rejected(BlockRejection), +} + +#[cfg_attr(test, mutants::skip)] +impl std::fmt::Display for BlockResponse { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BlockResponse::Accepted(a) => { + write!( + f, + 
"BlockAccepted: signer_sighash = {}, signature = {}", + a.0, a.1 + ) + } + BlockResponse::Rejected(r) => { + write!( + f, + "BlockRejected: signer_sighash = {}, code = {}, reason = {}", + r.reason_code, r.reason, r.signer_signature_hash + ) + } + } + } +} + +impl BlockResponse { + /// Create a new accepted BlockResponse for the provided block signer signature hash and signature + pub fn accepted(hash: Sha512Trunc256Sum, sig: MessageSignature) -> Self { + Self::Accepted((hash, sig)) + } + + /// Create a new rejected BlockResponse for the provided block signer signature hash and rejection code + pub fn rejected(hash: Sha512Trunc256Sum, reject_code: RejectCode) -> Self { + Self::Rejected(BlockRejection::new(hash, reject_code)) + } +} + +impl StacksMessageCodec for BlockResponse { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &(BlockResponseTypePrefix::from(self) as u8))?; + match self { + BlockResponse::Accepted((hash, sig)) => { + write_next(fd, hash)?; + write_next(fd, sig)?; + } + BlockResponse::Rejected(rejection) => { + write_next(fd, rejection)?; + } + }; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let type_prefix_byte = read_next::(fd)?; + let type_prefix = BlockResponseTypePrefix::try_from(type_prefix_byte)?; + let response = match type_prefix { + BlockResponseTypePrefix::Accepted => { + let hash = read_next::(fd)?; + let sig = read_next::(fd)?; + BlockResponse::Accepted((hash, sig)) + } + BlockResponseTypePrefix::Rejected => { + let rejection = read_next::(fd)?; + BlockResponse::Rejected(rejection) + } + }; + Ok(response) + } +} + +/// A rejection response from a signer for a proposed block +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BlockRejection { + /// The reason for the rejection + pub reason: String, + /// The reason code for the rejection + pub reason_code: RejectCode, + /// The signer signature hash of the block that was rejected + pub signer_signature_hash: 
Sha512Trunc256Sum, +} + +impl BlockRejection { + /// Create a new BlockRejection for the provided block and reason code + pub fn new(signer_signature_hash: Sha512Trunc256Sum, reason_code: RejectCode) -> Self { + Self { + reason: reason_code.to_string(), + reason_code, + signer_signature_hash, + } + } +} + +impl StacksMessageCodec for BlockRejection { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.reason.as_bytes().to_vec())?; + write_next(fd, &self.reason_code)?; + write_next(fd, &self.signer_signature_hash)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let reason_bytes = read_next::, _>(fd)?; + let reason = String::from_utf8(reason_bytes).map_err(|e| { + CodecError::DeserializeError(format!("Failed to decode reason string: {:?}", &e)) + })?; + let reason_code = read_next::(fd)?; + let signer_signature_hash = read_next::(fd)?; + Ok(Self { + reason, + reason_code, + signer_signature_hash, + }) + } +} + +impl From for BlockRejection { + fn from(reject: BlockValidateReject) -> Self { + Self { + reason: reject.reason, + reason_code: RejectCode::ValidationFailed(reject.reason_code), + signer_signature_hash: reject.signer_signature_hash, + } + } +} + +impl StacksMessageCodec for RejectCode { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &(RejectCodeTypePrefix::from(self) as u8))?; + // Do not do a single match here as we may add other variants in the future and don't want to miss adding it + match self { + RejectCode::ValidationFailed(code) => write_next(fd, &(*code as u8))?, + RejectCode::ConnectivityIssues => { + // No additional data to serialize / deserialize + } + }; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let type_prefix_byte = read_next::(fd)?; + let type_prefix = RejectCodeTypePrefix::try_from(type_prefix_byte)?; + let code = match type_prefix { + RejectCodeTypePrefix::ValidationFailed => RejectCode::ValidationFailed( + 
ValidateRejectCode::try_from(read_next::(fd)?).map_err(|e| { + CodecError::DeserializeError(format!( + "Failed to decode validation reject code: {:?}", + &e + )) + })?, + ), + RejectCodeTypePrefix::ConnectivityIssues => RejectCode::ConnectivityIssues, + }; + Ok(code) + } +} + +#[cfg_attr(test, mutants::skip)] +impl std::fmt::Display for RejectCode { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + RejectCode::ValidationFailed(code) => write!(f, "Validation failed: {:?}", code), + RejectCode::ConnectivityIssues => write!( + f, + "The block was rejected due to connectivity issues with the signer." + ), + } + } +} + +impl From for SignerMessage { + fn from(block_response: BlockResponse) -> Self { + Self::BlockResponse(block_response) + } +} + +impl From for SignerMessage { + fn from(block_rejection: BlockRejection) -> Self { + Self::BlockResponse(BlockResponse::Rejected(block_rejection)) + } +} + +impl From for SignerMessage { + fn from(rejection: BlockValidateReject) -> Self { + Self::BlockResponse(BlockResponse::Rejected(rejection.into())) + } +} + +#[cfg(test)] +mod test { + use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; + use blockstack_lib::chainstate::stacks::{ + ThresholdSignature, TransactionAnchorMode, TransactionAuth, TransactionPayload, + TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, + }; + use blockstack_lib::util_lib::strings::StacksString; + use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; + use clarity::util::hash::MerkleTree; + use clarity::util::secp256k1::MessageSignature; + use rand::{thread_rng, Rng, RngCore}; + use rand_core::OsRng; + use stacks_common::bitvec::BitVec; + use stacks_common::consts::CHAIN_ID_TESTNET; + use stacks_common::types::chainstate::StacksPrivateKey; + + use super::{StacksMessageCodecExtensions, *}; + + #[test] + fn signer_slots_count_is_sane() { + let slot_identifiers_len = MessageSlotID::ALL.len(); + assert!( + 
SIGNER_SLOTS_PER_USER as usize >= slot_identifiers_len, + "stacks_common::SIGNER_SLOTS_PER_USER ({}) must be >= slot identifiers ({})", + SIGNER_SLOTS_PER_USER, + slot_identifiers_len, + ); + } + + #[test] + fn serde_reject_code() { + let code = RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock); + let serialized_code = code.serialize_to_vec(); + let deserialized_code = read_next::(&mut &serialized_code[..]) + .expect("Failed to deserialize RejectCode"); + assert_eq!(code, deserialized_code); + + let code = RejectCode::ConnectivityIssues; + let serialized_code = code.serialize_to_vec(); + let deserialized_code = read_next::(&mut &serialized_code[..]) + .expect("Failed to deserialize RejectCode"); + assert_eq!(code, deserialized_code); + } + + #[test] + fn serde_block_rejection() { + let rejection = BlockRejection::new( + Sha512Trunc256Sum([0u8; 32]), + RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), + ); + let serialized_rejection = rejection.serialize_to_vec(); + let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) + .expect("Failed to deserialize BlockRejection"); + assert_eq!(rejection, deserialized_rejection); + + let rejection = + BlockRejection::new(Sha512Trunc256Sum([1u8; 32]), RejectCode::ConnectivityIssues); + let serialized_rejection = rejection.serialize_to_vec(); + let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) + .expect("Failed to deserialize BlockRejection"); + assert_eq!(rejection, deserialized_rejection); + } + + #[test] + fn serde_block_response() { + let response = + BlockResponse::Accepted((Sha512Trunc256Sum([0u8; 32]), MessageSignature::empty())); + let serialized_response = response.serialize_to_vec(); + let deserialized_response = read_next::(&mut &serialized_response[..]) + .expect("Failed to deserialize BlockResponse"); + assert_eq!(response, deserialized_response); + + let response = BlockResponse::Rejected(BlockRejection::new( + Sha512Trunc256Sum([1u8; 32]), + 
RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), + )); + let serialized_response = response.serialize_to_vec(); + let deserialized_response = read_next::(&mut &serialized_response[..]) + .expect("Failed to deserialize BlockResponse"); + assert_eq!(response, deserialized_response); + } + + #[test] + fn serde_signer_message() { + let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(( + Sha512Trunc256Sum([2u8; 32]), + MessageSignature::empty(), + ))); + let serialized_signer_message = signer_message.serialize_to_vec(); + let deserialized_signer_message = + read_next::(&mut &serialized_signer_message[..]) + .expect("Failed to deserialize SignerMessage"); + assert_eq!(signer_message, deserialized_signer_message); + + let header = NakamotoBlockHeader::empty(); + let mut block = NakamotoBlock { + header, + txs: vec![], + }; + let tx_merkle_root = { + let txid_vecs = block + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::::new(&txid_vecs).root() + }; + block.header.tx_merkle_root = tx_merkle_root; + + let block_proposal = BlockProposal { + block, + burn_height: thread_rng().next_u64(), + reward_cycle: thread_rng().next_u64(), + }; + let signer_message = SignerMessage::BlockProposal(block_proposal); + let serialized_signer_message = signer_message.serialize_to_vec(); + let deserialized_signer_message = + read_next::(&mut &serialized_signer_message[..]) + .expect("Failed to deserialize SignerMessage"); + assert_eq!(signer_message, deserialized_signer_message); + } +} diff --git a/libsigner/src/v0/mod.rs b/libsigner/src/v0/mod.rs new file mode 100644 index 0000000000..703acb85f6 --- /dev/null +++ b/libsigner/src/v0/mod.rs @@ -0,0 +1,17 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of 
the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// Messages for the v0 signer +pub mod messages; diff --git a/libsigner/src/messages.rs b/libsigner/src/v1/messages.rs similarity index 90% rename from libsigner/src/messages.rs rename to libsigner/src/v1/messages.rs index 1b6e7f179f..a1422654ac 100644 --- a/libsigner/src/messages.rs +++ b/libsigner/src/v1/messages.rs @@ -38,6 +38,7 @@ use blockstack_lib::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, ValidateRejectCode, }; use blockstack_lib::util_lib::boot::boot_code_id; +use clarity::util::retry::BoundReader; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::QualifiedContractIdentifier; use hashbrown::{HashMap, HashSet}; @@ -94,16 +95,27 @@ MessageSlotID { /// Transactions list for miners and signers to observe Transactions = 11, /// DKG Results - DkgResults = 12 + DkgResults = 12, + /// Persisted encrypted signer state containing DKG shares + EncryptedSignerState = 13 }); -define_u8_enum!(SignerMessageTypePrefix { +define_u8_enum!( +/// Enum representing the signer message type prefix +SignerMessageTypePrefix { + /// A block response message BlockResponse = 0, + /// A wsts packet message Packet = 1, + /// A list of transactions that a signer cares about Transactions = 2, - DkgResults = 3 + /// The results of a successful DKG + DkgResults = 3, + /// The encrypted state of the signer to be persisted + EncryptedSignerState = 4 }); +#[cfg_attr(test, mutants::skip)] impl MessageSlotID { /// Return the StackerDB contract corresponding to messages of this type pub fn 
stacker_db_contract( @@ -136,26 +148,40 @@ impl TryFrom for SignerMessageTypePrefix { } impl From<&SignerMessage> for SignerMessageTypePrefix { + #[cfg_attr(test, mutants::skip)] fn from(message: &SignerMessage) -> Self { match message { SignerMessage::Packet(_) => SignerMessageTypePrefix::Packet, SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, SignerMessage::Transactions(_) => SignerMessageTypePrefix::Transactions, SignerMessage::DkgResults { .. } => SignerMessageTypePrefix::DkgResults, + SignerMessage::EncryptedSignerState(_) => SignerMessageTypePrefix::EncryptedSignerState, } } } -define_u8_enum!(MessageTypePrefix { +define_u8_enum!( +/// Enum representing the message type prefix +MessageTypePrefix { + /// DkgBegin message DkgBegin = 0, + /// DkgPrivateBegin message DkgPrivateBegin = 1, + /// DkgEndBegin message DkgEndBegin = 2, + /// DkgEnd message DkgEnd = 3, + /// DkgPublicShares message DkgPublicShares = 4, + /// DkgPrivateShares message DkgPrivateShares = 5, + /// NonceRequest message NonceRequest = 6, + /// NonceResponse message NonceResponse = 7, + /// SignatureShareRequest message SignatureShareRequest = 8, + /// SignatureShareResponse message SignatureShareResponse = 9 }); @@ -185,13 +211,22 @@ impl TryFrom for MessageTypePrefix { } } -define_u8_enum!(RejectCodeTypePrefix{ +define_u8_enum!( +/// Enum representing the reject code type prefix +RejectCodeTypePrefix { + /// Validation failed ValidationFailed = 0, + /// Signed rejection SignedRejection = 1, + /// Insufficient signers InsufficientSigners = 2, + /// Missing transactions MissingTransactions = 3, + /// Connectivity issues ConnectivityIssues = 4, + /// Nonce timeout NonceTimeout = 5, + /// Aggregator error AggregatorError = 6 }); @@ -234,9 +269,12 @@ pub enum SignerMessage { /// The polynomial commits used to construct the aggregate key party_polynomials: Vec<(u32, PolyCommitment)>, }, + /// The encrypted state of the signer to be persisted + EncryptedSignerState(Vec), 
} impl Debug for SignerMessage { + #[cfg_attr(test, mutants::skip)] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::BlockResponse(b) => Debug::fmt(b, f), @@ -255,12 +293,16 @@ impl Debug for SignerMessage { .field("party_polynomials", &party_polynomials) .finish() } + Self::EncryptedSignerState(s) => { + f.debug_tuple("EncryptedSignerState").field(s).finish() + } } } } impl SignerMessage { /// Helper function to determine the slot ID for the provided stacker-db writer id + #[cfg_attr(test, mutants::skip)] pub fn msg_id(&self) -> MessageSlotID { match self { Self::Packet(packet) => match packet.msg { @@ -278,6 +320,7 @@ impl SignerMessage { Self::BlockResponse(_) => MessageSlotID::BlockResponse, Self::Transactions(_) => MessageSlotID::Transactions, Self::DkgResults { .. } => MessageSlotID::DkgResults, + Self::EncryptedSignerState(_) => MessageSlotID::EncryptedSignerState, } } } @@ -345,10 +388,14 @@ impl StacksMessageCodec for SignerMessage { party_polynomials.iter().map(|(a, b)| (a, b)), )?; } + SignerMessage::EncryptedSignerState(encrypted_state) => { + write_next(fd, encrypted_state)?; + } }; Ok(()) } + #[cfg_attr(test, mutants::skip)] fn consensus_deserialize(fd: &mut R) -> Result { let type_prefix_byte = read_next::(fd)?; let type_prefix = SignerMessageTypePrefix::try_from(type_prefix_byte)?; @@ -383,6 +430,15 @@ impl StacksMessageCodec for SignerMessage { party_polynomials, } } + SignerMessageTypePrefix::EncryptedSignerState => { + // Typically the size of the signer state is much smaller, but in the fully degenerate case the size of the persisted state is + // 2800 * 32 * 4 + C for some small constant C. 
+ // To have some margin, we're expanding the left term with an additional factor 4 + let max_encrypted_state_size = 2800 * 32 * 4 * 4; + let mut bound_reader = BoundReader::from_reader(fd, max_encrypted_state_size); + let encrypted_state = read_next::<_, _>(&mut bound_reader)?; + SignerMessage::EncryptedSignerState(encrypted_state) + } }; Ok(message) } @@ -390,7 +446,9 @@ impl StacksMessageCodec for SignerMessage { /// Work around for the fact that a lot of the structs being desierialized are not defined in messages.rs pub trait StacksMessageCodecExtensions: Sized { + /// Serialize the struct to the provided writer fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError>; + /// Deserialize the struct from the provided reader fn inner_consensus_deserialize(fd: &mut R) -> Result; } @@ -510,50 +568,87 @@ impl StacksMessageCodecExtensions for HashSet { } } +define_u8_enum!( +/// Enum representing the DKG failure type prefix +DkgFailureTypePrefix { + /// Bad state + BadState = 0, + /// Missing public shares + MissingPublicShares = 1, + /// Bad public shares + BadPublicShares = 2, + /// Missing private shares + MissingPrivateShares = 3, + /// Bad private shares + BadPrivateShares = 4 +}); + +impl TryFrom for DkgFailureTypePrefix { + type Error = CodecError; + fn try_from(value: u8) -> Result { + Self::from_u8(value).ok_or_else(|| { + CodecError::DeserializeError(format!("Unknown DKG failure type prefix: {value}")) + }) + } +} + +impl From<&DkgFailure> for DkgFailureTypePrefix { + fn from(failure: &DkgFailure) -> Self { + match failure { + DkgFailure::BadState => DkgFailureTypePrefix::BadState, + DkgFailure::MissingPublicShares(_) => DkgFailureTypePrefix::MissingPublicShares, + DkgFailure::BadPublicShares(_) => DkgFailureTypePrefix::BadPublicShares, + DkgFailure::MissingPrivateShares(_) => DkgFailureTypePrefix::MissingPrivateShares, + DkgFailure::BadPrivateShares(_) => DkgFailureTypePrefix::BadPrivateShares, + } + } +} + impl StacksMessageCodecExtensions 
for DkgFailure { fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &(DkgFailureTypePrefix::from(self) as u8))?; match self { - DkgFailure::BadState => write_next(fd, &0u8), + DkgFailure::BadState => { + // No additional data to serialize + } DkgFailure::MissingPublicShares(shares) => { - write_next(fd, &1u8)?; - shares.inner_consensus_serialize(fd) + shares.inner_consensus_serialize(fd)?; } DkgFailure::BadPublicShares(shares) => { - write_next(fd, &2u8)?; - shares.inner_consensus_serialize(fd) + shares.inner_consensus_serialize(fd)?; } DkgFailure::MissingPrivateShares(shares) => { - write_next(fd, &3u8)?; - shares.inner_consensus_serialize(fd) + shares.inner_consensus_serialize(fd)?; } DkgFailure::BadPrivateShares(shares) => { - write_next(fd, &4u8)?; write_next(fd, &(shares.len() as u32))?; for (id, share) in shares { write_next(fd, id)?; share.inner_consensus_serialize(fd)?; } - Ok(()) } } + Ok(()) } + fn inner_consensus_deserialize(fd: &mut R) -> Result { - let failure_type_prefix = read_next::(fd)?; + let failure_type_prefix_byte = read_next::(fd)?; + let failure_type_prefix = DkgFailureTypePrefix::try_from(failure_type_prefix_byte)?; let failure_type = match failure_type_prefix { - 0 => DkgFailure::BadState, - 1 => { + DkgFailureTypePrefix::BadState => DkgFailure::BadState, + DkgFailureTypePrefix::MissingPublicShares => { let set = HashSet::::inner_consensus_deserialize(fd)?; DkgFailure::MissingPublicShares(set) } - 2 => { + DkgFailureTypePrefix::BadPublicShares => { let set = HashSet::::inner_consensus_deserialize(fd)?; DkgFailure::BadPublicShares(set) } - 3 => { + DkgFailureTypePrefix::MissingPrivateShares => { let set = HashSet::::inner_consensus_deserialize(fd)?; DkgFailure::MissingPrivateShares(set) } - 4 => { + DkgFailureTypePrefix::BadPrivateShares => { let mut map = HashMap::new(); let len = read_next::(fd)?; for _ in 0..len { @@ -563,12 +658,6 @@ impl StacksMessageCodecExtensions for DkgFailure { } 
DkgFailure::BadPrivateShares(map) } - _ => { - return Err(CodecError::DeserializeError(format!( - "Unknown DkgFailure type prefix: {}", - failure_type_prefix - ))) - } }; Ok(failure_type) } @@ -620,34 +709,60 @@ impl StacksMessageCodecExtensions for DkgEndBegin { } } +define_u8_enum!( +/// Enum representing the DKG status type prefix +DkgStatusTypePrefix { + /// Success + Success = 0, + /// Failure + Failure = 1 +}); + +impl TryFrom for DkgStatusTypePrefix { + type Error = CodecError; + fn try_from(value: u8) -> Result { + Self::from_u8(value).ok_or_else(|| { + CodecError::DeserializeError(format!("Unknown DKG status type prefix: {value}")) + }) + } +} + +impl From<&DkgStatus> for DkgStatusTypePrefix { + fn from(status: &DkgStatus) -> Self { + match status { + DkgStatus::Success => DkgStatusTypePrefix::Success, + DkgStatus::Failure(_) => DkgStatusTypePrefix::Failure, + } + } +} + impl StacksMessageCodecExtensions for DkgEnd { fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &self.dkg_id)?; write_next(fd, &self.signer_id)?; + write_next(fd, &(DkgStatusTypePrefix::from(&self.status) as u8))?; match &self.status { - DkgStatus::Success => write_next(fd, &0u8), + DkgStatus::Success => { + // No additional data to serialize + } DkgStatus::Failure(failure) => { - write_next(fd, &1u8)?; - failure.inner_consensus_serialize(fd) + failure.inner_consensus_serialize(fd)?; } } + Ok(()) } + fn inner_consensus_deserialize(fd: &mut R) -> Result { let dkg_id = read_next::(fd)?; let signer_id = read_next::(fd)?; - let status_type_prefix = read_next::(fd)?; + let status_type_prefix_byte = read_next::(fd)?; + let status_type_prefix = DkgStatusTypePrefix::try_from(status_type_prefix_byte)?; let status = match status_type_prefix { - 0 => DkgStatus::Success, - 1 => { + DkgStatusTypePrefix::Success => DkgStatus::Success, + DkgStatusTypePrefix::Failure => { let failure = DkgFailure::inner_consensus_deserialize(fd)?; DkgStatus::Failure(failure) } - _ 
=> { - return Err(CodecError::DeserializeError(format!( - "Unknown DKG status type prefix: {}", - status_type_prefix - ))) - } }; Ok(DkgEnd { dkg_id, @@ -1008,6 +1123,33 @@ impl StacksMessageCodecExtensions for Packet { } } +define_u8_enum!( +/// Enum representing the block response type prefix +BlockResponseTypePrefix { + /// Accepted + Accepted = 0, + /// Rejected + Rejected = 1 +}); + +impl TryFrom for BlockResponseTypePrefix { + type Error = CodecError; + fn try_from(value: u8) -> Result { + Self::from_u8(value).ok_or_else(|| { + CodecError::DeserializeError(format!("Unknown block response type prefix: {value}")) + }) + } +} + +impl From<&BlockResponse> for BlockResponseTypePrefix { + fn from(block_response: &BlockResponse) -> Self { + match block_response { + BlockResponse::Accepted(_) => BlockResponseTypePrefix::Accepted, + BlockResponse::Rejected(_) => BlockResponseTypePrefix::Rejected, + } + } +} + /// The response that a signer sends back to observing miners /// either accepting or rejecting a Nakamoto block with the corresponding reason #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] @@ -1056,14 +1198,13 @@ impl BlockResponse { impl StacksMessageCodec for BlockResponse { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &(BlockResponseTypePrefix::from(self) as u8))?; match self { BlockResponse::Accepted((hash, sig)) => { - write_next(fd, &0u8)?; write_next(fd, hash)?; write_next(fd, sig)?; } BlockResponse::Rejected(rejection) => { - write_next(fd, &1u8)?; write_next(fd, rejection)?; } }; @@ -1071,27 +1212,23 @@ impl StacksMessageCodec for BlockResponse { } fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix = read_next::(fd)?; + let type_prefix_byte = read_next::(fd)?; + let type_prefix = BlockResponseTypePrefix::try_from(type_prefix_byte)?; let response = match type_prefix { - 0 => { + BlockResponseTypePrefix::Accepted => { let hash = read_next::(fd)?; let sig = read_next::(fd)?; 
BlockResponse::Accepted((hash, sig)) } - 1 => { + BlockResponseTypePrefix::Rejected => { let rejection = read_next::(fd)?; BlockResponse::Rejected(rejection) } - _ => { - return Err(CodecError::DeserializeError(format!( - "Unknown block response type prefix: {}", - type_prefix - ))) - } }; Ok(response) } } + /// A rejection response from a signer for a proposed block #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockRejection { @@ -1312,6 +1449,8 @@ mod test { use super::{StacksMessageCodecExtensions, *}; #[test] + #[should_panic] + // V1 signer slots do not have enough slots in Epoch 2.5. Something will need to be updated! fn signer_slots_count_is_sane() { let slot_identifiers_len = MessageSlotID::ALL.len(); assert!( diff --git a/libsigner/src/v1/mod.rs b/libsigner/src/v1/mod.rs new file mode 100644 index 0000000000..e5a691efb2 --- /dev/null +++ b/libsigner/src/v1/mod.rs @@ -0,0 +1,17 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +/// Messages for the v1 signer +pub mod messages; diff --git a/libstackerdb/src/libstackerdb.rs b/libstackerdb/src/libstackerdb.rs index 8c38d8be7b..507d2249f7 100644 --- a/libstackerdb/src/libstackerdb.rs +++ b/libstackerdb/src/libstackerdb.rs @@ -35,6 +35,8 @@ use stacks_common::util::secp256k1::MessageSignature; /// maximum chunk size (16 MB; same as MAX_PAYLOAD_SIZE) pub const STACKERDB_MAX_CHUNK_SIZE: u32 = 16 * 1024 * 1024; +/// CHUNK_SIZE constant for signers StackerDBs (2MB) +pub const SIGNERS_STACKERDB_CHUNK_SIZE: usize = 2 * 1024 * 1024; // 2MB #[cfg(test)] mod tests; diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index b1547eb2e7..49d1ba90b2 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -467,6 +467,14 @@ fn create_event_info_data_code( end-cycle-id: (some (+ {reward_cycle} u1)), ;; Get start cycle ID start-cycle-id: start-cycle, + ;; equal to args[3] + signer-sig: {signer_sig}, + ;; equal to args[4] + signer-key: {signer_key}, + ;; equal to args[5] + max-amount: {max_amount}, + ;; equal to args[6] + auth-id: {auth_id}, }} }}) "#, @@ -474,6 +482,10 @@ fn create_event_info_data_code( reward_cycle = &args[1], reward_cycle_index = &args.get(2).unwrap_or(&Value::none()), pox_set_offset = pox_set_offset.replace("%height%", "burn-block-height"), + signer_sig = &args.get(3).unwrap_or(&Value::none()), + signer_key = &args.get(4).unwrap_or(&Value::none()), + max_amount = &args.get(5).unwrap_or(&Value::none()), + auth_id = &args.get(6).unwrap_or(&Value::none()), ) } "delegate-stx" => { diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index d9f987f574..1337aa808c 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -53,6 +53,7 @@ features = ["serde", "recovery"] [dependencies.rusqlite] version = "=0.24.2" +optional = true features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] [dependencies.ed25519-dalek] @@ -73,10 +74,11 @@ assert-json-diff = "1.0.0" rand_core = { 
workspace = true } [features] -default = ["developer-mode"] +default = ["canonical", "developer-mode"] +canonical = ["rusqlite"] developer-mode = [] slog_json = ["slog-json"] -testing = [] +testing = ["canonical"] [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index 0150346068..01a34d0e62 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -1,4 +1,22 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +#[cfg(feature = "canonical")] use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSqlOutput, ValueRef}; +#[cfg(feature = "canonical")] use rusqlite::ToSql; use serde::{Deserialize, Serialize}; @@ -90,6 +108,7 @@ impl<'de, const MAX_SIZE: u16> Deserialize<'de> for BitVec { } } +#[cfg(feature = "canonical")] impl FromSql for BitVec { fn column_result(value: ValueRef<'_>) -> FromSqlResult { let bytes = hex_bytes(value.as_str()?).map_err(|e| FromSqlError::Other(Box::new(e)))?; @@ -98,6 +117,7 @@ impl FromSql for BitVec { } } +#[cfg(feature = "canonical")] impl ToSql for BitVec { fn to_sql(&self) -> rusqlite::Result> { let hex = bytes_to_hex(self.serialize_to_vec().as_slice()); @@ -117,10 +137,20 @@ impl BitVec { Ok(BitVec { data, len }) } + /// Construct a new BitVec with all entries set to `true` and total length `len` + pub fn ones(len: u16) -> Result, String> { + let mut bitvec: BitVec = BitVec::zeros(len)?; + for i in 0..len { + bitvec.set(i, true)?; + } + Ok(bitvec) + } + pub fn len(&self) -> u16 { self.len } + /// Return the number of bytes needed to store `len` bits. fn data_len(len: u16) -> u16 { len / 8 + if len % 8 == 0 { 0 } else { 1 } } @@ -169,12 +199,30 @@ impl BitVec { self.data[i] = 0; } } + + /// Serialize a BitVec to a string of 1s and 0s for display + /// purposes. For example, a BitVec with [true, false, true] + /// will be serialized to "101". 
+ pub fn binary_str(&self) -> String { + self.clone() + .data + .into_iter() + .fold(String::new(), |acc, byte| { + acc + &format!("{:08b}", byte).chars().rev().collect::() + }) + .chars() + .take(self.len() as usize) + .collect::() + } } #[cfg(test)] mod test { + use serde_json; + use super::BitVec; use crate::codec::StacksMessageCodec; + use crate::util::hash::to_hex; fn check_set_get(mut input: BitVec<{ u16::MAX }>) { let original_input = input.clone(); @@ -258,6 +306,31 @@ mod test { ); } + #[test] + fn binary_str_serialization() { + let mut bitvec_zero_10 = BitVec::<10>::zeros(10).unwrap(); + bitvec_zero_10.set(0, true).unwrap(); + bitvec_zero_10.set(5, true).unwrap(); + bitvec_zero_10.set(3, true).unwrap(); + assert_eq!( + bitvec_zero_10.binary_str(), + "1001010000", + "Binary string should be 1001010000" + ); + } + + #[test] + fn bitvec_ones() { + let bitvec_ones_10 = BitVec::<10>::ones(10).unwrap(); + for i in 0..10 { + assert!( + bitvec_ones_10.get(i).unwrap(), + "All values of ones vec should be true" + ); + } + info!("bitvec_ones_10: {:?}", bitvec_ones_10.binary_str()); + } + #[test] fn vectors() { let mut inputs = vec![ diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 0a9fa9d641..b93c78c50f 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -37,6 +37,7 @@ use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId pub mod consts { use crate::types::chainstate::{BlockHeaderHash, ConsensusHash}; + pub use crate::types::MINING_COMMITMENT_WINDOW; pub const TOKEN_TRANSFER_MEMO_LENGTH: usize = 34; // same as it is in Stacks v1 @@ -64,3 +65,18 @@ pub mod consts { /// to use to participate in DKG and block validation signing. pub const SIGNER_SLOTS_PER_USER: u32 = 13; } + +/// This test asserts that the constant above doesn't change. +/// This exists because the constant above is used by Epoch 2.5 instantiation code. 
+/// +/// Adding more slots will require instantiating more .signers contracts through either +/// consensus changes (i.e., a new epoch) or through non-consensus-critical contract +/// deployments. +#[test] +fn signer_slots_count_2_5() { + assert_eq!( + consts::SIGNER_SLOTS_PER_USER, + 13, + "The .signers-x-y contracts in Epoch 2.5 were instantiated with 13 slots" + ); +} diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index d41e21225d..bc82c20c1c 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -4,7 +4,6 @@ use std::str::FromStr; use curve25519_dalek::digest::Digest; use rand::{Rng, SeedableRng}; -use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use serde::de::{Deserialize, Error as de_Error}; use serde::ser::Error as ser_Error; use serde::Serialize; @@ -64,7 +63,6 @@ pub struct SortitionId(pub [u8; 32]); impl_array_newtype!(SortitionId, u8, 32); impl_array_hexstring_fmt!(SortitionId); impl_byte_array_newtype!(SortitionId, u8, 32); -impl_byte_array_rusqlite_only!(SortitionId); pub struct VRFSeed(pub [u8; 32]); impl_array_newtype!(VRFSeed, u8, 32); @@ -247,7 +245,6 @@ pub struct StacksBlockId(pub [u8; 32]); impl_array_newtype!(StacksBlockId, u8, 32); impl_array_hexstring_fmt!(StacksBlockId); impl_byte_array_newtype!(StacksBlockId, u8, 32); -impl_byte_array_rusqlite_only!(StacksBlockId); impl_byte_array_serde!(StacksBlockId); pub struct ConsensusHash(pub [u8; 20]); @@ -323,18 +320,6 @@ impl StacksMessageCodec for StacksWorkScore { } } -// Implement rusqlite traits for a bunch of structs that used to be defined -// in the chainstate code -impl_byte_array_rusqlite_only!(ConsensusHash); -impl_byte_array_rusqlite_only!(Hash160); -impl_byte_array_rusqlite_only!(BlockHeaderHash); -impl_byte_array_rusqlite_only!(VRFSeed); -impl_byte_array_rusqlite_only!(BurnchainHeaderHash); -impl_byte_array_rusqlite_only!(VRFProof); 
-impl_byte_array_rusqlite_only!(TrieHash); -impl_byte_array_rusqlite_only!(Sha512Trunc256Sum); -impl_byte_array_rusqlite_only!(MessageSignature); - impl_byte_array_message_codec!(TrieHash, TRIEHASH_ENCODED_SIZE as u32); impl_byte_array_message_codec!(Sha512Trunc256Sum, 32); @@ -400,21 +385,6 @@ impl BurnchainHeaderHash { } } -impl FromSql for Sha256dHash { - fn column_result(value: ValueRef) -> FromSqlResult { - let hex_str = value.as_str()?; - let hash = Sha256dHash::from_hex(hex_str).map_err(|_e| FromSqlError::InvalidType)?; - Ok(hash) - } -} - -impl ToSql for Sha256dHash { - fn to_sql(&self) -> rusqlite::Result { - let hex_str = self.be_hex_string(); - Ok(hex_str.into()) - } -} - impl VRFSeed { /// First-ever VRF seed from the genesis block. It's all 0's pub fn initial() -> VRFSeed { diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 2652347273..e6e5cf5f79 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -1,6 +1,9 @@ use std::cmp::Ordering; use std::fmt; +#[cfg(feature = "canonical")] +pub mod sqlite; + use crate::address::c32::{c32_address, c32_address_decode}; use crate::address::{ public_keys_to_address_hash, to_bits_p2pkh, AddressHashMode, @@ -60,6 +63,16 @@ pub const PEER_VERSION_EPOCH_2_0: u8 = 0x00; pub const PEER_VERSION_EPOCH_2_05: u8 = 0x05; pub const PEER_VERSION_EPOCH_2_1: u8 = 0x06; +// sliding burnchain window over which a miner's past block-commit payouts will be used to weight +// its current block-commit in a sortition. +// This is the value used in epoch 2.x +pub const MINING_COMMITMENT_WINDOW: u8 = 6; + +// how often a miner must commit in its mining commitment window in order to even be considered for +// sortition. 
+// Only relevant for Nakamoto (epoch 3.x) +pub const MINING_COMMITMENT_FREQUENCY_NAKAMOTO: u8 = 3; + #[repr(u32)] #[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Copy, Serialize, Deserialize)] pub enum StacksEpochId { @@ -117,6 +130,27 @@ impl StacksEpochId { pub fn supports_pox_missed_slot_unlocks(&self) -> bool { self < &StacksEpochId::Epoch25 } + + /// What is the sortition mining commitment window for this epoch? + pub fn mining_commitment_window(&self) -> u8 { + MINING_COMMITMENT_WINDOW + } + + /// How often must a miner mine in order to be considered for sortition in its commitment + /// window? + pub fn mining_commitment_frequency(&self) -> u8 { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => 0, + StacksEpochId::Epoch30 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, + } + } } impl std::fmt::Display for StacksEpochId { diff --git a/stacks-common/src/types/sqlite.rs b/stacks-common/src/types/sqlite.rs new file mode 100644 index 0000000000..7aba8a2b1e --- /dev/null +++ b/stacks-common/src/types/sqlite.rs @@ -0,0 +1,55 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; + +use super::chainstate::VRFSeed; +use crate::deps_common::bitcoin::util::hash::Sha256dHash; +use crate::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, TrieHash, +}; +use crate::util::hash::{Hash160, Sha512Trunc256Sum}; +use crate::util::secp256k1::MessageSignature; +use crate::util::vrf::VRFProof; + +impl FromSql for Sha256dHash { + fn column_result(value: ValueRef) -> FromSqlResult { + let hex_str = value.as_str()?; + let hash = Sha256dHash::from_hex(hex_str).map_err(|_e| FromSqlError::InvalidType)?; + Ok(hash) + } +} + +impl ToSql for Sha256dHash { + fn to_sql(&self) -> rusqlite::Result { + let hex_str = self.be_hex_string(); + Ok(hex_str.into()) + } +} + +// Implement rusqlite traits for a bunch of structs that used to be defined +// in the chainstate code +impl_byte_array_rusqlite_only!(ConsensusHash); +impl_byte_array_rusqlite_only!(Hash160); +impl_byte_array_rusqlite_only!(BlockHeaderHash); +impl_byte_array_rusqlite_only!(VRFSeed); +impl_byte_array_rusqlite_only!(BurnchainHeaderHash); +impl_byte_array_rusqlite_only!(VRFProof); +impl_byte_array_rusqlite_only!(TrieHash); +impl_byte_array_rusqlite_only!(Sha512Trunc256Sum); +impl_byte_array_rusqlite_only!(MessageSignature); +impl_byte_array_rusqlite_only!(SortitionId); +impl_byte_array_rusqlite_only!(StacksBlockId); diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 57ce30ad9c..5348dc1cbf 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -88,30 +88,74 @@ macro_rules! define_named_enum { /// and EnumType.get_name() for free. #[macro_export] macro_rules! 
define_versioned_named_enum { - ($Name:ident($VerType:ty) { $($Variant:ident($VarName:literal, $Version:expr),)* }) => - { + ($Name:ident($VerType:ty) { $($Variant:ident($VarName:literal, $MinVersion:expr)),* $(,)* }) => { + $crate::define_versioned_named_enum_internal!($Name($VerType) { + $($Variant($VarName, $MinVersion, None)),* + }); + }; +} +#[macro_export] +macro_rules! define_versioned_named_enum_with_max { + ($Name:ident($VerType:ty) { $($Variant:ident($VarName:literal, $MinVersion:expr, $MaxVersion:expr)),* $(,)* }) => { + $crate::define_versioned_named_enum_internal!($Name($VerType) { + $($Variant($VarName, $MinVersion, $MaxVersion)),* + }); + }; +} + +// An internal macro that does the actual enum definition +#[macro_export] +macro_rules! define_versioned_named_enum_internal { + ($Name:ident($VerType:ty) { $($Variant:ident($VarName:literal, $MinVersion:expr, $MaxVersion:expr)),* $(,)* }) => { #[derive(::serde::Serialize, ::serde::Deserialize, Debug, Hash, PartialEq, Eq, Copy, Clone)] pub enum $Name { $($Variant),*, } + impl $Name { pub const ALL: &'static [$Name] = &[$($Name::$Variant),*]; pub const ALL_NAMES: &'static [&'static str] = &[$($VarName),*]; pub fn lookup_by_name(name: &str) -> Option { match name { - $( - $VarName => Some($Name::$Variant), - )* - _ => None + $($VarName => Some($Name::$Variant),)* + _ => None, + } + } + + pub fn lookup_by_name_at_version(name: &str, version: &ClarityVersion) -> Option { + Self::lookup_by_name(name).and_then(|variant| { + let is_active = match ( + variant.get_min_version(), + variant.get_max_version(), + ) { + (ref min_version, Some(ref max_version)) => { + min_version <= version && version <= max_version + } + // No max version is set, so the function is active for all versions greater than min + (ref min_version, None) => min_version <= version, + }; + if is_active { + Some(variant) + } else { + None + } + }) + } + + /// Returns the first Clarity version in which `self` is defined. 
+ pub fn get_min_version(&self) -> $VerType { + match self { + $(Self::$Variant => $MinVersion,)* } } - pub fn get_version(&self) -> $VerType { + /// Returns `Some` for the last Clarity version in which `self` is + /// defined, or `None` if `self` is defined for all versions after + /// `get_min_version()`. + pub fn get_max_version(&self) -> Option<$VerType> { match self { - $( - $Name::$Variant => $Version, - )* + $(Self::$Variant => $MaxVersion,)* } } @@ -125,18 +169,17 @@ macro_rules! define_versioned_named_enum { pub fn get_name_str(&self) -> &'static str { match self { - $( - $Name::$Variant => $VarName, - )* + $(Self::$Variant => $VarName,)* } } } + impl ::std::fmt::Display for $Name { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { write!(f, "{}", self.get_name_str()) } } - } + }; } #[allow(clippy::crate_in_macro_def)] @@ -637,6 +680,7 @@ macro_rules! fmax { }} } +#[cfg(feature = "canonical")] macro_rules! impl_byte_array_rusqlite_only { ($thing:ident) => { impl rusqlite::types::FromSql for $thing { diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 57b2e80804..087a0a4472 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -24,8 +24,10 @@ backoff = "0.4" clarity = { path = "../clarity" } clap = { version = "4.1.1", features = ["derive", "env"] } hashbrown = { workspace = true } +lazy_static = "1.4.0" libsigner = { path = "../libsigner" } libstackerdb = { path = "../libstackerdb" } +prometheus = { version = "0.9", optional = true } rand_core = "0.6" reqwest = { version = "0.11.22", default-features = false, features = ["blocking", "json", "rustls-tls"] } serde = "1" @@ -37,6 +39,7 @@ slog-term = "2.6.0" stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib" } thiserror = "1.0" +tiny_http = { version = "0.12", optional = true } toml = "0.5.6" tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } @@ -60,3 +63,6 @@ features = 
["arbitrary_precision", "unbounded_depth"] [dependencies.secp256k1] version = "0.24.3" features = ["serde", "recovery"] + +[features] +monitoring_prom = ["libsigner/monitoring_prom", "prometheus", "tiny_http"] \ No newline at end of file diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 28ead30fee..1cc51bfe68 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::io::{self, Read}; -use std::net::SocketAddr; use std::path::PathBuf; use blockstack_lib::chainstate::stacks::address::PoxAddress; @@ -28,8 +27,6 @@ use stacks_common::address::{ }; use stacks_common::types::chainstate::StacksPrivateKey; -use crate::config::Network; - extern crate alloc; #[derive(Parser, Debug)] @@ -44,24 +41,16 @@ pub struct Cli { /// Subcommands for the stacks signer binary #[derive(clap::Subcommand, Debug)] pub enum Command { - /// Get a chunk from the stacker-db instance + /// Get a chunk from the stacker-db instance in hex encoding GetChunk(GetChunkArgs), - /// Get the latest chunk from the stacker-db instance + /// Get the latest chunk from the stacker-db instance in hex encoding GetLatestChunk(GetLatestChunkArgs), - /// List chunks from the stacker-db instance + /// List chunks from the stacker-db instance in hex encoding ListChunks(StackerDBArgs), /// Upload a chunk to the stacker-db instance PutChunk(PutChunkArgs), - /// Run DKG and sign the message through the stacker-db instance - DkgSign(SignArgs), - /// Sign the message through the stacker-db instance - Sign(SignArgs), - /// Run a DKG round through the stacker-db instance - Dkg(RunDkgArgs), /// Run the signer, waiting for events from the stacker-db instance Run(RunSignerArgs), - /// Generate necessary files for running a collection of signers - GenerateFiles(GenerateFilesArgs), /// Generate a signature for Stacking transactions 
GenerateStackingSignature(GenerateStackingSignatureArgs), /// Check a configuration file and output config information @@ -126,34 +115,6 @@ pub struct PutChunkArgs { pub data: alloc::vec::Vec, } -#[derive(Parser, Debug, Clone)] -/// Arguments for the dkg-sign and sign command -pub struct SignArgs { - /// Path to config file - #[arg(long, short, value_name = "FILE")] - pub config: PathBuf, - /// The reward cycle the signer is registered for and wants to sign for - /// Note: this must be the current reward cycle of the node - #[arg(long, short)] - pub reward_cycle: u64, - /// The data to sign - #[arg(required = false, value_parser = parse_data)] - // Note this weirdness is due to https://github.com/clap-rs/clap/discussions/4695 - // Need to specify the long name here due to invalid parsing in Clap which looks at the NAME rather than the TYPE which causes issues in how it handles Vec's. - pub data: alloc::vec::Vec, -} - -#[derive(Parser, Debug, Clone)] -/// Arguments for the Dkg command -pub struct RunDkgArgs { - /// Path to config file - #[arg(long, short, value_name = "FILE")] - pub config: PathBuf, - /// The reward cycle the signer is registered for and wants to peform DKG for - #[arg(long, short)] - pub reward_cycle: u64, -} - #[derive(Parser, Debug, Clone)] /// Arguments for the Run command pub struct RunSignerArgs { @@ -162,36 +123,6 @@ pub struct RunSignerArgs { pub config: PathBuf, } -#[derive(Parser, Debug, Clone)] -/// Arguments for the generate-files command -pub struct GenerateFilesArgs { - /// The Stacks node to connect to - #[arg(long)] - pub host: SocketAddr, - #[arg( - long, - required_unless_present = "private_keys", - conflicts_with = "private_keys" - )] - /// The number of signers to generate - pub num_signers: Option, - #[clap(long, value_name = "FILE")] - /// A path to a file containing a list of hexadecimal Stacks private keys of the signers - pub private_keys: Option, - #[arg(long, value_parser = parse_network)] - /// The network to use. 
One of "mainnet", "testnet", or "mocknet". - pub network: Network, - /// The directory to write the test data files to - #[arg(long, default_value = ".")] - pub dir: PathBuf, - /// The number of milliseconds to wait when polling for events from the stacker-db instance. - #[arg(long)] - pub timeout: Option, - #[arg(long)] - /// The authorization password to use to connect to the validate block proposal node endpoint - pub password: String, -} - #[derive(Clone, Debug)] /// Wrapper around `Pox4SignatureTopic` to implement `ValueEnum` pub struct StackingSignatureMethod(Pox4SignatureTopic); @@ -312,21 +243,6 @@ fn parse_data(data: &str) -> Result, String> { Ok(data) } -/// Parse the network. Must be one of "mainnet", "testnet", or "mocknet". -fn parse_network(network: &str) -> Result { - Ok(match network.to_lowercase().as_str() { - "mainnet" => Network::Mainnet, - "testnet" => Network::Testnet, - "mocknet" => Network::Mocknet, - _ => { - return Err(format!( - "Invalid network: {}. Must be one of \"mainnet\", \"testnet\", or \"mocknet\".", - network - )) - } - }) -} - #[cfg(test)] mod tests { use blockstack_lib::chainstate::stacks::address::{PoxAddressType20, PoxAddressType32}; @@ -351,7 +267,7 @@ mod tests { } fn clarity_tuple_version(pox_addr: &PoxAddress) -> u8 { - pox_addr + *pox_addr .as_clarity_tuple() .expect("Failed to generate clarity tuple for pox address") .get("version") @@ -359,9 +275,8 @@ mod tests { .clone() .expect_buff(1) .expect("Expected version to be a u128") - .get(0) + .first() .expect("Expected version to be a uint") - .clone() } #[test] diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 87bee14750..9c4fc652a5 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -83,6 +83,9 @@ pub enum ClientError { /// Stacks node does not support a feature we need #[error("Stacks node does not support a required feature: {0}")] UnsupportedStacksFeature(String), + /// Invalid response from the 
stacks node + #[error("Invalid response from the stacks node: {0}")] + InvalidResponse(String), } /// Retry a function F with an exponential backoff and notification on transient failure @@ -116,6 +119,7 @@ pub(crate) mod tests { use blockstack_lib::net::api::getpoxinfo::{ RPCPoxCurrentCycleInfo, RPCPoxEpoch, RPCPoxInfoData, RPCPoxNextCycleInfo, }; + use blockstack_lib::net::api::postfeerate::{RPCFeeEstimate, RPCFeeEstimateResponse}; use blockstack_lib::util_lib::boot::boot_code_id; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::TupleData; @@ -137,7 +141,6 @@ pub(crate) mod tests { use super::*; use crate::config::{GlobalConfig, SignerConfig}; - use crate::signer::SignerSlotID; pub struct MockServerClient { pub server: TcpListener, @@ -398,6 +401,44 @@ pub(crate) mod tests { format!("HTTP/1.1 200 OK\n\n{{\"okay\":true,\"result\":\"{hex}\"}}") } + /// Build a response for the get_medium_estimated_fee_ustx_response request with a specific medium estimate + pub fn build_get_medium_estimated_fee_ustx_response( + medium_estimate: u64, + ) -> (String, RPCFeeEstimateResponse) { + // Generate some random info + let fee_response = RPCFeeEstimateResponse { + estimated_cost: ExecutionCost { + write_length: thread_rng().next_u64(), + write_count: thread_rng().next_u64(), + read_length: thread_rng().next_u64(), + read_count: thread_rng().next_u64(), + runtime: thread_rng().next_u64(), + }, + estimated_cost_scalar: thread_rng().next_u64(), + cost_scalar_change_by_byte: thread_rng().next_u32() as f64, + estimations: vec![ + RPCFeeEstimate { + fee_rate: thread_rng().next_u32() as f64, + fee: thread_rng().next_u64(), + }, + RPCFeeEstimate { + fee_rate: thread_rng().next_u32() as f64, + fee: medium_estimate, + }, + RPCFeeEstimate { + fee_rate: thread_rng().next_u32() as f64, + fee: thread_rng().next_u64(), + }, + ], + }; + let fee_response_json = serde_json::to_string(&fee_response) + .expect("Failed to serialize fee estimate response"); + ( + format!("HTTP/1.1 200 
OK\n\n{fee_response_json}"), + fee_response, + ) + } + /// Generate a signer config with the given number of signers and keys where the first signer is /// obtained from the provided global config pub fn generate_signer_config( @@ -515,6 +556,7 @@ pub(crate) mod tests { nonce_timeout: config.nonce_timeout, sign_timeout: config.sign_timeout, tx_fee_ustx: config.tx_fee_ustx, + max_tx_fee_ustx: config.max_tx_fee_ustx, db_path: config.db_path.clone(), } } diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 691cde08cc..f23679b099 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -17,19 +17,31 @@ use blockstack_lib::chainstate::stacks::StacksTransaction; use blockstack_lib::net::api::poststackerdbchunk::StackerDBErrorCodes; use hashbrown::HashMap; -use libsigner::{MessageSlotID, SignerMessage, SignerSession, StackerDBSession}; +use libsigner::v1::messages::{MessageSlotID, SignerMessage}; +use libsigner::{SignerSession, StackerDBSession}; use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; -use slog::{slog_debug, slog_warn}; +use slog::{slog_debug, slog_error, slog_warn}; use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::StacksPrivateKey; -use stacks_common::{debug, warn}; +use stacks_common::{debug, error, warn}; +use wsts::net::Packet; use super::ClientError; use crate::client::retry_with_exponential_backoff; use crate::config::SignerConfig; -use crate::signer::SignerSlotID; + +/// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID +#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)] +pub struct SignerSlotID(pub u32); + +impl std::fmt::Display for SignerSlotID { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} /// The StackerDB client for communicating with the .signers contract +#[derive(Debug)] pub struct StackerDB 
{ /// The stacker-db sessions for each signer set and message type. /// Maps message ID to the DB session. @@ -100,7 +112,7 @@ impl StackerDB { } /// Sends message (as a raw msg ID and bytes) to the .signers stacker-db with an - /// exponential backoff retry + /// exponential backoff retry pub fn send_message_bytes_with_retry( &mut self, msg_id: &MessageSlotID, @@ -130,7 +142,7 @@ impl StackerDB { }; debug!( - "Sending a chunk to stackerdb slot ID {slot_id} with version {slot_version} to contract {:?}!\n{chunk:?}", + "Sending a chunk to stackerdb slot ID {slot_id} with version {slot_version} and message ID {msg_id} to contract {:?}!\n{chunk:?}", &session.stackerdb_contract_id ); @@ -179,54 +191,86 @@ impl StackerDB { } } - /// Get the transactions from stackerdb for the signers - fn get_transactions( - transactions_session: &mut StackerDBSession, - signer_ids: &[SignerSlotID], - ) -> Result, ClientError> { + /// Get all signer messages from stackerdb for the given slot IDs + fn get_messages( + session: &mut StackerDBSession, + slot_ids: &[u32], + ) -> Result, ClientError> { + let mut messages = vec![]; let send_request = || { - transactions_session - .get_latest_chunks(&signer_ids.iter().map(|id| id.0).collect::>()) + session + .get_latest_chunks(slot_ids) .map_err(backoff::Error::transient) }; let chunk_ack = retry_with_exponential_backoff(send_request)?; - let mut transactions = Vec::new(); for (i, chunk) in chunk_ack.iter().enumerate() { - let signer_id = *signer_ids - .get(i) - .expect("BUG: retrieved an unequal amount of chunks to requested chunks"); let Some(data) = chunk else { continue; }; let Ok(message) = read_next::(&mut &data[..]) else { if !data.is_empty() { warn!("Failed to deserialize chunk data into a SignerMessage"); - debug!( - "signer #{signer_id}: Failed chunk ({}): {data:?}", - &data.len(), - ); + debug!("slot #{i}: Failed chunk ({}): {data:?}", &data.len(),); } continue; }; + messages.push(message); + } + Ok(messages) + } + + /// Get the 
ordered DKG packets from stackerdb for the signer slot IDs. + pub fn get_dkg_packets( + &mut self, + signer_ids: &[SignerSlotID], + ) -> Result, ClientError> { + let packet_slots = &[ + MessageSlotID::DkgBegin, + MessageSlotID::DkgPublicShares, + MessageSlotID::DkgPrivateBegin, + MessageSlotID::DkgPrivateShares, + MessageSlotID::DkgEndBegin, + MessageSlotID::DkgEnd, + ]; + let slot_ids = signer_ids.iter().map(|id| id.0).collect::>(); + let mut packets = vec![]; + for packet_slot in packet_slots { + let session = self + .signers_message_stackerdb_sessions + .get_mut(packet_slot) + .ok_or(ClientError::NotConnected)?; + let messages = Self::get_messages(session, &slot_ids)?; + for message in messages { + let SignerMessage::Packet(packet) = message else { + warn!("Found an unexpected type in a packet slot {packet_slot}"); + continue; + }; + packets.push(packet); + } + } + Ok(packets) + } + /// Get the transactions from stackerdb for the signers + fn get_transactions( + transactions_session: &mut StackerDBSession, + signer_ids: &[SignerSlotID], + ) -> Result, ClientError> { + let slot_ids = signer_ids.iter().map(|id| id.0).collect::>(); + let messages = Self::get_messages(transactions_session, &slot_ids)?; + let mut transactions = vec![]; + for message in messages { let SignerMessage::Transactions(chunk_transactions) = message else { warn!("Signer wrote an unexpected type to the transactions slot"); continue; }; - debug!( - "Retrieved {} transactions from signer ID {}.", - chunk_transactions.len(), - signer_id - ); transactions.extend(chunk_transactions); } Ok(transactions) } /// Get this signer's latest transactions from stackerdb - pub fn get_current_transactions_with_retry( - &mut self, - ) -> Result, ClientError> { + pub fn get_current_transactions(&mut self) -> Result, ClientError> { let Some(transactions_session) = self .signers_message_stackerdb_sessions .get_mut(&MessageSlotID::Transactions) @@ -237,7 +281,7 @@ impl StackerDB { } /// Get the latest signer 
transactions from signer ids for the next reward cycle - pub fn get_next_transactions_with_retry( + pub fn get_next_transactions( &mut self, signer_ids: &[SignerSlotID], ) -> Result, ClientError> { @@ -245,6 +289,51 @@ impl StackerDB { Self::get_transactions(&mut self.next_transaction_session, signer_ids) } + /// Get the encrypted state for the given signer + pub fn get_encrypted_signer_state( + &mut self, + signer_id: SignerSlotID, + ) -> Result>, ClientError> { + debug!("Getting the persisted encrypted state for signer {signer_id}"); + let Some(state_session) = self + .signers_message_stackerdb_sessions + .get_mut(&MessageSlotID::EncryptedSignerState) + else { + return Err(ClientError::NotConnected); + }; + + let send_request = || { + state_session + .get_latest_chunks(&[signer_id.0]) + .map_err(backoff::Error::transient) + }; + + let Some(chunk) = retry_with_exponential_backoff(send_request)?.pop().ok_or( + ClientError::UnexpectedResponseFormat(format!( + "Missing response for state session request for signer {}", + signer_id + )), + )? + else { + debug!("No persisted state for signer {signer_id}"); + return Ok(None); + }; + + if chunk.is_empty() { + debug!("Empty persisted state for signer {signer_id}"); + return Ok(None); + } + + let SignerMessage::EncryptedSignerState(state) = + read_next::(&mut chunk.as_slice())? 
+ else { + error!("Wrong message type stored in signer state slot for signer {signer_id}"); + return Ok(None); + }; + + Ok(Some(state)) + } + /// Retrieve the signer set this stackerdb client is attached to pub fn get_signer_set(&self) -> u32 { u32::try_from(self.reward_cycle % 2).expect("FATAL: reward cycle % 2 exceeds u32::MAX") @@ -272,7 +361,7 @@ mod tests { use crate::config::GlobalConfig; #[test] - fn get_signer_transactions_with_retry_should_succeed() { + fn get_signer_transactions_should_succeed() { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); let signer_config = generate_signer_config(&config, 5, 20); let mut stackerdb = StackerDB::from(&signer_config); @@ -297,7 +386,7 @@ mod tests { let message = signer_message.serialize_to_vec(); let signer_slot_ids = vec![SignerSlotID(0), SignerSlotID(1)]; - let h = spawn(move || stackerdb.get_next_transactions_with_retry(&signer_slot_ids)); + let h = spawn(move || stackerdb.get_next_transactions(&signer_slot_ids)); let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); response_bytes.extend(message); let mock_server = mock_server_from_config(&config); @@ -315,7 +404,7 @@ mod tests { } #[test] - fn send_signer_message_with_retry_should_succeed() { + fn send_signer_message_should_succeed() { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-1.toml").unwrap(); let signer_config = generate_signer_config(&config, 5, 20); let mut stackerdb = StackerDB::from(&signer_config); diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index b89c5462dd..145c83dddd 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -31,7 +31,9 @@ use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData; use blockstack_lib::net::api::getstackers::GetStackersResponse; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; 
+use blockstack_lib::net::api::postfeerate::{FeeRateEstimateRequestBody, RPCFeeEstimateResponse}; use blockstack_lib::util_lib::boot::{boot_code_addr, boot_code_id}; +use clarity::util::hash::to_hex; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use reqwest::header::AUTHORIZATION; @@ -196,9 +198,46 @@ impl StacksClient { } } + /// Retrieve the medium estimated transaction fee in uSTX from the stacks node for the given transaction + pub fn get_medium_estimated_fee_ustx( + &self, + tx: &StacksTransaction, + ) -> Result { + let request = FeeRateEstimateRequestBody { + estimated_len: Some(tx.tx_len()), + transaction_payload: to_hex(&tx.payload.serialize_to_vec()), + }; + let timer = + crate::monitoring::new_rpc_call_timer(&self.fees_transaction_path(), &self.http_origin); + let send_request = || { + self.stacks_node_client + .post(self.fees_transaction_path()) + .header("Content-Type", "application/json") + .json(&request) + .send() + .map_err(backoff::Error::transient) + }; + let response = retry_with_exponential_backoff(send_request)?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + timer.stop_and_record(); + let fee_estimate_response = response.json::()?; + let fee = fee_estimate_response + .estimations + .get(1) + .map(|estimate| estimate.fee) + .ok_or_else(|| { + ClientError::UnexpectedResponseFormat( + "RPCFeeEstimateResponse missing medium fee estimate".into(), + ) + })?; + Ok(fee) + } + /// Determine the stacks node current epoch pub fn get_node_epoch(&self) -> Result { - let pox_info = self.get_pox_data_with_retry()?; + let pox_info = self.get_pox_data()?; let burn_block_height = self.get_burn_block_height()?; let epoch_25 = pox_info @@ -227,14 +266,13 @@ impl StacksClient { } /// Submit the block proposal to the stacks node. The block will be validated and returned via the HTTP endpoint for Block events. 
- pub fn submit_block_for_validation_with_retry( - &self, - block: NakamotoBlock, - ) -> Result<(), ClientError> { + pub fn submit_block_for_validation(&self, block: NakamotoBlock) -> Result<(), ClientError> { let block_proposal = NakamotoBlockProposal { block, chain_id: self.chain_id, }; + let timer = + crate::monitoring::new_rpc_call_timer(&self.block_proposal_path(), &self.http_origin); let send_request = || { self.stacks_node_client .post(self.block_proposal_path()) @@ -246,6 +284,7 @@ impl StacksClient { }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -316,13 +355,14 @@ impl StacksClient { /// Retrieve the current account nonce for the provided address pub fn get_account_nonce(&self, address: &StacksAddress) -> Result { - let account_entry = self.get_account_entry_with_retry(address)?; - Ok(account_entry.nonce) + self.get_account_entry(address).map(|entry| entry.nonce) } /// Get the current peer info data from the stacks node - pub fn get_peer_info_with_retry(&self) -> Result { + pub fn get_peer_info(&self) -> Result { debug!("Getting stacks node info..."); + let timer = + crate::monitoring::new_rpc_call_timer(&self.core_info_path(), &self.http_origin); let send_request = || { self.stacks_node_client .get(self.core_info_path()) @@ -330,6 +370,7 @@ impl StacksClient { .map_err(backoff::Error::transient) }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -365,11 +406,15 @@ impl StacksClient { } /// Get the reward set signers from the stacks node for the given reward cycle - pub fn get_reward_set_signers_with_retry( + pub fn get_reward_set_signers( &self, reward_cycle: u64, ) -> Result>, ClientError> { debug!("Getting reward set for reward cycle {reward_cycle}..."); + let timer = 
crate::monitoring::new_rpc_call_timer( + &self.reward_set_path(reward_cycle), + &self.http_origin, + ); let send_request = || { self.stacks_node_client .get(self.reward_set_path(reward_cycle)) @@ -377,6 +422,7 @@ impl StacksClient { .map_err(backoff::Error::transient) }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -385,8 +431,10 @@ impl StacksClient { } /// Retreive the current pox data from the stacks node - pub fn get_pox_data_with_retry(&self) -> Result { + pub fn get_pox_data(&self) -> Result { debug!("Getting pox data..."); + #[cfg(feature = "monitoring_prom")] + let timer = crate::monitoring::new_rpc_call_timer(&self.pox_path(), &self.http_origin); let send_request = || { self.stacks_node_client .get(self.pox_path()) @@ -394,6 +442,8 @@ impl StacksClient { .map_err(backoff::Error::transient) }; let response = retry_with_exponential_backoff(send_request)?; + #[cfg(feature = "monitoring_prom")] + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -403,23 +453,22 @@ impl StacksClient { /// Helper function to retrieve the burn tip height from the stacks node fn get_burn_block_height(&self) -> Result { - let peer_info = self.get_peer_info_with_retry()?; - Ok(peer_info.burn_block_height) + self.get_peer_info().map(|info| info.burn_block_height) } /// Get the current reward cycle info from the stacks node pub fn get_current_reward_cycle_info(&self) -> Result { - let pox_data = self.get_pox_data_with_retry()?; + let pox_data = self.get_pox_data()?; let blocks_mined = pox_data .current_burnchain_block_height .saturating_sub(pox_data.first_burnchain_block_height); - let reward_phase_block_length = pox_data + let reward_cycle_length = pox_data .reward_phase_block_length .saturating_add(pox_data.prepare_phase_block_length); - let reward_cycle = 
blocks_mined / reward_phase_block_length; + let reward_cycle = blocks_mined / reward_cycle_length; Ok(RewardCycleInfo { reward_cycle, - reward_phase_block_length, + reward_cycle_length, prepare_phase_block_length: pox_data.prepare_phase_block_length, first_burnchain_block_height: pox_data.first_burnchain_block_height, last_burnchain_block_height: pox_data.current_burnchain_block_height, @@ -427,11 +476,13 @@ impl StacksClient { } /// Helper function to retrieve the account info from the stacks node for a specific address - fn get_account_entry_with_retry( + pub fn get_account_entry( &self, address: &StacksAddress, ) -> Result { debug!("Getting account info..."); + let timer = + crate::monitoring::new_rpc_call_timer(&self.accounts_path(address), &self.http_origin); let send_request = || { self.stacks_node_client .get(self.accounts_path(address)) @@ -439,6 +490,7 @@ impl StacksClient { .map_err(backoff::Error::transient) }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -469,13 +521,12 @@ impl StacksClient { } /// Helper function to create a stacks transaction for a modifying contract call - pub fn build_vote_for_aggregate_public_key( + pub fn build_unsigned_vote_for_aggregate_public_key( &self, signer_index: u32, round: u64, dkg_public_key: Point, reward_cycle: u64, - tx_fee: Option, nonce: u64, ) -> Result { debug!("Building {SIGNERS_VOTING_FUNCTION_NAME} transaction..."); @@ -488,9 +539,8 @@ impl StacksClient { ClarityValue::UInt(round as u128), ClarityValue::UInt(reward_cycle as u128), ]; - let tx_fee = tx_fee.unwrap_or(0); - Self::build_signed_contract_call_transaction( + let unsigned_tx = Self::build_unsigned_contract_call_transaction( &contract_address, contract_name, function_name, @@ -499,17 +549,16 @@ impl StacksClient { self.tx_version, self.chain_id, nonce, - tx_fee, - ) + )?; + Ok(unsigned_tx) } /// Helper 
function to submit a transaction to the Stacks mempool - pub fn submit_transaction_with_retry( - &self, - tx: &StacksTransaction, - ) -> Result { + pub fn submit_transaction(&self, tx: &StacksTransaction) -> Result { let txid = tx.txid(); let tx = tx.serialize_to_vec(); + let timer = + crate::monitoring::new_rpc_call_timer(&self.transaction_path(), &self.http_origin); let send_request = || { self.stacks_node_client .post(self.transaction_path()) @@ -522,6 +571,7 @@ impl StacksClient { }) }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -550,12 +600,14 @@ impl StacksClient { let body = json!({"sender": self.stacks_address.to_string(), "arguments": args}).to_string(); let path = self.read_only_path(contract_addr, contract_name, function_name); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let response = self .stacks_node_client .post(path) .header("Content-Type", "application/json") .body(body) .send()?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -609,9 +661,13 @@ impl StacksClient { format!("{}/v2/stacker_set/{reward_cycle}", self.http_origin) } + fn fees_transaction_path(&self) -> String { + format!("{}/v2/fees/transaction", self.http_origin) + } + /// Helper function to create a stacks transaction for a modifying contract call #[allow(clippy::too_many_arguments)] - pub fn build_signed_contract_call_transaction( + pub fn build_unsigned_contract_call_transaction( contract_addr: &StacksAddress, contract_name: ContractName, function_name: ClarityName, @@ -620,7 +676,6 @@ impl StacksClient { tx_version: TransactionVersion, chain_id: u32, nonce: u64, - tx_fee: u64, ) -> Result { let tx_payload = TransactionPayload::ContractCall(TransactionContractCall { address: *contract_addr, @@ -639,17 +694,22 @@ impl 
StacksClient { ); let mut unsigned_tx = StacksTransaction::new(tx_version, tx_auth, tx_payload); - - unsigned_tx.set_tx_fee(tx_fee); unsigned_tx.set_origin_nonce(nonce); unsigned_tx.anchor_mode = TransactionAnchorMode::Any; unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; unsigned_tx.chain_id = chain_id; + Ok(unsigned_tx) + } + /// Sign an unsigned transaction + pub fn sign_transaction( + &self, + unsigned_tx: StacksTransaction, + ) -> Result { let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); tx_signer - .sign_origin(stacks_private_key) + .sign_origin(&self.stacks_private_key) .map_err(|e| ClientError::TransactionGenerationFailure(e.to_string()))?; tx_signer @@ -662,31 +722,32 @@ impl StacksClient { #[cfg(test)] mod tests { + use std::collections::BTreeMap; use std::io::{BufWriter, Write}; use std::thread::spawn; + use blockstack_lib::burnchains::Address; use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; use blockstack_lib::chainstate::stacks::address::PoxAddress; use blockstack_lib::chainstate::stacks::boot::{ NakamotoSignerEntry, PoxStartCycleInfo, RewardSet, }; - use blockstack_lib::chainstate::stacks::ThresholdSignature; + use clarity::vm::types::{ + ListData, ListTypeData, ResponseData, SequenceData, TupleData, TupleTypeSignature, + TypeSignature, + }; use rand::thread_rng; use rand_core::RngCore; - use stacks_common::bitvec::BitVec; use stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER}; - use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; - use stacks_common::util::hash::Sha512Trunc256Sum; - use stacks_common::util::secp256k1::MessageSignature; use wsts::curve::scalar::Scalar; use super::*; use crate::client::tests::{ build_account_nonce_response, build_get_approved_aggregate_key_response, - build_get_last_round_response, build_get_peer_info_response, build_get_pox_data_response, - build_get_round_info_response, build_get_vote_for_aggregate_key_response, - 
build_get_weight_threshold_response, build_read_only_response, write_response, - MockServerClient, + build_get_last_round_response, build_get_medium_estimated_fee_ustx_response, + build_get_peer_info_response, build_get_pox_data_response, build_get_round_info_response, + build_get_vote_for_aggregate_key_response, build_get_weight_threshold_response, + build_read_only_response, write_response, MockServerClient, }; #[test] @@ -854,12 +915,11 @@ mod tests { assert!(result.is_err()) } - #[ignore] #[test] fn transaction_contract_call_should_send_bytes_to_node() { let mock = MockServerClient::new(); let private_key = StacksPrivateKey::new(); - let tx = StacksClient::build_signed_contract_call_transaction( + let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( &mock.client.stacks_address, ContractName::from("contract-name"), ClarityName::from("function-name"), @@ -868,10 +928,11 @@ mod tests { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 0, - 10_000, ) .unwrap(); + let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); + let mut tx_bytes = [0u8; 1024]; { let mut tx_bytes_writer = BufWriter::new(&mut tx_bytes[..]); @@ -889,7 +950,7 @@ mod tests { + 1; let tx_clone = tx.clone(); - let h = spawn(move || mock.client.submit_transaction_with_retry(&tx_clone)); + let h = spawn(move || mock.client.submit_transaction(&tx_clone)); let request_bytes = write_response( mock.server, @@ -906,7 +967,6 @@ mod tests { ); } - #[ignore] #[test] fn build_vote_for_aggregate_public_key_should_succeed() { let mock = MockServerClient::new(); @@ -917,19 +977,17 @@ mod tests { let reward_cycle = thread_rng().next_u64(); let h = spawn(move || { - mock.client.build_vote_for_aggregate_public_key( + mock.client.build_unsigned_vote_for_aggregate_public_key( signer_index, round, point, reward_cycle, - None, nonce, ) }); assert!(h.join().unwrap().is_ok()); } - #[ignore] #[test] fn broadcast_vote_for_aggregate_public_key_should_succeed() { let mock = MockServerClient::new(); @@ 
-938,28 +996,27 @@ mod tests { let signer_index = thread_rng().next_u32(); let round = thread_rng().next_u64(); let reward_cycle = thread_rng().next_u64(); + let unsigned_tx = mock + .client + .build_unsigned_vote_for_aggregate_public_key( + signer_index, + round, + point, + reward_cycle, + nonce, + ) + .unwrap(); + let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); + let tx_clone = tx.clone(); + let h = spawn(move || mock.client.submit_transaction(&tx_clone)); - let h = spawn(move || { - let tx = mock - .client - .clone() - .build_vote_for_aggregate_public_key( - signer_index, - round, - point, - reward_cycle, - None, - nonce, - ) - .unwrap(); - mock.client.submit_transaction_with_retry(&tx) - }); - let mock = MockServerClient::from_config(mock.config); write_response( mock.server, - b"HTTP/1.1 200 OK\n\n4e99f99bc4a05437abb8c7d0c306618f45b203196498e2ebe287f10497124958", + format!("HTTP/1.1 200 OK\n\n{}", tx.txid()).as_bytes(), ); - assert!(h.join().unwrap().is_ok()); + let returned_txid = h.join().unwrap().unwrap(); + + assert_eq!(returned_txid, tx.txid()); } #[test] @@ -1009,9 +1066,59 @@ mod tests { #[test] fn parse_valid_signer_slots_should_succeed() { let mock = MockServerClient::new(); - let clarity_value_hex = - "0x070b000000050c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a8195196a9a7cf9c37cb13e1ed69a7bc047a84e050c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a6505471146dcf722f0580911183f28bef30a8a890c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a1d7f8e3936e5da5f32982cc47f31d7df9fb1b38a0c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a126d1a814313c952e34c7840acec9211e1727fb80c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a7374ea6bb39f2e8d3d334d62b9f302a977de339a"; - let value = ClarityValue::try_deserialize_hex_untyped(clarity_value_hex).unwrap(); + + let signers = [ + 
"ST20SA6BAK9YFKGVWP4Z1XNMTFF04FA2E0M8YRNNQ", + "ST1JGAHRH8VEFE8QGB04H261Z52ZF62MAH40CD6ZN", + "STEQZ3HS6VJXMQSJK0PC8ZSHTZFSZCDKHA7R60XT", + "ST96T6M18C9WJMQ39HW41B7CJ88Y2WKZQ1CK330M", + "ST1SQ9TKBPEFJX39X6D6P5EFK0AMQFQHKK9R0MJFC", + ]; + + let tuple_type_signature: TupleTypeSignature = [ + (ClarityName::from("num_slots"), TypeSignature::UIntType), + (ClarityName::from("signer"), TypeSignature::PrincipalType), + ] + .into_iter() + .collect::>() + .try_into() + .unwrap(); + + let list_data: Vec<_> = signers + .into_iter() + .map(|signer| { + let principal_data = StacksAddress::from_string(signer).unwrap().into(); + + let data_map = [ + ("num-slots".into(), ClarityValue::UInt(13)), + ( + "signer".into(), + ClarityValue::Principal(PrincipalData::Standard(principal_data)), + ), + ] + .into_iter() + .collect(); + + ClarityValue::Tuple(TupleData { + type_signature: tuple_type_signature.clone(), + data_map, + }) + }) + .collect(); + + let list_type_signature = + ListTypeData::new_list(TypeSignature::TupleType(tuple_type_signature), 5).unwrap(); + + let sequence = ClarityValue::Sequence(SequenceData::List(ListData { + data: list_data, + type_signature: list_type_signature, + })); + + let value = ClarityValue::Response(ResponseData { + committed: true, + data: Box::new(sequence), + }); + let signer_slots = mock.client.parse_signer_slots(value).unwrap(); assert_eq!(signer_slots.len(), 5); signer_slots @@ -1117,23 +1224,12 @@ mod tests { #[test] fn submit_block_for_validation_should_succeed() { let mock = MockServerClient::new(); - let header = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; + let header = 
NakamotoBlockHeader::empty(); let block = NakamotoBlock { header, txs: vec![], }; - let h = spawn(move || mock.client.submit_block_for_validation_with_retry(block)); + let h = spawn(move || mock.client.submit_block_for_validation(block)); write_response(mock.server, b"HTTP/1.1 200 OK\n\n"); assert!(h.join().unwrap().is_ok()); } @@ -1141,23 +1237,12 @@ mod tests { #[test] fn submit_block_for_validation_should_fail() { let mock = MockServerClient::new(); - let header = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; + let header = NakamotoBlockHeader::empty(); let block = NakamotoBlock { header, txs: vec![], }; - let h = spawn(move || mock.client.submit_block_for_validation_with_retry(block)); + let h = spawn(move || mock.client.submit_block_for_validation(block)); write_response(mock.server, b"HTTP/1.1 404 Not Found\n\n"); assert!(h.join().unwrap().is_err()); } @@ -1166,7 +1251,7 @@ mod tests { fn get_peer_info_should_succeed() { let mock = MockServerClient::new(); let (response, peer_info) = build_get_peer_info_response(None, None); - let h = spawn(move || mock.client.get_peer_info_with_retry()); + let h = spawn(move || mock.client.get_peer_info()); write_response(mock.server, response.as_bytes()); assert_eq!(h.join().unwrap().unwrap(), peer_info); } @@ -1207,7 +1292,7 @@ mod tests { let stackers_response_json = serde_json::to_string(&stackers_response) .expect("Failed to serialize get stacker response"); let response = format!("HTTP/1.1 200 OK\n\n{stackers_response_json}"); - let h = spawn(move || mock.client.get_reward_set_signers_with_retry(0)); + let h = spawn(move || mock.client.get_reward_set_signers(0)); 
write_response(mock.server, response.as_bytes()); assert_eq!(h.join().unwrap().unwrap(), stacker_set.signers); } @@ -1262,4 +1347,27 @@ mod tests { write_response(mock.server, round_response.as_bytes()); assert_eq!(h.join().unwrap().unwrap(), weight as u128); } + + #[test] + fn get_medium_estimated_fee_ustx_should_succeed() { + let mock = MockServerClient::new(); + let private_key = StacksPrivateKey::new(); + let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( + &mock.client.stacks_address, + ContractName::from("contract-name"), + ClarityName::from("function-name"), + &[], + &private_key, + TransactionVersion::Testnet, + CHAIN_ID_TESTNET, + 0, + ) + .unwrap(); + + let estimate = thread_rng().next_u64(); + let response = build_get_medium_estimated_fee_ustx_response(estimate).0; + let h = spawn(move || mock.client.get_medium_estimated_fee_ustx(&unsigned_tx)); + write_response(mock.server, response.as_bytes()); + assert_eq!(h.join().unwrap().unwrap(), estimate); + } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index dc48dda27a..f36ae91c26 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -31,11 +31,10 @@ use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPu use stacks_common::types::PrivateKey; use wsts::curve::scalar::Scalar; -use crate::signer::SignerSlotID; +use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; -// Default transaction fee in microstacks (if unspecificed in the config file) -// TODO: Use the fee estimation endpoint to get the default fee. +// Default transaction fee to use in microstacks (if unspecificed in the config file) const TX_FEE_USTX: u64 = 10_000; #[derive(thiserror::Error, Debug)] @@ -144,8 +143,10 @@ pub struct SignerConfig { pub nonce_timeout: Option, /// timeout to gather signature shares pub sign_timeout: Option, - /// the STX tx fee to use in uSTX + /// the STX tx fee to use in uSTX. 
pub tx_fee_ustx: u64, + /// If set, will use the estimated fee up to this amount. + pub max_tx_fee_ustx: Option, /// The path to the signer's database file pub db_path: PathBuf, } @@ -177,12 +178,16 @@ pub struct GlobalConfig { pub nonce_timeout: Option, /// timeout to gather signature shares pub sign_timeout: Option, - /// the STX tx fee to use in uSTX + /// the STX tx fee to use in uSTX. pub tx_fee_ustx: u64, + /// the max STX tx fee to use in uSTX when estimating fees + pub max_tx_fee_ustx: Option, /// the authorization password for the block proposal endpoint pub auth_password: String, /// The path to the signer's database file pub db_path: PathBuf, + /// Metrics endpoint + pub metrics_endpoint: Option, } /// Internal struct for loading up the config file @@ -209,12 +214,17 @@ struct RawConfigFile { pub nonce_timeout_ms: Option, /// timeout in (millisecs) to gather signature shares pub sign_timeout_ms: Option, - /// the STX tx fee to use in uSTX + /// the STX tx fee to use in uSTX. If not set, will default to TX_FEE_USTX pub tx_fee_ustx: Option, + /// the max STX tx fee to use in uSTX when estimating fees. + /// If not set, will use tx_fee_ustx. + pub max_tx_fee_ustx: Option, /// The authorization password for the block proposal endpoint pub auth_password: String, /// The path to the signer's database file or :memory: for an in-memory database pub db_path: String, + /// Metrics endpoint + pub metrics_endpoint: Option, } impl RawConfigFile { @@ -292,6 +302,19 @@ impl TryFrom for GlobalConfig { let sign_timeout = raw_data.sign_timeout_ms.map(Duration::from_millis); let db_path = raw_data.db_path.into(); + let metrics_endpoint = match raw_data.metrics_endpoint { + Some(endpoint) => Some( + endpoint + .to_socket_addrs() + .map_err(|_| ConfigError::BadField("endpoint".to_string(), endpoint.clone()))? 
+ .next() + .ok_or_else(|| { + ConfigError::BadField("endpoint".to_string(), endpoint.clone()) + })?, + ), + None => None, + }; + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -306,8 +329,10 @@ impl TryFrom for GlobalConfig { nonce_timeout, sign_timeout, tx_fee_ustx: raw_data.tx_fee_ustx.unwrap_or(TX_FEE_USTX), + max_tx_fee_ustx: raw_data.max_tx_fee_ustx, auth_password: raw_data.auth_password, db_path, + metrics_endpoint, }) } } @@ -338,6 +363,10 @@ impl GlobalConfig { 0 => "default".to_string(), _ => (self.tx_fee_ustx as f64 / 1_000_000.0).to_string(), }; + let metrics_endpoint = match &self.metrics_endpoint { + Some(endpoint) => endpoint.to_string(), + None => "None".to_string(), + }; format!( r#" Stacks node host: {node_host} @@ -347,14 +376,16 @@ Public key: {public_key} Network: {network} Database path: {db_path} DKG transaction fee: {tx_fee} uSTX +Metrics endpoint: {metrics_endpoint} "#, node_host = self.node_host, endpoint = self.endpoint, - stacks_address = self.stacks_address.to_string(), + stacks_address = self.stacks_address, public_key = StacksPublicKey::from_private(&self.stacks_private_key).to_hex(), network = self.network, db_path = self.db_path.to_str().unwrap_or_default(), - tx_fee = tx_fee + tx_fee = tx_fee, + metrics_endpoint = metrics_endpoint, ) } } @@ -366,6 +397,7 @@ impl Display for GlobalConfig { } /// Helper function for building a signer config for each provided signer private key +#[allow(clippy::too_many_arguments)] pub fn build_signer_config_tomls( stacks_private_keys: &[StacksPrivateKey], node_host: &str, @@ -374,6 +406,9 @@ pub fn build_signer_config_tomls( password: &str, run_stamp: u16, mut port_start: usize, + max_tx_fee_ustx: Option, + tx_fee_ustx: Option, + mut metrics_port_start: Option, ) -> Vec { let mut signer_config_tomls = vec![]; @@ -405,11 +440,40 @@ db_path = "{db_path}" signer_config_toml = format!( r#" {signer_config_toml} -event_timeout = {event_timeout_ms} +event_timeout = {event_timeout_ms} +"# + ) + } + + 
if let Some(max_tx_fee_ustx) = max_tx_fee_ustx { + signer_config_toml = format!( + r#" +{signer_config_toml} +max_tx_fee_ustx = {max_tx_fee_ustx} +"# + ) + } + + if let Some(tx_fee_ustx) = tx_fee_ustx { + signer_config_toml = format!( + r#" +{signer_config_toml} +tx_fee_ustx = {tx_fee_ustx} "# ) } + if let Some(metrics_port) = metrics_port_start { + let metrics_endpoint = format!("localhost:{}", metrics_port); + signer_config_toml = format!( + r#" +{signer_config_toml} +metrics_endpoint = "{metrics_endpoint}" +"# + ); + metrics_port_start = Some(metrics_port + 1); + } + signer_config_tomls.push(signer_config_toml); } @@ -439,12 +503,126 @@ mod tests { password, rand::random(), 3000, + None, + None, + Some(4000), ); let config = RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); assert_eq!(config.auth_password, "melon"); + assert!(config.max_tx_fee_ustx.is_none()); + assert!(config.tx_fee_ustx.is_none()); + assert_eq!(config.metrics_endpoint, Some("localhost:4000".to_string())); + } + + #[test] + fn fee_options_should_deserialize_correctly() { + let pk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let node_host = "localhost"; + let network = Network::Testnet; + let password = "melon"; + + // Test both max_tx_fee_ustx and tx_fee_ustx are unspecified + let config_tomls = build_signer_config_tomls( + &[pk], + node_host, + None, + &network, + password, + rand::random(), + 3000, + None, + None, + None, + ); + + let config = + RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); + + assert!(config.max_tx_fee_ustx.is_none()); + assert!(config.tx_fee_ustx.is_none()); + + let config = GlobalConfig::try_from(config).expect("Failed to parse config"); + assert!(config.max_tx_fee_ustx.is_none()); + assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); + + // Test both max_tx_fee_ustx and tx_fee_ustx are specified + let max_tx_fee_ustx = 
Some(1000); + let tx_fee_ustx = Some(2000); + let config_tomls = build_signer_config_tomls( + &[pk], + node_host, + None, + &network, + password, + rand::random(), + 3000, + max_tx_fee_ustx, + tx_fee_ustx, + None, + ); + + let config = + RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); + + assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); + assert_eq!(config.tx_fee_ustx, tx_fee_ustx); + + // Test only max_tx_fee_ustx is specified + let max_tx_fee_ustx = Some(1000); + let config_tomls = build_signer_config_tomls( + &[pk], + node_host, + None, + &network, + password, + rand::random(), + 3000, + max_tx_fee_ustx, + None, + None, + ); + + let config = + RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); + + assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); + assert!(config.tx_fee_ustx.is_none()); + + let config = GlobalConfig::try_from(config).expect("Failed to parse config"); + assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); + assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); + + // Test only tx_fee_ustx is specified + let tx_fee_ustx = Some(1000); + let config_tomls = build_signer_config_tomls( + &[pk], + node_host, + None, + &network, + password, + rand::random(), + 3000, + None, + tx_fee_ustx, + None, + ); + + let config = + RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); + + assert!(config.max_tx_fee_ustx.is_none()); + assert_eq!(config.tx_fee_ustx, tx_fee_ustx); + + let config = GlobalConfig::try_from(config).expect("Failed to parse config"); + assert!(config.max_tx_fee_ustx.is_none()); + assert_eq!(Some(config.tx_fee_ustx), tx_fee_ustx); } #[test] @@ -462,6 +640,7 @@ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet Database path: :memory: DKG transaction fee: 0.01 uSTX +Metrics endpoint: 0.0.0.0:9090 "# ) ); diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 9dcd0a069f..0e8a6b10bc 
100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -26,11 +26,45 @@ pub mod cli; pub mod client; /// The configuration module for the signer pub mod config; -/// The coordinator selector for the signer -pub mod coordinator; +/// The monitoring server for the signer +pub mod monitoring; /// The primary runloop for the signer pub mod runloop; -/// The signer module for processing events -pub mod signer; -/// The state module for the signer -pub mod signerdb; +/// The v0 implementation of the signer. This does not include WSTS support +pub mod v0; +/// The v1 implementation of the singer. This includes WSTS support +pub mod v1; +use std::fmt::{Debug, Display}; +use std::sync::mpsc::Sender; + +use libsigner::{SignerEvent, SignerEventTrait}; +use wsts::state_machine::OperationResult; + +use crate::client::StacksClient; +use crate::config::SignerConfig; +use crate::runloop::RunLoopCommand; + +/// A trait which provides a common `Signer` interface for `v1` and `v2` +pub trait Signer: Debug + Display { + /// Create a new `Signer` instance + fn new(config: SignerConfig) -> Self; + /// Update the `Signer` instance's next reward cycle data with the latest `SignerConfig` + fn update_next_signer_data(&mut self, next_signer_config: &SignerConfig); + /// Get the reward cycle of the signer + fn reward_cycle(&self) -> u64; + /// Process an event + fn process_event( + &mut self, + stacks_client: &StacksClient, + event: Option<&SignerEvent>, + res: Sender>, + current_reward_cycle: u64, + ); + /// Process a command + fn process_command( + &mut self, + stacks_client: &StacksClient, + current_reward_cycle: u64, + command: Option, + ); +} diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 34a9f62dc3..75514fd2eb 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -26,40 +26,25 @@ extern crate serde; extern crate serde_json; extern crate toml; -use std::fs::File; -use std::io::{self, BufRead, Write}; -use std::path::{Path, 
PathBuf}; -use std::sync::mpsc::{channel, Receiver, Sender}; -use std::time::Duration; +use std::io::{self, Write}; -use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; use clarity::vm::types::QualifiedContractIdentifier; -use libsigner::{RunningSigner, Signer, SignerEventReceiver, SignerSession, StackerDBSession}; +use libsigner::{SignerSession, StackerDBSession}; use libstackerdb::StackerDBChunkData; -use slog::{slog_debug, slog_error, slog_info}; -use stacks_common::codec::read_next; -use stacks_common::types::chainstate::StacksPrivateKey; +use slog::slog_debug; +use stacks_common::debug; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; -use stacks_common::{debug, error, info}; use stacks_signer::cli::{ - Cli, Command, GenerateFilesArgs, GenerateStackingSignatureArgs, GetChunkArgs, - GetLatestChunkArgs, PutChunkArgs, RunDkgArgs, RunSignerArgs, SignArgs, StackerDBArgs, + Cli, Command, GenerateStackingSignatureArgs, GetChunkArgs, GetLatestChunkArgs, PutChunkArgs, + RunSignerArgs, StackerDBArgs, }; -use stacks_signer::config::{build_signer_config_tomls, GlobalConfig}; -use stacks_signer::runloop::{RunLoop, RunLoopCommand}; -use stacks_signer::signer::Command as SignerCommand; +use stacks_signer::config::GlobalConfig; +use stacks_signer::v1; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; -use wsts::state_machine::OperationResult; - -struct SpawnedSigner { - running_signer: RunningSigner>, - cmd_send: Sender, - res_recv: Receiver>, -} /// Create a new stacker db session fn stackerdb_session(host: &str, contract: QualifiedContractIdentifier) -> StackerDBSession { @@ -71,90 +56,15 @@ fn stackerdb_session(host: &str, contract: QualifiedContractIdentifier) -> Stack /// Write the chunk to stdout fn write_chunk_to_stdout(chunk_opt: Option>) { if let Some(chunk) = 
chunk_opt.as_ref() { - let bytes = io::stdout().write(chunk).unwrap(); - if bytes < chunk.len() { + let hexed_string = to_hex(chunk); + let hexed_chunk = hexed_string.as_bytes(); + let bytes = io::stdout().write(hexed_chunk).unwrap(); + if bytes < hexed_chunk.len() { print!( "Failed to write complete chunk to stdout. Missing {} bytes", - chunk.len() - bytes - ); - } - } -} - -// Spawn a running signer and return its handle, command sender, and result receiver -fn spawn_running_signer(path: &PathBuf) -> SpawnedSigner { - let config = GlobalConfig::try_from(path).unwrap(); - let endpoint = config.endpoint; - info!("Starting signer with config: {}", config); - let (cmd_send, cmd_recv) = channel(); - let (res_send, res_recv) = channel(); - let ev = SignerEventReceiver::new(config.network.is_mainnet()); - let runloop = RunLoop::from(config); - let mut signer: Signer, RunLoop, SignerEventReceiver> = - Signer::new(runloop, ev, cmd_recv, res_send); - let running_signer = signer.spawn(endpoint).unwrap(); - SpawnedSigner { - running_signer, - cmd_send, - res_recv, - } -} - -// Process a DKG result -fn process_dkg_result(dkg_res: &[OperationResult]) { - assert!(dkg_res.len() == 1, "Received unexpected number of results"); - let dkg = dkg_res.first().unwrap(); - match dkg { - OperationResult::Dkg(aggregate_key) => { - println!("Received aggregate group key: {aggregate_key}"); - } - OperationResult::Sign(signature) => { - panic!( - "Received unexpected signature ({},{})", - &signature.R, &signature.z, - ); - } - OperationResult::SignTaproot(schnorr_proof) => { - panic!( - "Received unexpected schnorr proof ({},{})", - &schnorr_proof.r, &schnorr_proof.s, + hexed_chunk.len() - bytes ); } - OperationResult::DkgError(dkg_error) => { - panic!("Received DkgError {}", dkg_error); - } - OperationResult::SignError(sign_error) => { - panic!("Received SignError {}", sign_error); - } - } -} - -// Process a Sign result -fn process_sign_result(sign_res: &[OperationResult]) { - 
assert!(sign_res.len() == 1, "Received unexpected number of results"); - let sign = sign_res.first().unwrap(); - match sign { - OperationResult::Dkg(aggregate_key) => { - panic!("Received unexpected aggregate group key: {aggregate_key}"); - } - OperationResult::Sign(signature) => { - panic!( - "Received bood signature ({},{})", - &signature.R, &signature.z, - ); - } - OperationResult::SignTaproot(schnorr_proof) => { - panic!( - "Received unexpected schnorr proof ({},{})", - &schnorr_proof.r, &schnorr_proof.s, - ); - } - OperationResult::DkgError(dkg_error) => { - panic!("Received DkgError {}", dkg_error); - } - OperationResult::SignError(sign_error) => { - panic!("Received SignError {}", sign_error); - } } } @@ -176,7 +86,9 @@ fn handle_list_chunks(args: StackerDBArgs) { debug!("Listing chunks..."); let mut session = stackerdb_session(&args.host, args.contract); let chunk_list = session.list_chunks().unwrap(); - println!("{}", serde_json::to_string(&chunk_list).unwrap()); + let chunk_list_json = serde_json::to_string(&chunk_list).unwrap(); + let hexed_json = to_hex(chunk_list_json.as_bytes()); + println!("{}", hexed_json); } fn handle_put_chunk(args: PutChunkArgs) { @@ -188,118 +100,13 @@ fn handle_put_chunk(args: PutChunkArgs) { println!("{}", serde_json::to_string(&chunk_ack).unwrap()); } -fn handle_dkg(args: RunDkgArgs) { - debug!("Running DKG..."); - let spawned_signer = spawn_running_signer(&args.config); - let dkg_command = RunLoopCommand { - reward_cycle: args.reward_cycle, - command: SignerCommand::Dkg, - }; - spawned_signer.cmd_send.send(dkg_command).unwrap(); - let dkg_res = spawned_signer.res_recv.recv().unwrap(); - process_dkg_result(&dkg_res); - spawned_signer.running_signer.stop(); -} - -fn handle_sign(args: SignArgs) { - debug!("Signing message..."); - let spawned_signer = spawn_running_signer(&args.config); - let Some(block) = read_next::(&mut &args.data[..]).ok() else { - error!("Unable to parse provided message as a NakamotoBlock."); - 
spawned_signer.running_signer.stop(); - return; - }; - let sign_command = RunLoopCommand { - reward_cycle: args.reward_cycle, - command: SignerCommand::Sign { - block, - is_taproot: false, - merkle_root: None, - }, - }; - spawned_signer.cmd_send.send(sign_command).unwrap(); - let sign_res = spawned_signer.res_recv.recv().unwrap(); - process_sign_result(&sign_res); - spawned_signer.running_signer.stop(); -} - -fn handle_dkg_sign(args: SignArgs) { - debug!("Running DKG and signing message..."); - let spawned_signer = spawn_running_signer(&args.config); - let Some(block) = read_next::(&mut &args.data[..]).ok() else { - error!("Unable to parse provided message as a NakamotoBlock."); - spawned_signer.running_signer.stop(); - return; - }; - let dkg_command = RunLoopCommand { - reward_cycle: args.reward_cycle, - command: SignerCommand::Dkg, - }; - let sign_command = RunLoopCommand { - reward_cycle: args.reward_cycle, - command: SignerCommand::Sign { - block, - is_taproot: false, - merkle_root: None, - }, - }; - // First execute DKG, then sign - spawned_signer.cmd_send.send(dkg_command).unwrap(); - spawned_signer.cmd_send.send(sign_command).unwrap(); - let dkg_res = spawned_signer.res_recv.recv().unwrap(); - process_dkg_result(&dkg_res); - let sign_res = spawned_signer.res_recv.recv().unwrap(); - process_sign_result(&sign_res); - spawned_signer.running_signer.stop(); -} - fn handle_run(args: RunSignerArgs) { debug!("Running signer..."); - let spawned_signer = spawn_running_signer(&args.config); + let config = GlobalConfig::try_from(&args.config).unwrap(); + let spawned_signer = v1::SpawnedSigner::from(config); println!("Signer spawned successfully. 
Waiting for messages to process..."); // Wait for the spawned signer to stop (will only occur if an error occurs) - let _ = spawned_signer.running_signer.join(); -} - -fn handle_generate_files(args: GenerateFilesArgs) { - debug!("Generating files..."); - let signer_stacks_private_keys = if let Some(path) = args.private_keys { - let file = File::open(path).unwrap(); - let reader = io::BufReader::new(file); - - let private_keys: Vec = reader.lines().collect::>().unwrap(); - println!("{}", StacksPrivateKey::new().to_hex()); - let private_keys = private_keys - .iter() - .map(|key| StacksPrivateKey::from_hex(key).expect("Failed to parse private key.")) - .collect::>(); - if private_keys.is_empty() { - panic!("Private keys file is empty."); - } - private_keys - } else { - let num_signers = args.num_signers.unwrap(); - if num_signers == 0 { - panic!("--num-signers must be non-zero."); - } - (0..num_signers) - .map(|_| StacksPrivateKey::new()) - .collect::>() - }; - - let signer_config_tomls = build_signer_config_tomls( - &signer_stacks_private_keys, - &args.host.to_string(), - args.timeout.map(Duration::from_millis), - &args.network, - &args.password, - rand::random(), - 3000, - ); - debug!("Built {:?} signer config tomls.", signer_config_tomls.len()); - for (i, file_contents) in signer_config_tomls.iter().enumerate() { - write_file(&args.dir, &format!("signer-{}.toml", i), file_contents); - } + let _ = spawned_signer.join(); } fn handle_generate_stacking_signature( @@ -355,15 +162,6 @@ fn handle_check_config(args: RunSignerArgs) { println!("Config: {}", config); } -/// Helper function for writing the given contents to filename in the given directory -fn write_file(dir: &Path, filename: &str, contents: &str) { - let file_path = dir.join(filename); - let filename = file_path.to_str().unwrap(); - let mut file = File::create(filename).unwrap(); - file.write_all(contents.as_bytes()).unwrap(); - println!("Created file: {}", filename); -} - fn main() { let cli = Cli::parse(); 
@@ -385,21 +183,9 @@ fn main() { Command::PutChunk(args) => { handle_put_chunk(args); } - Command::Dkg(args) => { - handle_dkg(args); - } - Command::DkgSign(args) => { - handle_dkg_sign(args); - } - Command::Sign(args) => { - handle_sign(args); - } Command::Run(args) => { handle_run(args); } - Command::GenerateFiles(args) => { - handle_generate_files(args); - } Command::GenerateStackingSignature(args) => { handle_generate_stacking_signature(args, true); } @@ -425,6 +211,7 @@ pub mod tests { use super::{handle_generate_stacking_signature, *}; use crate::{GenerateStackingSignatureArgs, GlobalConfig}; + #[allow(clippy::too_many_arguments)] fn call_verify_signer_sig( pox_addr: &PoxAddress, reward_cycle: u128, diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs new file mode 100644 index 0000000000..0ecc99b5f8 --- /dev/null +++ b/stacks-signer/src/monitoring/mod.rs @@ -0,0 +1,188 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +#[cfg(feature = "monitoring_prom")] +use ::prometheus::HistogramTimer; +#[cfg(feature = "monitoring_prom")] +use slog::slog_error; +#[cfg(not(feature = "monitoring_prom"))] +use slog::slog_warn; +#[cfg(feature = "monitoring_prom")] +use stacks_common::error; +#[cfg(not(feature = "monitoring_prom"))] +use stacks_common::warn; + +use crate::config::GlobalConfig; + +#[cfg(feature = "monitoring_prom")] +mod prometheus; + +#[cfg(feature = "monitoring_prom")] +mod server; + +/// Update stacks tip height gauge +#[allow(unused_variables)] +pub fn update_stacks_tip_height(height: i64) { + #[cfg(feature = "monitoring_prom")] + prometheus::STACKS_TIP_HEIGHT_GAUGE.set(height); +} + +/// Update the current reward cycle +#[allow(unused_variables)] +pub fn update_reward_cycle(reward_cycle: i64) { + #[cfg(feature = "monitoring_prom")] + prometheus::CURRENT_REWARD_CYCLE.set(reward_cycle); +} + +/// Increment the block validation responses counter +#[allow(unused_variables)] +pub fn increment_block_validation_responses(accepted: bool) { + #[cfg(feature = "monitoring_prom")] + { + let label_value = if accepted { "accepted" } else { "rejected" }; + prometheus::BLOCK_VALIDATION_RESPONSES + .with_label_values(&[label_value]) + .inc(); + } +} + +/// Increment the block responses sent counter +#[allow(unused_variables)] +pub fn increment_block_responses_sent(accepted: bool) { + #[cfg(feature = "monitoring_prom")] + { + let label_value = if accepted { "accepted" } else { "rejected" }; + prometheus::BLOCK_RESPONSES_SENT + .with_label_values(&[label_value]) + .inc(); + } +} + +/// Increment the signer inbound messages counter +#[allow(unused_variables)] +pub fn increment_signer_inbound_messages(amount: i64) { + #[cfg(feature = "monitoring_prom")] + prometheus::SIGNER_INBOUND_MESSAGES.inc_by(amount); +} + +/// Increment the coordinator inbound messages counter +#[allow(unused_variables)] +pub fn increment_coordinator_inbound_messages(amount: i64) { + #[cfg(feature = "monitoring_prom")] + 
prometheus::COORDINATOR_INBOUND_MESSAGES.inc_by(amount); +} + +/// Increment the number of inbound packets received +#[allow(unused_variables)] +pub fn increment_inbound_packets(amount: i64) { + #[cfg(feature = "monitoring_prom")] + prometheus::INBOUND_PACKETS_RECEIVED.inc_by(amount); +} + +/// Increment the number of commands processed +#[allow(unused_variables)] +pub fn increment_commands_processed(command_type: &str) { + #[cfg(feature = "monitoring_prom")] + prometheus::COMMANDS_PROCESSED + .with_label_values(&[command_type]) + .inc(); +} + +/// Increment the number of DKG votes submitted +#[allow(unused_variables)] +pub fn increment_dkg_votes_submitted() { + #[cfg(feature = "monitoring_prom")] + prometheus::DGK_VOTES_SUBMITTED.inc(); +} + +/// Increment the number of operation results +#[allow(unused_variables)] +pub fn increment_operation_results(operation_type: &str) { + #[cfg(feature = "monitoring_prom")] + prometheus::OPERATION_RESULTS + .with_label_values(&[operation_type]) + .inc(); +} + +/// Increment the number of block proposals received +#[allow(unused_variables)] +pub fn increment_block_proposals_received() { + #[cfg(feature = "monitoring_prom")] + prometheus::BLOCK_PROPOSALS_RECEIVED.inc(); +} + +/// Update the stx balance of the signer +#[allow(unused_variables)] +pub fn update_signer_stx_balance(balance: i64) { + #[cfg(feature = "monitoring_prom")] + prometheus::SIGNER_STX_BALANCE.set(balance); +} + +/// Update the signer nonce metric +#[allow(unused_variables)] +pub fn update_signer_nonce(nonce: u64) { + #[cfg(feature = "monitoring_prom")] + prometheus::SIGNER_NONCE.set(nonce as i64); +} + +/// Start a new RPC call timer. +/// The `origin` parameter is the base path of the RPC call, e.g. `http://node.com`. +/// The `origin` parameter is removed from `full_path` when storing in prometheus.
+#[cfg(feature = "monitoring_prom")] +pub fn new_rpc_call_timer(full_path: &str, origin: &str) -> HistogramTimer { + let path = &full_path[origin.len()..]; + let histogram = prometheus::SIGNER_RPC_CALL_LATENCIES_HISTOGRAM.with_label_values(&[path]); + histogram.start_timer() +} + +/// NoOp timer used for monitoring when the monitoring feature is not enabled. +pub struct NoOpTimer; +impl NoOpTimer { + /// NoOp method to stop recording when the monitoring feature is not enabled. + pub fn stop_and_record(&self) {} +} + +/// Create a new no-op timer, used when the monitoring feature is not enabled. +#[cfg(not(feature = "monitoring_prom"))] +pub fn new_rpc_call_timer(_full_path: &str, _origin: &str) -> NoOpTimer { + NoOpTimer +} + +/// Start serving monitoring metrics. +/// This will only serve the metrics if the `monitoring_prom` feature is enabled. +#[allow(unused_variables)] +pub fn start_serving_monitoring_metrics(config: GlobalConfig) -> Result<(), String> { + #[cfg(feature = "monitoring_prom")] + { + if config.metrics_endpoint.is_none() { + return Ok(()); + } + let thread = std::thread::Builder::new() + .name("signer_metrics".to_string()) + .spawn(move || { + if let Err(monitoring_err) = server::MonitoringServer::start(&config) { + error!("Monitoring: Error in metrics server: {:?}", monitoring_err); + } + }); + } + #[cfg(not(feature = "monitoring_prom"))] + { + if config.metrics_endpoint.is_some() { + warn!("Not starting monitoring metrics server as the monitoring_prom feature is not enabled"); + } + } + Ok(()) +} diff --git a/stacks-signer/src/monitoring/prometheus.rs b/stacks-signer/src/monitoring/prometheus.rs new file mode 100644 index 0000000000..c78db1299d --- /dev/null +++ b/stacks-signer/src/monitoring/prometheus.rs @@ -0,0 +1,105 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License
as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use lazy_static::lazy_static; +use prometheus::{ + gather, histogram_opts, opts, register_histogram_vec, register_int_counter, + register_int_counter_vec, register_int_gauge, Encoder, HistogramVec, IntCounter, IntCounterVec, + IntGauge, TextEncoder, +}; + +lazy_static! { + pub static ref STACKS_TIP_HEIGHT_GAUGE: IntGauge = register_int_gauge!(opts!( + "stacks_signer_stacks_node_height", + "The current height of the Stacks node" + )) + .unwrap(); + pub static ref BLOCK_VALIDATION_RESPONSES: IntCounterVec = register_int_counter_vec!( + "stacks_signer_block_validation_responses", + "The number of block validation responses. `response_type` is either 'accepted' or 'rejected'", + &["response_type"] + ) + .unwrap(); + pub static ref BLOCK_RESPONSES_SENT: IntCounterVec = register_int_counter_vec!( + "stacks_signer_block_responses_sent", + "The number of block responses sent. 
`response_type` is either 'accepted' or 'rejected'", + &["response_type"] + ) + .unwrap(); + pub static ref SIGNER_INBOUND_MESSAGES: IntCounter = register_int_counter!(opts!( + "stacks_signer_inbound_messages", + "The number of inbound messages received by the signer" + )) + .unwrap(); + pub static ref COORDINATOR_INBOUND_MESSAGES: IntCounter = register_int_counter!(opts!( + "stacks_signer_coordinator_inbound_messages", + "The number of inbound messages received as a coordinator" + )) + .unwrap(); + pub static ref INBOUND_PACKETS_RECEIVED: IntCounter = register_int_counter!(opts!( + "stacks_signer_inbound_packets_received", + "The number of inbound packets received by the signer" + )) + .unwrap(); + pub static ref COMMANDS_PROCESSED: IntCounterVec = register_int_counter_vec!( + "stacks_signer_commands_processed", + "The number of commands processed by the signer", + &["command_type"] + ) + .unwrap(); + pub static ref DGK_VOTES_SUBMITTED: IntCounter = register_int_counter!(opts!( + "stacks_signer_dkg_votes_submitted", + "The number of DKG votes submitted by the signer" + )) + .unwrap(); + pub static ref OPERATION_RESULTS: IntCounterVec = register_int_counter_vec!( + "stacks_signer_operation_results_dkg", + "The number of DKG operation results", + &["operation_type"] + ) + .unwrap(); + pub static ref BLOCK_PROPOSALS_RECEIVED: IntCounter = register_int_counter!(opts!( + "stacks_signer_block_proposals_received", + "The number of block proposals received by the signer" + )) + .unwrap(); + pub static ref CURRENT_REWARD_CYCLE: IntGauge = register_int_gauge!(opts!( + "stacks_signer_current_reward_cycle", + "The current reward cycle" + )).unwrap(); + pub static ref SIGNER_STX_BALANCE: IntGauge = register_int_gauge!(opts!( + "stacks_signer_stx_balance", + "The current STX balance of the signer" + )).unwrap(); + pub static ref SIGNER_NONCE: IntGauge = register_int_gauge!(opts!( + "stacks_signer_nonce", + "The current nonce of the signer" + )).unwrap(); + + pub static ref
SIGNER_RPC_CALL_LATENCIES_HISTOGRAM: HistogramVec = register_histogram_vec!(histogram_opts!( + "stacks_signer_node_rpc_call_latencies_histogram", + "Time (seconds) measuring round-trip RPC call latency to the Stacks node" + // Will use DEFAULT_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0] by default + ), &["path"]).unwrap(); +} + +pub fn gather_metrics_string() -> String { + let mut buffer = Vec::new(); + let encoder = TextEncoder::new(); + let metrics_families = gather(); + encoder.encode(&metrics_families, &mut buffer).unwrap(); + String::from_utf8(buffer).unwrap() +} diff --git a/stacks-signer/src/monitoring/server.rs b/stacks-signer/src/monitoring/server.rs new file mode 100644 index 0000000000..9cecd41ed7 --- /dev/null +++ b/stacks-signer/src/monitoring/server.rs @@ -0,0 +1,245 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::net::SocketAddr; +use std::time::Instant; + +use clarity::util::hash::to_hex; +use clarity::util::secp256k1::Secp256k1PublicKey; +use slog::{slog_debug, slog_error, slog_info, slog_warn}; +use stacks_common::{debug, error, info, warn}; +use tiny_http::{Response as HttpResponse, Server as HttpServer}; + +use super::{update_reward_cycle, update_signer_stx_balance}; +use crate::client::{ClientError, StacksClient}; +use crate::config::{GlobalConfig, Network}; +use crate::monitoring::prometheus::gather_metrics_string; +use crate::monitoring::{update_signer_nonce, update_stacks_tip_height}; + +#[derive(thiserror::Error, Debug)] +/// Monitoring server errors +pub enum MonitoringError { + /// Already bound to an address + #[error("Already bound to an address")] + AlreadyBound, + /// Server terminated + #[error("Server terminated")] + Terminated, + /// No endpoint configured + #[error("Prometheus endpoint not configured.")] + EndpointNotConfigured, + /// Error fetching metrics from stacks node + #[error("Error fetching data from stacks node: {0}")] + FetchError(#[from] ClientError), +} + +/// Metrics and monitoring server +pub struct MonitoringServer { + http_server: HttpServer, + local_addr: SocketAddr, + stacks_client: StacksClient, + last_metrics_poll: Instant, + network: Network, + public_key: Secp256k1PublicKey, + stacks_node_client: reqwest::blocking::Client, + stacks_node_origin: String, +} + +impl MonitoringServer { + pub fn new( + http_server: HttpServer, + local_addr: SocketAddr, + stacks_client: StacksClient, + network: Network, + public_key: Secp256k1PublicKey, + stacks_node_origin: String, + ) -> Self { + Self { + http_server, + local_addr, + stacks_client, + last_metrics_poll: Instant::now(), + network, + public_key, + stacks_node_client: reqwest::blocking::Client::new(), + stacks_node_origin, + } + } + + /// Start and run the metrics server + pub fn start(config: &GlobalConfig) -> Result<(), MonitoringError> { + let Some(endpoint) = 
config.metrics_endpoint else { + return Err(MonitoringError::EndpointNotConfigured); + }; + let stacks_client = StacksClient::from(config); + let http_server = HttpServer::http(endpoint).map_err(|_| MonitoringError::AlreadyBound)?; + let public_key = Secp256k1PublicKey::from_private(&config.stacks_private_key); + let mut server = MonitoringServer::new( + http_server, + endpoint, + stacks_client, + config.network.clone(), + public_key, + format!("http://{}", config.node_host), + ); + server.update_metrics()?; + server.main_loop() + } + + // /// Start and run the metrics server + // pub fn run(endpoint: SocketAddr, stacks_client: StacksClient) -> Result<(), MonitoringError> { + // let http_server = HttpServer::http(endpoint).map_err(|_| MonitoringError::AlreadyBound)?; + // let mut server = PrometheusMetrics::new(http_server, endpoint, stacks_client); + // server.main_loop() + // } + + /// Main listener loop of metrics server + pub fn main_loop(&mut self) -> Result<(), MonitoringError> { + info!("{}: Starting Prometheus metrics server", self); + loop { + if let Err(err) = self.refresh_metrics() { + error!("Monitoring: Error refreshing metrics: {:?}", err); + } + let request = match self.http_server.recv() { + Ok(request) => request, + Err(err) => { + error!("Monitoring: Error receiving request: {:?}", err); + return Err(MonitoringError::Terminated); + } + }; + + debug!("{}: received request {}", self, request.url()); + + if request.url() == "/metrics" { + let response = HttpResponse::from_string(gather_metrics_string()); + request.respond(response).expect("Failed to send response"); + continue; + } + + if request.url() == "/info" { + request + .respond(HttpResponse::from_string(self.get_info_response())) + .expect("Failed to respond to request"); + continue; + } + + // return 200 OK for "/" + if request.url() == "/" { + request + .respond(HttpResponse::from_string("OK")) + .expect("Failed to respond to request"); + continue; + } + + // Run heartbeat check to test 
connection to the node + if request.url() == "/heartbeat" { + let (msg, status) = if self.heartbeat() { + ("OK", 200) + } else { + ("Failed", 500) + }; + request + .respond(HttpResponse::from_string(msg).with_status_code(status)) + .expect("Failed to respond to request"); + continue; + } + + // unknown request, return 404 + request + .respond(HttpResponse::from_string("Not Found").with_status_code(404)) + .expect("Failed to respond to request"); + } + } + + /// Check to see if metrics need to be refreshed + fn refresh_metrics(&mut self) -> Result<(), MonitoringError> { + let now = Instant::now(); + if now.duration_since(self.last_metrics_poll).as_secs() > 60 { + self.last_metrics_poll = now; + self.update_metrics()?; + } + Ok(()) + } + + /// Update metrics by making RPC calls to the Stacks node + fn update_metrics(&self) -> Result<(), MonitoringError> { + debug!("{}: Updating metrics", self); + let peer_info = self.stacks_client.get_peer_info()?; + if let Ok(height) = i64::try_from(peer_info.stacks_tip_height) { + update_stacks_tip_height(height); + } else { + warn!( + "Failed to parse stacks tip height: {}", + peer_info.stacks_tip_height + ); + } + let pox_info = self.stacks_client.get_pox_data()?; + if let Ok(reward_cycle) = i64::try_from(pox_info.reward_cycle_id) { + update_reward_cycle(reward_cycle); + } + let signer_stx_addr = self.stacks_client.get_signer_address(); + let account_entry = self.stacks_client.get_account_entry(signer_stx_addr)?; + let balance = i64::from_str_radix(&account_entry.balance[2..], 16).map_err(|e| { + MonitoringError::FetchError(ClientError::MalformedClarityValue(format!( + "Failed to parse balance: {} with err: {}", + &account_entry.balance, e, + ))) + })?; + update_signer_nonce(account_entry.nonce); + update_signer_stx_balance(balance); + Ok(()) + } + + /// Build a JSON response for non-metrics requests + fn get_info_response(&self) -> String { + // let public_key = Secp256k1PublicKey::from_private(&self.stacks_client.publ); + 
serde_json::to_string(&serde_json::json!({ + "signerPublicKey": to_hex(&self.public_key.to_bytes_compressed()), + "network": self.network.to_string(), + "stxAddress": self.stacks_client.get_signer_address().to_string(), + })) + .expect("Failed to serialize JSON") + } + + /// Poll the Stacks node's `v2/info` endpoint to validate the connection + fn heartbeat(&self) -> bool { + let url = format!("{}/v2/info", self.stacks_node_origin); + let response = self.stacks_node_client.get(url).send(); + match response { + Ok(response) => { + if response.status().is_success() { + true + } else { + warn!( + "Monitoring: Heartbeat failed with status: {}", + response.status() + ); + false + } + } + Err(err) => { + warn!("Monitoring: Heartbeat failed with error: {:?}", err); + false + } + } + } +} + +impl std::fmt::Display for MonitoringServer { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Signer monitoring server ({})", self.local_addr) + } +} diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 4491650090..d9cc6743d8 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -1,4 +1,5 @@ use std::collections::VecDeque; +use std::fmt::Debug; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -20,16 +21,34 @@ use std::time::Duration; use blockstack_lib::burnchains::PoxConstants; use blockstack_lib::chainstate::stacks::boot::SIGNERS_NAME; use blockstack_lib::util_lib::boot::boot_code_id; +use clarity::codec::StacksMessageCodec; use hashbrown::HashMap; -use libsigner::{SignerEntries, SignerEvent, SignerRunLoop}; +use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerRunLoop}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::{debug, error, info, warn}; +use wsts::common::MerkleRoot; use wsts::state_machine::OperationResult; 
-use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; +use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID, StacksClient}; use crate::config::{GlobalConfig, SignerConfig}; -use crate::signer::{Command as SignerCommand, Signer, SignerSlotID}; +use crate::Signer as SignerTrait; + +/// Which signer operation to perform +#[derive(PartialEq, Clone, Debug)] +pub enum SignerCommand { + /// Generate a DKG aggregate public key + Dkg, + /// Sign a message + Sign { + /// The block to sign over + block_proposal: BlockProposal, + /// Whether to make a taproot signature + is_taproot: bool, + /// Taproot merkle root + merkle_root: Option, + }, +} /// Which operation to perform #[derive(PartialEq, Clone, Debug)] @@ -56,8 +75,8 @@ pub enum State { pub struct RewardCycleInfo { /// The current reward cycle pub reward_cycle: u64, - /// The reward phase cycle length - pub reward_phase_block_length: u64, + /// The total reward cycle length + pub reward_cycle_length: u64, /// The prepare phase length pub prepare_phase_block_length: u64, /// The first burn block height @@ -70,26 +89,42 @@ impl RewardCycleInfo { /// Check if the provided burnchain block height is part of the reward cycle pub const fn is_in_reward_cycle(&self, burnchain_block_height: u64) -> bool { let blocks_mined = burnchain_block_height.saturating_sub(self.first_burnchain_block_height); - let reward_cycle_length = self - .reward_phase_block_length - .saturating_add(self.prepare_phase_block_length); - let reward_cycle = blocks_mined / reward_cycle_length; + let reward_cycle = blocks_mined / self.reward_cycle_length; self.reward_cycle == reward_cycle } + /// Get the reward cycle for a specific burnchain block height + pub const fn get_reward_cycle(&self, burnchain_block_height: u64) -> u64 { + let blocks_mined = burnchain_block_height.saturating_sub(self.first_burnchain_block_height); + blocks_mined / self.reward_cycle_length + } + /// Check if the provided burnchain 
block height is in the prepare phase pub fn is_in_prepare_phase(&self, burnchain_block_height: u64) -> bool { PoxConstants::static_is_in_prepare_phase( self.first_burnchain_block_height, - self.reward_phase_block_length, + self.reward_cycle_length, self.prepare_phase_block_length, burnchain_block_height, ) } + + /// Check if the provided burnchain block height is in the prepare phase of the next cycle + pub fn is_in_next_prepare_phase(&self, burnchain_block_height: u64) -> bool { + let effective_height = burnchain_block_height - self.first_burnchain_block_height; + let reward_index = effective_height % self.reward_cycle_length; + + reward_index >= (self.reward_cycle_length - self.prepare_phase_block_length) + && self.get_reward_cycle(burnchain_block_height) == self.reward_cycle + } } /// The runloop for the stacks signer -pub struct RunLoop { +pub struct RunLoop +where + Signer: SignerTrait, + T: StacksMessageCodec + Clone + Send + Debug, +{ /// Configuration info pub config: GlobalConfig, /// The stacks node client @@ -103,11 +138,13 @@ pub struct RunLoop { pub commands: VecDeque, /// The current reward cycle info. 
Only None if the runloop is uninitialized pub current_reward_cycle_info: Option, + /// Phantom data for the message codec + _phantom_data: std::marker::PhantomData, } -impl From for RunLoop { - /// Creates new runloop from a config - fn from(config: GlobalConfig) -> Self { +impl, T: StacksMessageCodec + Clone + Send + Debug> RunLoop { + /// Create a new signer runloop from the provided configuration + pub fn new(config: GlobalConfig) -> Self { let stacks_client = StacksClient::from(&config); Self { config, @@ -116,11 +153,9 @@ impl From for RunLoop { state: State::Uninitialized, commands: VecDeque::new(), current_reward_cycle_info: None, + _phantom_data: std::marker::PhantomData, } } -} - -impl RunLoop { /// Get the registered signers for a specific reward cycle /// Returns None if no signers are registered or its not Nakamoto cycle pub fn get_parsed_reward_set( @@ -128,10 +163,7 @@ impl RunLoop { reward_cycle: u64, ) -> Result, ClientError> { debug!("Getting registered signers for reward cycle {reward_cycle}..."); - let Some(signers) = self - .stacks_client - .get_reward_set_signers_with_retry(reward_cycle)? - else { + let Some(signers) = self.stacks_client.get_reward_set_signers(reward_cycle)? else { warn!("No reward set signers found for reward cycle {reward_cycle}."); return Ok(None); }; @@ -213,6 +245,7 @@ impl RunLoop { nonce_timeout: self.config.nonce_timeout, sign_timeout: self.config.sign_timeout, tx_fee_ustx: self.config.tx_fee_ustx, + max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), }) } @@ -227,20 +260,14 @@ impl RunLoop { let prior_reward_cycle = reward_cycle.saturating_sub(1); let prior_reward_set = prior_reward_cycle % 2; if let Some(signer) = self.stacks_signers.get_mut(&prior_reward_set) { - if signer.reward_cycle == prior_reward_cycle { + if signer.reward_cycle() == prior_reward_cycle { // The signers have been calculated for the next reward cycle. 
Update the current one debug!("{signer}: Next reward cycle ({reward_cycle}) signer set calculated. Reconfiguring current reward cycle signer."); - signer.next_signer_addresses = new_signer_config - .signer_entries - .signer_ids - .keys() - .copied() - .collect(); - signer.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); + signer.update_next_signer_data(&new_signer_config); } } } - let new_signer = Signer::from(new_signer_config); + let new_signer = Signer::new(new_signer_config); info!("{new_signer} initialized."); self.stacks_signers.insert(reward_index, new_signer); } else { @@ -258,7 +285,8 @@ impl RunLoop { let current_reward_cycle = reward_cycle_info.reward_cycle; self.refresh_signer_config(current_reward_cycle); // We should only attempt to initialize the next reward cycle signer if we are in the prepare phase of the next reward cycle - if reward_cycle_info.is_in_prepare_phase(reward_cycle_info.last_burnchain_block_height) { + if reward_cycle_info.is_in_next_prepare_phase(reward_cycle_info.last_burnchain_block_height) + { self.refresh_signer_config(current_reward_cycle.saturating_add(1)); } self.current_reward_cycle_info = Some(reward_cycle_info); @@ -275,23 +303,39 @@ impl RunLoop { .current_reward_cycle_info .as_mut() .expect("FATAL: cannot be an initialized signer with no reward cycle info."); + let current_reward_cycle = reward_cycle_info.reward_cycle; + let block_reward_cycle = reward_cycle_info.get_reward_cycle(current_burn_block_height); + // First ensure we refresh our view of the current reward cycle information - if !reward_cycle_info.is_in_reward_cycle(current_burn_block_height) { + if block_reward_cycle != current_reward_cycle { let new_reward_cycle_info = retry_with_exponential_backoff(|| { - self.stacks_client + let info = self + .stacks_client .get_current_reward_cycle_info() - .map_err(backoff::Error::transient) + .map_err(backoff::Error::transient)?; + if info.reward_cycle < block_reward_cycle { + // If the stacks-node is 
still processing the burn block, the /v2/pox endpoint + // may return the previous reward cycle. In this case, we should retry. + return Err(backoff::Error::transient(ClientError::InvalidResponse( + format!("Received reward cycle ({}) does not match the expected reward cycle ({}) for block {}.", + info.reward_cycle, + block_reward_cycle, + current_burn_block_height + ), + ))); + } + Ok(info) })?; *reward_cycle_info = new_reward_cycle_info; } let current_reward_cycle = reward_cycle_info.reward_cycle; // We should only attempt to refresh the signer if we are not configured for the next reward cycle yet and we received a new burn block for its prepare phase - if reward_cycle_info.is_in_prepare_phase(current_burn_block_height) { + if reward_cycle_info.is_in_next_prepare_phase(current_burn_block_height) { let next_reward_cycle = current_reward_cycle.saturating_add(1); if self .stacks_signers .get(&(next_reward_cycle % 2)) - .map(|signer| signer.reward_cycle != next_reward_cycle) + .map(|signer| signer.reward_cycle() != next_reward_cycle) .unwrap_or(true) { info!("Received a new burnchain block height ({current_burn_block_height}) in the prepare phase of the next reward cycle ({next_reward_cycle}). 
Checking for signer registration..."); @@ -310,7 +354,7 @@ impl RunLoop { fn cleanup_stale_signers(&mut self, current_reward_cycle: u64) { let mut to_delete = Vec::new(); for (idx, signer) in &mut self.stacks_signers { - if signer.reward_cycle < current_reward_cycle { + if signer.reward_cycle() < current_reward_cycle { debug!("{signer}: Signer's tenure has completed."); to_delete.push(*idx); continue; @@ -322,7 +366,9 @@ impl RunLoop { } } -impl SignerRunLoop, RunLoopCommand> for RunLoop { +impl, T: StacksMessageCodec + Clone + Send + Debug> + SignerRunLoop, RunLoopCommand, T> for RunLoop +{ fn set_event_timeout(&mut self, timeout: Duration) { self.config.event_timeout = timeout; } @@ -333,7 +379,7 @@ impl SignerRunLoop, RunLoopCommand> for RunLoop { fn run_one_pass( &mut self, - event: Option, + event: Option>, cmd: Option, res: Sender>, ) -> Option> { @@ -366,77 +412,38 @@ impl SignerRunLoop, RunLoopCommand> for RunLoop { if self.state == State::NoRegisteredSigners { let next_reward_cycle = current_reward_cycle.saturating_add(1); if let Some(event) = event { - info!("Signer is not registered for the current reward cycle ({current_reward_cycle}) or next reward cycle ({next_reward_cycle}). Waiting for confirmed registration..."); + info!("Signer is not registered for the current reward cycle ({current_reward_cycle}). Reward set is not yet determined or signer is not registered for the upcoming reward cycle ({next_reward_cycle})."); warn!("Ignoring event: {event:?}"); } return None; } for signer in self.stacks_signers.values_mut() { - let event_parity = match event { - Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2), - // Block proposal events do have reward cycles, but each proposal has its own cycle, - // and the vec could be heterogenous, so, don't differentiate. 
- Some(SignerEvent::MinerMessages(..)) - | Some(SignerEvent::NewBurnBlock(_)) - | Some(SignerEvent::StatusCheck) - | None => None, - Some(SignerEvent::SignerMessages(msg_parity, ..)) => { - Some(u64::from(msg_parity) % 2) - } - }; - let other_signer_parity = (signer.reward_cycle + 1) % 2; - if event_parity == Some(other_signer_parity) { - continue; - } - - if signer.approved_aggregate_public_key.is_none() { - if let Err(e) = retry_with_exponential_backoff(|| { - signer - .update_dkg(&self.stacks_client, current_reward_cycle) - .map_err(backoff::Error::transient) - }) { - error!("{signer}: failed to update DKG: {e}"); - } - } - signer.refresh_coordinator(); - if let Err(e) = signer.process_event( + signer.process_event( &self.stacks_client, event.as_ref(), res.clone(), current_reward_cycle, - ) { - error!("{signer}: errored processing event: {e}"); - } - if let Some(command) = self.commands.pop_front() { - let reward_cycle = command.reward_cycle; - if signer.reward_cycle != reward_cycle { - warn!( - "{signer}: not registered for reward cycle {reward_cycle}. 
Ignoring command: {command:?}" - ); - } else { - info!( - "{signer}: Queuing an external runloop command ({:?}): {command:?}", - signer - .state_machine - .public_keys - .signers - .get(&signer.signer_id) - ); - signer.commands.push_back(command.command); - } - } + ); // After processing event, run the next command for each signer - signer.process_next_command(&self.stacks_client, current_reward_cycle); + signer.process_command( + &self.stacks_client, + current_reward_cycle, + self.commands.pop_front(), + ); } None } } + #[cfg(test)] mod tests { use blockstack_lib::chainstate::stacks::boot::NakamotoSignerEntry; use libsigner::SignerEntries; + use rand::{thread_rng, Rng, RngCore}; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; + use super::RewardCycleInfo; + #[test] fn parse_nakamoto_signer_entries_test() { let nmb_signers = 10; @@ -462,4 +469,142 @@ mod tests { (0..nmb_signers).map(|id| id as u32).collect::>() ); } + + #[test] + fn is_in_reward_cycle_info() { + let rand_byte: u8 = std::cmp::max(1, thread_rng().gen()); + let prepare_phase_block_length = rand_byte as u64; + // Ensure the reward cycle is not close to u64 Max to prevent overflow when adding prepare phase len + let reward_cycle_length = (std::cmp::max( + prepare_phase_block_length.wrapping_add(1), + thread_rng().next_u32() as u64, + )) + .wrapping_add(prepare_phase_block_length); + let reward_cycle_phase_block_length = + reward_cycle_length.wrapping_sub(prepare_phase_block_length); + let first_burnchain_block_height = std::cmp::max(1u8, thread_rng().gen()) as u64; + let last_burnchain_block_height = thread_rng().gen_range( + first_burnchain_block_height + ..first_burnchain_block_height + .wrapping_add(reward_cycle_length) + .wrapping_sub(prepare_phase_block_length), + ); + let blocks_mined = last_burnchain_block_height.wrapping_sub(first_burnchain_block_height); + let reward_cycle = blocks_mined / reward_cycle_length; + + let reward_cycle_info = RewardCycleInfo { + 
reward_cycle, + reward_cycle_length, + prepare_phase_block_length, + first_burnchain_block_height, + last_burnchain_block_height, + }; + assert!(reward_cycle_info.is_in_reward_cycle(first_burnchain_block_height)); + assert!(!reward_cycle_info.is_in_prepare_phase(first_burnchain_block_height)); + + assert!(reward_cycle_info.is_in_reward_cycle(last_burnchain_block_height)); + assert!(!reward_cycle_info.is_in_prepare_phase(last_burnchain_block_height)); + + assert!(!reward_cycle_info + .is_in_reward_cycle(first_burnchain_block_height.wrapping_add(reward_cycle_length))); + assert!(!reward_cycle_info + .is_in_prepare_phase(!first_burnchain_block_height.wrapping_add(reward_cycle_length))); + + assert!(reward_cycle_info.is_in_reward_cycle( + first_burnchain_block_height + .wrapping_add(reward_cycle_length) + .wrapping_sub(1) + )); + assert!(reward_cycle_info.is_in_prepare_phase( + first_burnchain_block_height + .wrapping_add(reward_cycle_length) + .wrapping_sub(1) + )); + + assert!(reward_cycle_info.is_in_reward_cycle( + first_burnchain_block_height.wrapping_add(reward_cycle_phase_block_length) + )); + assert!(!reward_cycle_info.is_in_prepare_phase( + first_burnchain_block_height.wrapping_add(reward_cycle_phase_block_length) + )); + + assert!(reward_cycle_info.is_in_reward_cycle(first_burnchain_block_height.wrapping_add(1))); + assert!( + !reward_cycle_info.is_in_prepare_phase(first_burnchain_block_height.wrapping_add(1)) + ); + + assert!(reward_cycle_info.is_in_reward_cycle( + first_burnchain_block_height + .wrapping_add(reward_cycle_phase_block_length) + .wrapping_add(1) + )); + assert!(reward_cycle_info.is_in_prepare_phase( + first_burnchain_block_height + .wrapping_add(reward_cycle_phase_block_length) + .wrapping_add(1) + )); + } + + #[test] + fn is_in_next_prepare_phase() { + let reward_cycle_info = RewardCycleInfo { + reward_cycle: 5, + reward_cycle_length: 10, + prepare_phase_block_length: 5, + first_burnchain_block_height: 0, + last_burnchain_block_height: 50, + 
}; + + assert!(!reward_cycle_info.is_in_next_prepare_phase(49)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(50)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(51)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(52)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(53)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(54)); + assert!(reward_cycle_info.is_in_next_prepare_phase(55)); + assert!(reward_cycle_info.is_in_next_prepare_phase(56)); + assert!(reward_cycle_info.is_in_next_prepare_phase(57)); + assert!(reward_cycle_info.is_in_next_prepare_phase(58)); + assert!(reward_cycle_info.is_in_next_prepare_phase(59)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(60)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(61)); + + let rand_byte: u8 = std::cmp::max(1, thread_rng().gen()); + let prepare_phase_block_length = rand_byte as u64; + // Ensure the reward cycle is not close to u64 Max to prevent overflow when adding prepare phase len + let reward_cycle_length = (std::cmp::max( + prepare_phase_block_length.wrapping_add(1), + thread_rng().next_u32() as u64, + )) + .wrapping_add(prepare_phase_block_length); + let reward_cycle_phase_block_length = + reward_cycle_length.wrapping_sub(prepare_phase_block_length); + let first_burnchain_block_height = std::cmp::max(1u8, thread_rng().gen()) as u64; + let last_burnchain_block_height = thread_rng().gen_range( + first_burnchain_block_height + ..first_burnchain_block_height + .wrapping_add(reward_cycle_length) + .wrapping_sub(prepare_phase_block_length), + ); + let blocks_mined = last_burnchain_block_height.wrapping_sub(first_burnchain_block_height); + let reward_cycle = blocks_mined / reward_cycle_length; + + let reward_cycle_info = RewardCycleInfo { + reward_cycle, + reward_cycle_length, + prepare_phase_block_length, + first_burnchain_block_height, + last_burnchain_block_height, + }; + + for i in 0..reward_cycle_length { + if i < reward_cycle_phase_block_length { + 
assert!(!reward_cycle_info + .is_in_next_prepare_phase(first_burnchain_block_height.wrapping_add(i))); + } else { + assert!(reward_cycle_info + .is_in_next_prepare_phase(first_burnchain_block_height.wrapping_add(i))); + } + } + } } diff --git a/stacks-signer/src/tests/conf/signer-0.toml b/stacks-signer/src/tests/conf/signer-0.toml index 32183e0e79..19002c1914 100644 --- a/stacks-signer/src/tests/conf/signer-0.toml +++ b/stacks-signer/src/tests/conf/signer-0.toml @@ -4,3 +4,4 @@ endpoint = "localhost:30000" network = "testnet" auth_password = "12345" db_path = ":memory:" +metrics_endpoint = "0.0.0.0:9090" \ No newline at end of file diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs new file mode 100644 index 0000000000..e891573df3 --- /dev/null +++ b/stacks-signer/src/v0/mod.rs @@ -0,0 +1,15 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
diff --git a/stacks-signer/src/coordinator.rs b/stacks-signer/src/v1/coordinator.rs similarity index 95% rename from stacks-signer/src/coordinator.rs rename to stacks-signer/src/v1/coordinator.rs index 7469c0ff18..7fc2d238c4 100644 --- a/stacks-signer/src/coordinator.rs +++ b/stacks-signer/src/v1/coordinator.rs @@ -91,17 +91,10 @@ impl CoordinatorSelector { } } new_index + } else if ROTATE_COORDINATORS { + self.coordinator_index.saturating_add(1) % self.coordinator_ids.len() } else { - if ROTATE_COORDINATORS { - let mut new_index = self.coordinator_index.saturating_add(1); - if new_index == self.coordinator_ids.len() { - // We have exhausted all potential coordinators. Go back to the start - new_index = 0; - } - new_index - } else { - self.coordinator_index - } + self.coordinator_index }; self.coordinator_id = *self .coordinator_ids diff --git a/stacks-signer/src/v1/mod.rs b/stacks-signer/src/v1/mod.rs new file mode 100644 index 0000000000..7c2477cf2d --- /dev/null +++ b/stacks-signer/src/v1/mod.rs @@ -0,0 +1,91 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +/// The coordinator selector for the signer +pub mod coordinator; +/// The signer module for processing events +pub mod signer; +/// The state module for the signer +pub mod signerdb; + +use std::sync::mpsc::{channel, Receiver, Sender}; + +use libsigner::v1::messages::SignerMessage; +use libsigner::SignerEventReceiver; +use slog::slog_info; +use stacks_common::info; +use wsts::state_machine::OperationResult; + +use crate::config::GlobalConfig; +use crate::runloop::{RunLoop, RunLoopCommand}; +use crate::v1::signer::Signer; + +/// The signer type for the v1 signer +pub type RunningSigner = libsigner::RunningSigner< + SignerEventReceiver, + Vec, + SignerMessage, +>; + +/// The spawned signer type for the v1 signer +pub struct SpawnedSigner { + /// The underlying running signer thread handle + running_signer: RunningSigner, + /// The command sender for interacting with the running signer + pub cmd_send: Sender, + /// The result receiver for interacting with the running signer + pub res_recv: Receiver>, +} + +impl From for SpawnedSigner { + fn from(config: GlobalConfig) -> Self { + let endpoint = config.endpoint; + info!("Starting signer with config: {}", config); + let (cmd_send, cmd_recv) = channel(); + let (res_send, res_recv) = channel(); + let ev = SignerEventReceiver::new(config.network.is_mainnet()); + #[cfg(feature = "monitoring_prom")] + { + crate::monitoring::start_serving_monitoring_metrics(config.clone()).ok(); + } + let runloop = RunLoop::new(config); + let mut signer: libsigner::Signer< + RunLoopCommand, + Vec, + RunLoop, + SignerEventReceiver, + SignerMessage, + > = libsigner::Signer::new(runloop, ev, cmd_recv, res_send); + let running_signer = signer.spawn(endpoint).unwrap(); + SpawnedSigner { + running_signer, + cmd_send, + res_recv, + } + } +} + +impl SpawnedSigner { + /// Stop the signer thread and return the final state + pub fn stop(self) -> Option> { + self.running_signer.stop() + } + + /// Wait for the signer to terminate, and get the final 
state. WARNING: This will hang forever if the event receiver stop signal was never sent/no error occurred. + pub fn join(self) -> Option> { + self.running_signer.join() + } +} diff --git a/stacks-signer/src/signer.rs b/stacks-signer/src/v1/signer.rs similarity index 63% rename from stacks-signer/src/signer.rs rename to stacks-signer/src/v1/signer.rs index 4d23a92c07..f3e25cf466 100644 --- a/stacks-signer/src/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::collections::VecDeque; +use std::fmt::Debug; use std::path::PathBuf; use std::sync::mpsc::Sender; use std::time::Instant; @@ -24,11 +25,13 @@ use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote}; use blockstack_lib::chainstate::stacks::boot::SIGNERS_VOTING_FUNCTION_NAME; use blockstack_lib::chainstate::stacks::StacksTransaction; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; +use blockstack_lib::util_lib::db::Error as DBError; use hashbrown::HashSet; -use libsigner::{ - BlockProposalSigners, BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerEvent, - SignerMessage, +use libsigner::v1::messages::{ + BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, }; +use libsigner::{BlockProposal, SignerEvent}; +use rand_core::OsRng; use serde_derive::{Deserialize, Serialize}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::codec::{read_next, StacksMessageCodec}; @@ -36,9 +39,10 @@ use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::{debug, error, info, warn}; -use wsts::common::{MerkleRoot, Signature}; +use wsts::common::Signature; use wsts::curve::keys::PublicKey; use wsts::curve::point::Point; +use wsts::curve::scalar::Scalar; use wsts::net::{Message, 
NonceRequest, Packet, SignatureShareRequest}; use wsts::state_machine::coordinator::fire::Coordinator as FireCoordinator; use wsts::state_machine::coordinator::{ @@ -49,26 +53,22 @@ use wsts::state_machine::{OperationResult, SignError}; use wsts::traits::Signer as _; use wsts::v2; -use crate::client::{retry_with_exponential_backoff, ClientError, StackerDB, StacksClient}; +use crate::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; -use crate::coordinator::CoordinatorSelector; -use crate::signerdb::SignerDb; - -/// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID -#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)] -pub struct SignerSlotID(pub u32); - -impl std::fmt::Display for SignerSlotID { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} +use crate::runloop::{RunLoopCommand, SignerCommand}; +use crate::v1::coordinator::CoordinatorSelector; +use crate::v1::signerdb::SignerDb; +use crate::Signer as SignerTrait; /// Additional Info about a proposed block #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct BlockInfo { /// The block we are considering pub block: NakamotoBlock, + /// The burn block height at which the block was proposed + pub burn_block_height: u64, + /// The reward cycle the block belongs to + pub reward_cycle: u64, /// Our vote on the block if we have one yet pub vote: Option, /// Whether the block contents are valid @@ -79,27 +79,26 @@ pub struct BlockInfo { pub signed_over: bool, } -impl BlockInfo { - /// Create a new BlockInfo - pub const fn new(block: NakamotoBlock) -> Self { +impl From for BlockInfo { + fn from(value: BlockProposal) -> Self { Self { - block, + block: value.block, + burn_block_height: value.burn_height, + reward_cycle: value.reward_cycle, vote: None, valid: None, nonce_request: None, signed_over: false, } } - +} +impl BlockInfo { /// Create a new BlockInfo with an 
associated nonce request packet - pub const fn new_with_request(block: NakamotoBlock, nonce_request: NonceRequest) -> Self { - Self { - block, - vote: None, - valid: None, - nonce_request: Some(nonce_request), - signed_over: true, - } + pub fn new_with_request(block_proposal: BlockProposal, nonce_request: NonceRequest) -> Self { + let mut block_info = BlockInfo::from(block_proposal); + block_info.nonce_request = Some(nonce_request); + block_info.signed_over = true; + block_info } /// Return the block's signer signature hash @@ -108,32 +107,28 @@ impl BlockInfo { } } -/// Which signer operation to perform -#[derive(PartialEq, Clone, Debug)] -pub enum Command { - /// Generate a DKG aggregate public key +/// The specific operations that a signer can perform +#[derive(PartialEq, Eq, Debug, Clone)] +pub enum Operation { + /// A DKG operation Dkg, - /// Sign a message - Sign { - /// The block to sign over - block: NakamotoBlock, - /// Whether to make a taproot signature - is_taproot: bool, - /// Taproot merkle root - merkle_root: Option, - }, + /// A Sign operation + Sign, } /// The Signer state #[derive(PartialEq, Eq, Debug, Clone)] pub enum State { + /// The signer is uninitialized and should read stackerdb to restore state + Uninitialized, /// The signer is idle, waiting for messages and commands Idle, /// The signer is executing a DKG or Sign round - OperationInProgress, + OperationInProgress(Operation), } /// The stacks signer registered for the reward cycle +#[derive(Debug)] pub struct Signer { /// The coordinator for inbound messages for a specific reward cycle pub coordinator: FireCoordinator, @@ -142,7 +137,7 @@ pub struct Signer { /// the state of the signer pub state: State, /// Received Commands that need to be processed - pub commands: VecDeque, + pub commands: VecDeque, /// The stackerdb client pub stackerdb: StackerDB, /// Whether the signer is a mainnet signer or not @@ -159,8 +154,11 @@ pub struct Signer { pub next_signer_addresses: Vec, /// The reward 
cycle this signer belongs to pub reward_cycle: u64, - /// The tx fee in uSTX to use if the epoch is pre Nakamoto (Epoch 3.0) + /// The default tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0). pub tx_fee_ustx: u64, + /// If estimating the tx fee, the max tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0) + /// If None, will not cap the fee. + pub max_tx_fee_ustx: Option, /// The coordinator info for the signer pub coordinator_selector: CoordinatorSelector, /// The approved key registered to the contract @@ -185,10 +183,180 @@ impl std::fmt::Display for Signer { } } +impl SignerTrait for Signer { + /// Create a new signer from the given configuration + fn new(config: SignerConfig) -> Self { + Self::from(config) + } + /// Refresh the next signer data from the given configuration data + fn update_next_signer_data(&mut self, new_signer_config: &SignerConfig) { + self.next_signer_addresses = new_signer_config + .signer_entries + .signer_ids + .keys() + .copied() + .collect(); + self.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); + } + /// Return the reward cycle of the signer + fn reward_cycle(&self) -> u64 { + self.reward_cycle + } + + /// Process the event + fn process_event( + &mut self, + stacks_client: &StacksClient, + event: Option<&SignerEvent>, + res: Sender>, + current_reward_cycle: u64, + ) { + let event_parity = match event { + Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2), + // Block proposal events do have reward cycles, but each proposal has its own cycle, + // and the vec could be heterogenous, so, don't differentiate. 
+ Some(SignerEvent::MinerMessages(..)) + | Some(SignerEvent::NewBurnBlock(_)) + | Some(SignerEvent::StatusCheck) + | None => None, + Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), + }; + let other_signer_parity = (self.reward_cycle + 1) % 2; + if event_parity == Some(other_signer_parity) { + return; + } + if self.approved_aggregate_public_key.is_none() { + if let Err(e) = self.refresh_dkg(stacks_client, res.clone(), current_reward_cycle) { + error!("{self}: failed to refresh DKG: {e}"); + } + } + self.refresh_coordinator(); + if self.approved_aggregate_public_key.is_none() { + if let Err(e) = self.refresh_dkg(stacks_client, res.clone(), current_reward_cycle) { + error!("{self}: failed to refresh DKG: {e}"); + } + } + self.refresh_coordinator(); + debug!("{self}: Processing event: {event:?}"); + match event { + Some(SignerEvent::BlockValidationResponse(block_validate_response)) => { + debug!("{self}: Received a block proposal result from the stacks node..."); + self.handle_block_validate_response( + stacks_client, + block_validate_response, + res, + current_reward_cycle, + ) + } + Some(SignerEvent::SignerMessages(signer_set, messages)) => { + if *signer_set != self.stackerdb.get_signer_set() { + debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. 
Ignoring..."); + return; + } + debug!( + "{self}: Received {} messages from the other signers...", + messages.len() + ); + self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); + } + Some(SignerEvent::MinerMessages(messages, miner_key)) => { + let miner_key = PublicKey::try_from(miner_key.to_bytes_compressed().as_slice()) + .expect("FATAL: could not convert from StacksPublicKey to PublicKey"); + self.miner_key = Some(miner_key); + if current_reward_cycle != self.reward_cycle { + // There is no point in processing blocks if we are not the current reward cycle (we can never actually contribute to signing these blocks) + debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). Ignoring..."); + return; + } + debug!( + "{self}: Received {} messages from the miner", + messages.len(); + "miner_key" => ?miner_key, + ); + self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); + } + Some(SignerEvent::StatusCheck) => { + debug!("{self}: Received a status check event.") + } + Some(SignerEvent::NewBurnBlock(height)) => { + debug!("{self}: Received a new burn block event for block height {height}") + } + None => { + // No event. Do nothing. + debug!("{self}: No event received") + } + } + } + + fn process_command( + &mut self, + stacks_client: &StacksClient, + current_reward_cycle: u64, + command: Option, + ) { + if let Some(command) = command { + let reward_cycle = command.reward_cycle; + if self.reward_cycle != reward_cycle { + warn!( + "{self}: not registered for reward cycle {reward_cycle}. Ignoring command: {command:?}" + ); + } else { + info!( + "{self}: Queuing an external runloop command ({:?}): {command:?}", + self.state_machine.public_keys.signers.get(&self.signer_id) + ); + self.commands.push_back(command.command); + } + } + self.process_next_command(stacks_client, current_reward_cycle); + } +} + impl Signer { - /// Return the current coordinator. 
If in the active reward cycle, this is the miner, - /// so the first element of the tuple will be None (because the miner does not have a signer index). - fn get_coordinator(&self, current_reward_cycle: u64) -> (Option, PublicKey) { + /// Attempt to process the next command in the queue, and update state accordingly + fn process_next_command(&mut self, stacks_client: &StacksClient, current_reward_cycle: u64) { + match &self.state { + State::Uninitialized => { + // We cannot process any commands until we have restored our state + warn!("{self}: Cannot process commands until state is restored. Waiting..."); + } + State::Idle => { + let Some(command) = self.commands.front() else { + debug!("{self}: Nothing to process. Waiting for command..."); + return; + }; + let coordinator_id = if matches!(command, SignerCommand::Dkg) { + // We cannot execute a DKG command if we are not the coordinator + Some(self.get_coordinator_dkg().0) + } else { + self.get_coordinator_sign(current_reward_cycle).0 + }; + if coordinator_id != Some(self.signer_id) { + debug!( + "{self}: Coordinator is {coordinator_id:?}. Will not process any commands...", + ); + return; + } + let command = self + .commands + .pop_front() + .expect("BUG: Already asserted that the command queue was not empty"); + self.execute_command(stacks_client, &command); + } + State::OperationInProgress(op) => { + // We cannot execute the next command until the current one is finished... + debug!( + "{self}: Waiting for {op:?} operation to finish. Coordinator state = {:?}", + self.coordinator.state + ); + } + } + } + /// Return the current coordinator. + /// If the current reward cycle is the active reward cycle, this is the miner, + /// so the first element of the tuple will be None (because the miner does not have a signer index). + /// Otherwise, the coordinator is the signer with the index returned by the coordinator selector. 
+ fn get_coordinator_sign(&self, current_reward_cycle: u64) -> (Option, PublicKey) { if self.reward_cycle == current_reward_cycle { let Some(ref cur_miner) = self.miner_key else { error!( @@ -199,17 +367,60 @@ impl Signer { return (Some(selected.0), selected.1); }; // coordinator is the current miner. - (None, cur_miner.clone()) + (None, *cur_miner) } else { let selected = self.coordinator_selector.get_coordinator(); - return (Some(selected.0), selected.1); + (Some(selected.0), selected.1) + } + } + + /// Get the current coordinator for executing DKG + /// This will always use the coordinator selector to determine the coordinator + fn get_coordinator_dkg(&self) -> (u32, PublicKey) { + self.coordinator_selector.get_coordinator() + } + + /// Read stackerdb messages in case the signer was started late or restarted and missed incoming DKG messages + pub fn read_dkg_stackerdb_messages( + &mut self, + stacks_client: &StacksClient, + res: Sender>, + current_reward_cycle: u64, + ) -> Result<(), ClientError> { + if self.state != State::Uninitialized { + // We should only read stackerdb if we are uninitialized + return Ok(()); } + let ordered_packets = self + .stackerdb + .get_dkg_packets(&self.signer_slot_ids)? + .iter() + .filter_map(|packet| { + let coordinator_pubkey = if Self::is_dkg_message(&packet.msg) { + self.get_coordinator_dkg().1 + } else { + debug!( + "{self}: Received a non-DKG message in the DKG message queue. Ignoring it." 
+ ); + return None; + }; + self.verify_packet(stacks_client, packet.clone(), &coordinator_pubkey) + }) + .collect::>(); + // We successfully read stackerdb so we are no longer uninitialized + self.state = State::Idle; + debug!( + "{self}: Processing {} DKG messages from stackerdb: {ordered_packets:?}", + ordered_packets.len() + ); + self.handle_packets(stacks_client, res, &ordered_packets, current_reward_cycle); + Ok(()) } } impl From for Signer { fn from(signer_config: SignerConfig) -> Self { - let stackerdb = StackerDB::from(&signer_config); + let mut stackerdb = StackerDB::from(&signer_config); let num_signers = signer_config .signer_entries @@ -266,21 +477,24 @@ impl From for Signer { signer_config.signer_entries.public_keys, ); - if let Some(state) = signer_db - .get_signer_state(signer_config.reward_cycle) - .expect("Failed to load signer state") - { - debug!( - "Reward cycle #{} Signer #{}: Loading signer", - signer_config.reward_cycle, signer_config.signer_id - ); - state_machine.signer = v2::Signer::load(&state); - } + if let Some(state) = load_encrypted_signer_state( + &mut stackerdb, + signer_config.signer_slot_id, + &state_machine.network_private_key, + ).or_else(|err| { + warn!("Failed to load encrypted signer state from StackerDB, falling back to SignerDB: {err}"); + load_encrypted_signer_state( + &signer_db, + signer_config.reward_cycle, + &state_machine.network_private_key) + }).expect("Failed to load encrypted signer state from both StackerDB and SignerDB") { + state_machine.signer = state; + }; Self { coordinator, state_machine, - state: State::Idle, + state: State::Uninitialized, commands: VecDeque::new(), stackerdb, mainnet: signer_config.mainnet, @@ -295,6 +509,7 @@ impl From for Signer { next_signer_addresses: vec![], reward_cycle: signer_config.reward_cycle, tx_fee_ustx: signer_config.tx_fee_ustx, + max_tx_fee_ustx: signer_config.max_tx_fee_ustx, coordinator_selector, approved_aggregate_public_key: None, miner_key: None, @@ -332,24 +547,21 @@ 
impl Signer { } /// Update operation - fn update_operation(&mut self) { - self.state = State::OperationInProgress; + fn update_operation(&mut self, operation: Operation) { + self.state = State::OperationInProgress(operation); self.coordinator_selector.last_message_time = Some(Instant::now()); } /// Execute the given command and update state accordingly - fn execute_command(&mut self, stacks_client: &StacksClient, command: &Command) { + fn execute_command(&mut self, stacks_client: &StacksClient, command: &SignerCommand) { match command { - Command::Dkg => { + SignerCommand::Dkg => { + crate::monitoring::increment_commands_processed("dkg"); if self.approved_aggregate_public_key.is_some() { debug!("Reward cycle #{} Signer #{}: Already have an aggregate key. Ignoring DKG command.", self.reward_cycle, self.signer_id); return; } - let vote_round = match retry_with_exponential_backoff(|| { - stacks_client - .get_last_round(self.reward_cycle) - .map_err(backoff::Error::transient) - }) { + let vote_round = match stacks_client.get_last_round(self.reward_cycle) { Ok(last_round) => last_round, Err(e) => { error!("{self}: Unable to perform DKG. Failed to get last round from stacks node: {e:?}"); @@ -367,39 +579,42 @@ impl Signer { Ok(msg) => { let ack = self.stackerdb.send_message_with_retry(msg.into()); debug!("{self}: ACK: {ack:?}",); + self.update_operation(Operation::Dkg); } Err(e) => { error!("{self}: Failed to start DKG: {e:?}",); return; } } + self.update_operation(Operation::Dkg); } - Command::Sign { - block, + SignerCommand::Sign { + block_proposal, is_taproot, merkle_root, } => { + crate::monitoring::increment_commands_processed("sign"); if self.approved_aggregate_public_key.is_none() { debug!("{self}: Cannot sign a block without an approved aggregate public key. 
Ignore it."); return; } - let signer_signature_hash = block.header.signer_signature_hash(); + let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); let mut block_info = self .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) - .unwrap_or_else(|_| Some(BlockInfo::new(block.clone()))) - .unwrap_or_else(|| BlockInfo::new(block.clone())); + .unwrap_or_else(|_| Some(BlockInfo::from(block_proposal.clone()))) + .unwrap_or_else(|| BlockInfo::from(block_proposal.clone())); if block_info.signed_over { debug!("{self}: Received a sign command for a block we are already signing over. Ignore it."); return; } info!("{self}: Signing block"; - "block_consensus_hash" => %block.header.consensus_hash, - "block_height" => block.header.chain_length, - "pre_sign_block_id" => %block.block_id(), + "block_consensus_hash" => %block_proposal.block.header.consensus_hash, + "block_height" => block_proposal.block.header.chain_length, + "pre_sign_block_id" => %block_proposal.block.block_id(), ); match self.coordinator.start_signing_round( - &block.serialize_to_vec(), + &block_proposal.serialize_to_vec(), *is_taproot, *merkle_root, ) { @@ -408,45 +623,18 @@ impl Signer { debug!("{self}: ACK: {ack:?}",); block_info.signed_over = true; self.signer_db - .insert_block(self.reward_cycle, &block_info) + .insert_block(&block_info) .unwrap_or_else(|e| { error!("{self}: Failed to insert block in DB: {e:?}"); }); + self.update_operation(Operation::Sign); } Err(e) => { error!("{self}: Failed to start signing block: {e:?}",); return; } } - } - } - self.update_operation(); - } - - /// Attempt to process the next command in the queue, and update state accordingly - pub fn process_next_command( - &mut self, - stacks_client: &StacksClient, - current_reward_cycle: u64, - ) { - let coordinator_id = self.get_coordinator(current_reward_cycle).0; - match &self.state { - State::Idle => { - if coordinator_id != Some(self.signer_id) { - debug!( - "{self}: Coordinator is 
{coordinator_id:?}. Will not process any commands...", - ); - return; - } - if let Some(command) = self.commands.pop_front() { - self.execute_command(stacks_client, &command); - } else { - debug!("{self}: Nothing to process. Waiting for command...",); - } - } - State::OperationInProgress => { - // We cannot execute the next command until the current one is finished... - debug!("{self}: Waiting for coordinator {coordinator_id:?} operation to finish. Coordinator state = {:?}", self.coordinator.state); + self.update_operation(Operation::Sign); } } } @@ -459,9 +647,9 @@ impl Signer { res: Sender>, current_reward_cycle: u64, ) { - let coordinator_id = self.get_coordinator(current_reward_cycle).0; let mut block_info = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { + crate::monitoring::increment_block_validation_responses(true); let signer_signature_hash = block_validate_ok.signer_signature_hash; // For mutability reasons, we need to take the block_info out of the map and add it back after processing let mut block_info = match self @@ -482,7 +670,7 @@ impl Signer { let is_valid = self.verify_block_transactions(stacks_client, &block_info.block); block_info.valid = Some(is_valid); self.signer_db - .insert_block(self.reward_cycle, &block_info) + .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); info!( "{self}: Treating block validation for block {} as valid: {:?}", @@ -492,6 +680,7 @@ impl Signer { block_info } BlockValidateResponse::Reject(block_validate_reject) => { + crate::monitoring::increment_block_validation_responses(false); let signer_signature_hash = block_validate_reject.signer_signature_hash; let mut block_info = match self .signer_db @@ -531,34 +720,15 @@ impl Signer { sig: vec![], }; self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); - } else { - if block_info.valid.unwrap_or(false) - && !block_info.signed_over - && coordinator_id == Some(self.signer_id) - { 
- // We are the coordinator. Trigger a signing round for this block - debug!( - "{self}: attempt to trigger a signing round for block"; - "signer_sighash" => %block_info.block.header.signer_signature_hash(), - "block_hash" => %block_info.block.header.block_hash(), - ); - self.commands.push_back(Command::Sign { - block: block_info.block.clone(), - is_taproot: false, - merkle_root: None, - }); - } else { - debug!( - "{self}: ignoring block."; - "block_hash" => block_info.block.header.block_hash(), - "valid" => block_info.valid, - "signed_over" => block_info.signed_over, - "coordinator_id" => coordinator_id, - ); - } } + debug!( + "{self}: Received a block validate response"; + "block_hash" => block_info.block.header.block_hash(), + "valid" => block_info.valid, + "signed_over" => block_info.signed_over, + ); self.signer_db - .insert_block(self.reward_cycle, &block_info) + .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); } @@ -570,15 +740,20 @@ impl Signer { messages: &[SignerMessage], current_reward_cycle: u64, ) { - let coordinator_pubkey = self.get_coordinator(current_reward_cycle).1; let packets: Vec = messages .iter() .filter_map(|msg| match msg { SignerMessage::DkgResults { .. } | SignerMessage::BlockResponse(_) + | SignerMessage::EncryptedSignerState(_) | SignerMessage::Transactions(_) => None, // TODO: if a signer tries to trigger DKG and we already have one set in the contract, ignore the request. 
SignerMessage::Packet(packet) => { + let coordinator_pubkey = if Self::is_dkg_message(&packet.msg) { + self.get_coordinator_dkg().1 + } else { + self.get_coordinator_sign(current_reward_cycle).1 + }; self.verify_packet(stacks_client, packet.clone(), &coordinator_pubkey) } }) @@ -586,61 +761,17 @@ impl Signer { self.handle_packets(stacks_client, res, &packets, current_reward_cycle); } - /// Handle proposed blocks submitted by the miners to stackerdb - fn handle_proposed_blocks( - &mut self, - stacks_client: &StacksClient, - proposals: &[BlockProposalSigners], - ) { - for proposal in proposals { - if proposal.reward_cycle != self.reward_cycle { - debug!( - "{self}: Received proposal for block outside of my reward cycle, ignoring."; - "proposal_reward_cycle" => proposal.reward_cycle, - "proposal_burn_height" => proposal.burn_height, - ); - continue; - } - let sig_hash = proposal.block.header.signer_signature_hash(); - match self.signer_db.block_lookup(self.reward_cycle, &sig_hash) { - Ok(Some(block)) => { - debug!( - "{self}: Received proposal for block already known, ignoring new proposal."; - "signer_sighash" => %sig_hash, - "proposal_burn_height" => proposal.burn_height, - "vote" => ?block.vote.as_ref().map(|v| { - if v.rejected { - "REJECT" - } else { - "ACCEPT" - } - }), - "signed_over" => block.signed_over, - ); - continue; - } - Ok(None) => { - // Store the block in our cache - self.signer_db - .insert_block(self.reward_cycle, &BlockInfo::new(proposal.block.clone())) - .unwrap_or_else(|e| { - error!("{self}: Failed to insert block in DB: {e:?}"); - }); - // Submit the block for validation - stacks_client - .submit_block_for_validation_with_retry(proposal.block.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}"); - }); - } - Err(e) => { - error!( - "{self}: Failed to lookup block in DB: {e:?}. Dropping proposal request." 
- ); - continue; - } - } - } + /// Helper function for determining if the provided message is a DKG specific message + fn is_dkg_message(msg: &Message) -> bool { + matches!( + msg, + Message::DkgBegin(_) + | Message::DkgEnd(_) + | Message::DkgEndBegin(_) + | Message::DkgPrivateBegin(_) + | Message::DkgPrivateShares(_) + | Message::DkgPublicShares(_) + ) } /// Process inbound packets as both a signer and a coordinator @@ -652,6 +783,9 @@ impl Signer { packets: &[Packet], current_reward_cycle: u64, ) { + if let Ok(packets_len) = packets.len().try_into() { + crate::monitoring::increment_inbound_packets(packets_len); + } let signer_outbound_messages = self .state_machine .process_inbound_messages(packets) @@ -680,13 +814,36 @@ impl Signer { self.process_operation_results(stacks_client, &operation_results); self.send_operation_results(res, operation_results); self.finish_operation(); - } else if !packets.is_empty() && self.coordinator.state != CoordinatorState::Idle { - // We have received a message and are in the middle of an operation. Update our state accordingly - self.update_operation(); + } else if !packets.is_empty() { + // We have received a message. 
Update our state accordingly + // Let us be extra explicit in case a new state type gets added to wsts' state machine + match &self.coordinator.state { + CoordinatorState::Idle => {} + CoordinatorState::DkgPublicDistribute + | CoordinatorState::DkgPublicGather + | CoordinatorState::DkgPrivateDistribute + | CoordinatorState::DkgPrivateGather + | CoordinatorState::DkgEndDistribute + | CoordinatorState::DkgEndGather => { + self.update_operation(Operation::Dkg); + } + CoordinatorState::NonceRequest(_, _) + | CoordinatorState::NonceGather(_, _) + | CoordinatorState::SigShareRequest(_, _) + | CoordinatorState::SigShareGather(_, _) => { + self.update_operation(Operation::Sign); + } + } } - debug!("{self}: Saving signer state"); - self.save_signer_state(); + if packets + .iter() + .any(|packet| matches!(packet.msg, Message::DkgEnd(_))) + { + debug!("{self}: Saving signer state"); + self.save_signer_state() + .unwrap_or_else(|_| panic!("{self}: Failed to save signer state")); + } self.send_outbound_messages(signer_outbound_messages); self.send_outbound_messages(coordinator_outbound_messages); } @@ -751,26 +908,35 @@ impl Signer { stacks_client: &StacksClient, nonce_request: &mut NonceRequest, ) -> Option { - let Some(block) = - NakamotoBlock::consensus_deserialize(&mut nonce_request.message.as_slice()).ok() + let Some(block_proposal) = + BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()).ok() else { - // We currently reject anything that is not a block + // We currently reject anything that is not a valid block proposal warn!("{self}: Received a nonce request for an unknown message stream. Reject it.",); return None; }; - let signer_signature_hash = block.header.signer_signature_hash(); + if block_proposal.reward_cycle != self.reward_cycle { + // We are not signing for this reward cycle. Reject the block + warn!( + "{self}: Received a nonce request for a different reward cycle. 
Reject it."; + "requested_reward_cycle" => block_proposal.reward_cycle, + ); + return None; + } + // TODO: could add a check to ignore an old burn block height if we know its oudated. Would require us to store the burn block height we last saw on the side. + let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); let Some(mut block_info) = self .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) .expect("Failed to connect to signer DB") else { debug!( - "{self}: We have received a block sign request for a block we have not seen before. Cache the nonce request and submit the block for validation..."; - "signer_sighash" => %block.header.signer_signature_hash(), + "{self}: received a nonce request for a new block. Submit block for validation. "; + "signer_sighash" => %signer_signature_hash, ); - let block_info = BlockInfo::new_with_request(block.clone(), nonce_request.clone()); + let block_info = BlockInfo::new_with_request(block_proposal, nonce_request.clone()); stacks_client - .submit_block_for_validation_with_retry(block) + .submit_block_for_validation(block_info.block.clone()) .unwrap_or_else(|e| { warn!("{self}: Failed to submit block for validation: {e:?}",); }); @@ -794,10 +960,14 @@ impl Signer { stacks_client: &StacksClient, block: &NakamotoBlock, ) -> bool { - if self.approved_aggregate_public_key.is_some() { - // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set - // TODO: should be only allow special cased transactions during prepare phase before a key is set? - debug!("{self}: Already have an aggregate key. 
Skipping transaction verification..."); + let next_reward_cycle = self.reward_cycle.wrapping_add(1); + let approved_aggregate_public_key = stacks_client + .get_approved_aggregate_key(next_reward_cycle) + .unwrap_or(None); + if approved_aggregate_public_key.is_some() { + // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set for the upcoming signers' reward cycle + // Otherwise it is a waste of block space and time to enforce as the desired outcome has been reached. + debug!("{self}: Already have an aggregate key for the next signer set's reward cycle ({}). Skipping transaction verification...", next_reward_cycle); return true; } if let Ok(expected_transactions) = self.get_expected_transactions(stacks_client) { @@ -857,7 +1027,7 @@ impl Signer { ) -> Result, ClientError> { let transactions: Vec<_> = self .stackerdb - .get_current_transactions_with_retry()? + .get_current_transactions()? .into_iter() .filter_map(|tx| { if !NakamotoSigners::valid_vote_transaction(nonces, &tx, self.mainnet) { @@ -882,7 +1052,7 @@ impl Signer { let account_nonces = self.get_account_nonces(stacks_client, &self.next_signer_addresses); let transactions: Vec<_> = self .stackerdb - .get_next_transactions_with_retry(&self.next_signer_slot_ids)?; + .get_next_transactions(&self.next_signer_slot_ids)?; let mut filtered_transactions = std::collections::HashMap::new(); NakamotoSigners::update_filtered_transactions( &mut filtered_transactions, @@ -939,8 +1109,8 @@ impl Signer { return None; }; self.signer_db - .insert_block(self.reward_cycle, &updated_block_info) - .expect(&format!("{self}: Failed to insert block in DB")); + .insert_block(&updated_block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); let process_request = updated_block_info.vote.is_some(); if !process_request { debug!("Failed to validate nonce request"); @@ -972,20 +1142,25 @@ impl Signer { // Signers only every trigger non-taproot signing rounds over 
blocks. Ignore SignTaproot results match operation_result { OperationResult::Sign(signature) => { + crate::monitoring::increment_operation_results("sign"); debug!("{self}: Received signature result"); self.process_signature(signature); } OperationResult::SignTaproot(_) => { + crate::monitoring::increment_operation_results("sign_taproot"); debug!("{self}: Received a signature result for a taproot signature. Nothing to broadcast as we currently sign blocks with a FROST signature."); } OperationResult::Dkg(aggregate_key) => { + crate::monitoring::increment_operation_results("dkg"); self.process_dkg(stacks_client, aggregate_key); } OperationResult::SignError(e) => { + crate::monitoring::increment_operation_results("sign_error"); warn!("{self}: Received a Sign error: {e:?}"); self.process_sign_error(e); } OperationResult::DkgError(e) => { + crate::monitoring::increment_operation_results("dkg_error"); warn!("{self}: Received a DKG error: {e:?}"); // TODO: process these errors and track malicious signers to report } @@ -996,6 +1171,10 @@ impl Signer { /// Process a dkg result by broadcasting a vote to the stacks node fn process_dkg(&mut self, stacks_client: &StacksClient, dkg_public_key: &Point) { let mut dkg_results_bytes = vec![]; + debug!( + "{self}: Received DKG result. 
Broadcasting vote to the stacks node..."; + "dkg_public_key" => %dkg_public_key + ); if let Err(e) = SignerMessage::serialize_dkg_result( &mut dkg_results_bytes, dkg_public_key, @@ -1003,54 +1182,34 @@ impl Signer { ) { error!("{}: Failed to serialize DKGResults message for StackerDB, will continue operating.", self.signer_id; "error" => %e); - } else { - if let Err(e) = self - .stackerdb - .send_message_bytes_with_retry(&MessageSlotID::DkgResults, dkg_results_bytes) - { - error!("{}: Failed to send DKGResults message to StackerDB, will continue operating.", self.signer_id; + } else if let Err(e) = self + .stackerdb + .send_message_bytes_with_retry(&MessageSlotID::DkgResults, dkg_results_bytes) + { + error!("{}: Failed to send DKGResults message to StackerDB, will continue operating.", self.signer_id; "error" => %e); - } } - let epoch = retry_with_exponential_backoff(|| { - stacks_client - .get_node_epoch() - .map_err(backoff::Error::transient) - }) - .unwrap_or(StacksEpochId::Epoch24); - let tx_fee = if epoch < StacksEpochId::Epoch30 { - debug!("{self}: in pre Epoch 3.0 cycles, must set a transaction fee for the DKG vote."); - Some(self.tx_fee_ustx) - } else { - None - }; // Get our current nonce from the stacks node and compare it against what we have sitting in the stackerdb instance let signer_address = stacks_client.get_signer_address(); // Retreieve ALL account nonces as we may have transactions from other signers in our stackerdb slot that we care about let account_nonces = self.get_account_nonces(stacks_client, &self.signer_addresses); let account_nonce = account_nonces.get(signer_address).unwrap_or(&0); - let signer_transactions = retry_with_exponential_backoff(|| { - self.get_signer_transactions(&account_nonces) - .map_err(backoff::Error::transient) - }) - .map_err(|e| { - warn!("{self}: Unable to get signer transactions: {e:?}"); - }) - .unwrap_or_default(); + let signer_transactions = self + .get_signer_transactions(&account_nonces) + .map_err(|e| { + 
error!("{self}: Unable to get signer transactions: {e:?}."); + }) + .unwrap_or_default(); // If we have a transaction in the stackerdb slot, we need to increment the nonce hence the +1, else should use the account nonce let next_nonce = signer_transactions .first() .map(|tx| tx.get_origin_nonce().wrapping_add(1)) .unwrap_or(*account_nonce); - match stacks_client.build_vote_for_aggregate_public_key( - self.stackerdb.get_signer_slot_id().0, - self.coordinator.current_dkg_id, - *dkg_public_key, - self.reward_cycle, - tx_fee, - next_nonce, - ) { + let epoch = stacks_client + .get_node_epoch() + .unwrap_or(StacksEpochId::Epoch24); + match self.build_dkg_vote(stacks_client, &epoch, next_nonce, *dkg_public_key) { Ok(new_transaction) => { if let Err(e) = self.broadcast_dkg_vote( stacks_client, @@ -1071,6 +1230,44 @@ impl Signer { } } + /// Build a signed DKG vote transaction + fn build_dkg_vote( + &mut self, + stacks_client: &StacksClient, + epoch: &StacksEpochId, + nonce: u64, + dkg_public_key: Point, + ) -> Result { + let mut unsigned_tx = stacks_client.build_unsigned_vote_for_aggregate_public_key( + self.stackerdb.get_signer_slot_id().0, + self.coordinator.current_dkg_id, + dkg_public_key, + self.reward_cycle, + nonce, + )?; + let tx_fee = if epoch < &StacksEpochId::Epoch30 { + info!("{self}: in pre Epoch 3.0 cycles, must set a transaction fee for the DKG vote."); + let fee = if let Some(max_fee) = self.max_tx_fee_ustx { + let estimated_fee = stacks_client + .get_medium_estimated_fee_ustx(&unsigned_tx) + .map_err(|e| { + warn!("{self}: unable to estimate fee for DKG vote transaction: {e:?}."); + e + }) + .unwrap_or(self.tx_fee_ustx); + std::cmp::min(estimated_fee, max_fee) + } else { + self.tx_fee_ustx + }; + debug!("{self}: Using a fee of {fee} uSTX for DKG vote transaction."); + fee + } else { + 0 + }; + unsigned_tx.set_tx_fee(tx_fee); + stacks_client.sign_transaction(unsigned_tx) + } + // Get the account nonces for the provided list of signer addresses fn 
get_account_nonces( &self, @@ -1108,7 +1305,7 @@ impl Signer { debug!("{self}: Received a DKG result while in epoch 3.0. Broadcast the transaction only to stackerDB."); } else if epoch == StacksEpochId::Epoch25 { debug!("{self}: Received a DKG result while in epoch 2.5. Broadcast the transaction to the mempool."); - stacks_client.submit_transaction_with_retry(&new_transaction)?; + stacks_client.submit_transaction(&new_transaction)?; info!("{self}: Submitted DKG vote transaction ({txid:?}) to the mempool"); } else { debug!("{self}: Received a DKG result, but are in an unsupported epoch. Do not broadcast the transaction ({}).", new_transaction.txid()); @@ -1118,6 +1315,7 @@ impl Signer { signer_transactions.push(new_transaction); let signer_message = SignerMessage::Transactions(signer_transactions); self.stackerdb.send_message_with_retry(signer_message)?; + crate::monitoring::increment_dkg_votes_submitted(); info!("{self}: Broadcasted DKG vote transaction ({txid}) to stacker DB"); Ok(()) } @@ -1133,9 +1331,11 @@ impl Signer { }; let block_submission = if block_vote.rejected { + crate::monitoring::increment_block_responses_sent(false); // We signed a rejection message. Return a rejection message BlockResponse::rejected(block_vote.signer_signature_hash, signature.clone()) } else { + crate::monitoring::increment_block_responses_sent(true); // we agreed to sign the block hash. 
Return an approval message BlockResponse::accepted(block_vote.signer_signature_hash, signature.clone()) }; @@ -1190,16 +1390,58 @@ impl Signer { } } - /// Persist state needed to ensure the signer can continue to perform - /// DKG and participate in signing rounds accross crashes - /// - /// # Panics - /// Panics if the insertion fails - fn save_signer_state(&self) { + /// Persist signer state in both SignerDB and StackerDB + fn save_signer_state(&mut self) -> Result<(), PersistenceError> { + let rng = &mut OsRng; + let state = self.state_machine.signer.save(); + let serialized_state = serde_json::to_vec(&state)?; + + let encrypted_state = encrypt( + &self.state_machine.network_private_key, + &serialized_state, + rng, + )?; + + let signerdb_result = self.save_signer_state_in_signerdb(&encrypted_state); + let stackerdb_result = self.save_signer_state_in_stackerdb(encrypted_state); + + if let Err(err) = &signerdb_result { + warn!("{self}: Failed to persist state in SignerDB: {err}"); + } + + if let Err(err) = &stackerdb_result { + warn!("{self}: Failed to persist state in StackerDB: {err}"); + + stackerdb_result + } else { + signerdb_result + } + } + + /// Persist signer state in SignerDB + fn save_signer_state_in_signerdb( + &self, + encrypted_state: &[u8], + ) -> Result<(), PersistenceError> { self.signer_db - .insert_signer_state(self.reward_cycle, &state) - .expect("Failed to persist signer state"); + .insert_encrypted_signer_state(self.reward_cycle, encrypted_state)?; + Ok(()) + } + + /// Persist signer state in StackerDB + /// TODO: this is a no-op until the number of signer slots can be expanded + fn save_signer_state_in_stackerdb( + &mut self, + _encrypted_state: Vec, + ) -> Result<(), PersistenceError> { + /* + * This is a no-op until the number of signer slots can be expanded to 14 + * + let message = SignerMessage::EncryptedSignerState(encrypted_state); + self.stackerdb.send_message_with_retry(message)?; + */ + Ok(()) } /// Send any operation results 
across the provided channel @@ -1235,39 +1477,93 @@ impl Signer { } } - /// Update the DKG for the provided signer info, triggering it if required - pub fn update_dkg( + /// Refresh DKG and queue it if required + pub fn refresh_dkg( &mut self, stacks_client: &StacksClient, + res: Sender>, current_reward_cycle: u64, ) -> Result<(), ClientError> { - let reward_cycle = self.reward_cycle; + // First attempt to retrieve the aggregate key from the contract. + self.update_approved_aggregate_key(stacks_client)?; + if self.approved_aggregate_public_key.is_some() { + return Ok(()); + } + // Check stackerdb for any missed DKG messages to catch up our state. + self.read_dkg_stackerdb_messages(stacks_client, res, current_reward_cycle)?; + // Check if we should still queue DKG + if !self.should_queue_dkg(stacks_client)? { + return Ok(()); + } + // Because there could be a slight delay in reading pending transactions and a key being approved by the contract, + // check one last time if the approved key was set since we finished the should queue dkg call + self.update_approved_aggregate_key(stacks_client)?; + if self.approved_aggregate_public_key.is_some() { + return Ok(()); + } + if self.commands.front() != Some(&SignerCommand::Dkg) { + info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command..."); + self.commands.push_front(SignerCommand::Dkg); + } else { + debug!("{self}: DKG command already queued..."); + } + Ok(()) + } + + /// Overwrites the approved aggregate key to the value in the contract, updating state accordingly + pub fn update_approved_aggregate_key( + &mut self, + stacks_client: &StacksClient, + ) -> Result<(), ClientError> { let old_dkg = self.approved_aggregate_public_key; self.approved_aggregate_public_key = - stacks_client.get_approved_aggregate_key(reward_cycle)?; + stacks_client.get_approved_aggregate_key(self.reward_cycle)?; if self.approved_aggregate_public_key.is_some() { // TODO: this will never work as is. 
We need to have stored our party shares on the side etc for this particular aggregate key. // Need to update state to store the necessary info, check against it to see if we have participated in the winning round and // then overwrite our value accordingly. Otherwise, we will be locked out of the round and should not participate. + let internal_dkg = self.coordinator.aggregate_public_key; + if internal_dkg != self.approved_aggregate_public_key { + warn!("{self}: we do not support changing the internal DKG key yet. Expected {internal_dkg:?} got {:?}", self.approved_aggregate_public_key); + } self.coordinator .set_aggregate_public_key(self.approved_aggregate_public_key); if old_dkg != self.approved_aggregate_public_key { - debug!( - "{self}: updated DKG value to {:?}.", + warn!( + "{self}: updated DKG value from {old_dkg:?} to {:?}.", self.approved_aggregate_public_key ); } - return Ok(()); - }; + match self.state { + State::OperationInProgress(Operation::Dkg) => { + debug!( + "{self}: DKG has already been set. Aborting DKG operation {}.", + self.coordinator.current_dkg_id + ); + self.finish_operation(); + } + State::Uninitialized => { + // If we successfully load the DKG value, we are fully initialized + self.state = State::Idle; + } + _ => { + // do nothing + } + } + } + Ok(()) + } + + /// Should DKG be queued to the current signer's command queue + /// This assumes that no key has been approved by the contract yet + pub fn should_queue_dkg(&mut self, stacks_client: &StacksClient) -> Result { if self.state != State::Idle - || Some(self.signer_id) != self.get_coordinator(current_reward_cycle).0 + || self.signer_id != self.get_coordinator_dkg().0 + || self.commands.front() == Some(&SignerCommand::Dkg) { - // We are not the coordinator or we are in the middle of an operation. Do not attempt to queue DKG - return Ok(()); + // We are not the coordinator, we are in the middle of an operation, or we have already queued DKG. 
Do not attempt to queue DKG + return Ok(false); } - debug!("{self}: Checking if old DKG vote transaction exists in StackerDB..."); - // Have I already voted, but the vote is still pending in StackerDB? Check stackerdb for the same round number and reward cycle vote transaction - // Only get the account nonce of THIS signer as we only care about our own votes, not other signer votes let signer_address = stacks_client.get_signer_address(); let account_nonces = self.get_account_nonces(stacks_client, &[*signer_address]); let old_transactions = self.get_signer_transactions(&account_nonces).map_err(|e| { @@ -1279,20 +1575,19 @@ impl Signer { NakamotoSigners::parse_vote_for_aggregate_public_key(transaction).unwrap_or_else(|| panic!("BUG: {self}: Received an invalid {SIGNERS_VOTING_FUNCTION_NAME} transaction in an already filtered list: {transaction:?}")); if Some(params.aggregate_key) == self.coordinator.aggregate_public_key && params.voting_round == self.coordinator.current_dkg_id - && reward_cycle == self.reward_cycle { debug!("{self}: Not triggering a DKG round. Already have a pending vote transaction."; "txid" => %transaction.txid(), "aggregate_key" => %params.aggregate_key, "voting_round" => params.voting_round ); - return Ok(()); + return Ok(false); } } if let Some(aggregate_key) = stacks_client.get_vote_for_aggregate_public_key( self.coordinator.current_dkg_id, self.reward_cycle, - *stacks_client.get_signer_address(), + *signer_address, )? { let Some(round_weight) = stacks_client .get_round_vote_weight(self.reward_cycle, self.coordinator.current_dkg_id)? 
@@ -1302,7 +1597,7 @@ impl Signer { "voting_round" => self.coordinator.current_dkg_id, "aggregate_key" => %aggregate_key ); - return Ok(()); + return Ok(false); }; let threshold_weight = stacks_client.get_vote_threshold_weight(self.reward_cycle)?; if round_weight < threshold_weight { @@ -1315,87 +1610,164 @@ impl Signer { "round_weight" => round_weight, "threshold_weight" => threshold_weight ); - return Ok(()); + return Ok(false); } - debug!("{self}: Vote for DKG failed. Triggering a DKG round."; - "voting_round" => self.coordinator.current_dkg_id, - "aggregate_key" => %aggregate_key, - "round_weight" => round_weight, - "threshold_weight" => threshold_weight - ); - } else { - debug!("{self}: Triggering a DKG round."); - } - if self.commands.front() != Some(&Command::Dkg) { - info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command..."); - self.commands.push_front(Command::Dkg); } else { - debug!("{self}: DKG command already queued..."); - } - Ok(()) - } - - /// Process the event - pub fn process_event( - &mut self, - stacks_client: &StacksClient, - event: Option<&SignerEvent>, - res: Sender>, - current_reward_cycle: u64, - ) -> Result<(), ClientError> { - debug!("{self}: Processing event: {event:?}"); - match event { - Some(SignerEvent::BlockValidationResponse(block_validate_response)) => { - debug!("{self}: Received a block proposal result from the stacks node..."); - self.handle_block_validate_response( - stacks_client, - block_validate_response, - res, - current_reward_cycle, - ) - } - Some(SignerEvent::SignerMessages(signer_set, messages)) => { - if *signer_set != self.stackerdb.get_signer_set() { - debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. Ignoring..."); - return Ok(()); + // Have I already voted, but the vote is still pending in StackerDB? 
Check stackerdb for the same round number and reward cycle vote transaction + // Only get the account nonce of THIS signer as we only care about our own votes, not other signer votes + let account_nonce = stacks_client.get_account_nonce(signer_address).unwrap_or(0); + let old_transactions = self.stackerdb.get_current_transactions()?; + // Check if we have an existing vote transaction for the same round and reward cycle + for transaction in old_transactions.iter() { + // We should not consider other signer transactions and should ignore invalid transaction versions + if transaction.origin_address() != *signer_address + || transaction.is_mainnet() != self.mainnet + { + continue; } - debug!( - "{self}: Received {} messages from the other signers...", - messages.len() - ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); - } - Some(SignerEvent::MinerMessages(blocks, messages, miner_key)) => { - if let Some(miner_key) = miner_key { - let miner_key = PublicKey::try_from(miner_key.to_bytes_compressed().as_slice()) - .expect("FATAL: could not convert from StacksPublicKey to PublicKey"); - self.miner_key = Some(miner_key); + let Some(params) = + NakamotoSigners::parse_vote_for_aggregate_public_key(transaction) + else { + continue; }; - if current_reward_cycle != self.reward_cycle { - // There is not point in processing blocks if we are not the current reward cycle (we can never actually contribute to signing these blocks) - debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). 
Ignoring..."); - return Ok(()); + let Some(dkg_public_key) = self.coordinator.aggregate_public_key else { + break; + }; + if params.aggregate_key == dkg_public_key + && params.voting_round == self.coordinator.current_dkg_id + && params.reward_cycle == self.reward_cycle + { + let origin_nonce = transaction.get_origin_nonce(); + if origin_nonce < account_nonce { + // We have already voted, but our vote nonce is outdated. Resubmit vote with updated transaction + warn!("{self}: DKG vote submitted with invalid nonce ({origin_nonce} < {account_nonce}). Resubmitting vote."); + self.process_dkg(stacks_client, &dkg_public_key); + } else { + debug!("{self}: Already have a pending DKG vote in StackerDB. Waiting for it to be confirmed."; + "txid" => %transaction.txid(), + "aggregate_key" => %params.aggregate_key, + "voting_round" => params.voting_round, + "reward_cycle" => params.reward_cycle, + "nonce" => origin_nonce + ); + } + return Ok(false); } - debug!( - "{self}: Received {} block proposals and {} messages from the miner", - blocks.len(), - messages.len(); - "miner_key" => ?miner_key, - ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); - self.handle_proposed_blocks(stacks_client, blocks); - } - Some(SignerEvent::StatusCheck) => { - debug!("{self}: Received a status check event.") - } - Some(SignerEvent::NewBurnBlock(height)) => { - debug!("{self}: Receved a new burn block event for block height {height}") - } - None => { - // No event. Do nothing. - debug!("{self}: No event received") } } - Ok(()) + Ok(true) + } +} + +fn load_encrypted_signer_state( + storage: S, + id: S::IdType, + private_key: &Scalar, +) -> Result, PersistenceError> { + if let Some(encrypted_state) = storage.get_encrypted_signer_state(id)? 
{ + let serialized_state = decrypt(private_key, &encrypted_state)?; + let state = serde_json::from_slice(&serialized_state) + .expect("Failed to deserialize decryoted state"); + Ok(Some(v2::Signer::load(&state))) + } else { + Ok(None) + } +} + +trait SignerStateStorage { + type IdType; + + fn get_encrypted_signer_state( + self, + signer_config: Self::IdType, + ) -> Result>, PersistenceError>; +} + +impl SignerStateStorage for &mut StackerDB { + type IdType = SignerSlotID; + + fn get_encrypted_signer_state( + self, + id: Self::IdType, + ) -> Result>, PersistenceError> { + Ok(self.get_encrypted_signer_state(id)?) + } +} + +impl SignerStateStorage for &SignerDb { + type IdType = u64; + fn get_encrypted_signer_state( + self, + id: Self::IdType, + ) -> Result>, PersistenceError> { + Ok(self.get_encrypted_signer_state(id)?) + } +} + +fn encrypt( + private_key: &Scalar, + msg: &[u8], + rng: &mut impl rand_core::CryptoRngCore, +) -> Result, EncryptionError> { + wsts::util::encrypt(derive_encryption_key(private_key).as_bytes(), msg, rng) + .map_err(|_| EncryptionError::Encrypt) +} + +fn decrypt(private_key: &Scalar, encrypted_msg: &[u8]) -> Result, EncryptionError> { + wsts::util::decrypt(derive_encryption_key(private_key).as_bytes(), encrypted_msg) + .map_err(|_| EncryptionError::Decrypt) +} + +fn derive_encryption_key(private_key: &Scalar) -> Sha512Trunc256Sum { + let mut prefixed_key = "SIGNER_STATE_ENCRYPTION_KEY/".as_bytes().to_vec(); + prefixed_key.extend_from_slice(&private_key.to_bytes()); + + Sha512Trunc256Sum::from_data(&prefixed_key) +} + +/// Error stemming from a persistence operation +#[derive(Debug, thiserror::Error)] +pub enum PersistenceError { + /// Encryption error + #[error("{0}")] + Encryption(#[from] EncryptionError), + /// Database error + #[error("Database operation failed: {0}")] + DBError(#[from] DBError), + /// Serialization error + #[error("JSON serialization failed: {0}")] + JsonSerializationError(#[from] serde_json::Error), + /// StackerDB 
client error + #[error("StackerDB client error: {0}")] + StackerDBClientError(#[from] ClientError), +} + +/// Error stemming from a persistence operation +#[derive(Debug, thiserror::Error)] +pub enum EncryptionError { + /// Encryption failed + #[error("Encryption operation failed")] + Encrypt, + /// Decryption failed + #[error("Encryption operation failed")] + Decrypt, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn encrypted_messages_should_be_possible_to_decrypt() { + let msg = "Nobody's gonna know".as_bytes(); + let key = Scalar::random(&mut OsRng); + + let encrypted = encrypt(&key, msg, &mut OsRng).unwrap(); + + assert_ne!(encrypted, msg); + + let decrypted = decrypt(&key, &encrypted).unwrap(); + + assert_eq!(decrypted, msg); } } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/v1/signerdb.rs similarity index 60% rename from stacks-signer/src/signerdb.rs rename to stacks-signer/src/v1/signerdb.rs index ea9c4eeb17..139bed048f 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/v1/signerdb.rs @@ -23,9 +23,8 @@ use rusqlite::{params, Connection, Error as SqliteError, OpenFlags, NO_PARAMS}; use slog::slog_debug; use stacks_common::debug; use stacks_common::util::hash::Sha512Trunc256Sum; -use wsts::traits::SignerState; -use crate::signer::BlockInfo; +use crate::v1::signer::BlockInfo; /// This struct manages a SQLite database connection /// for the signer. 
@@ -35,18 +34,19 @@ pub struct SignerDb { db: Connection, } -const CREATE_BLOCKS_TABLE: &'static str = " +const CREATE_BLOCKS_TABLE: &str = " CREATE TABLE IF NOT EXISTS blocks ( reward_cycle INTEGER NOT NULL, signer_signature_hash TEXT NOT NULL, block_info TEXT NOT NULL, + burn_block_height INTEGER NOT NULL, PRIMARY KEY (reward_cycle, signer_signature_hash) )"; -const CREATE_SIGNER_STATE_TABLE: &'static str = " +const CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, - state TEXT NOT NULL + encrypted_state BLOB NOT NULL )"; impl SignerDb { @@ -84,26 +84,26 @@ impl SignerDb { } /// Get the signer state for the provided reward cycle if it exists in the database - pub fn get_signer_state(&self, reward_cycle: u64) -> Result, DBError> { - let result: Option = query_row( + pub fn get_encrypted_signer_state( + &self, + reward_cycle: u64, + ) -> Result>, DBError> { + query_row( &self.db, - "SELECT state FROM signer_states WHERE reward_cycle = ?", - &[u64_to_sql(reward_cycle)?], - )?; - - try_deserialize(result) + "SELECT encrypted_state FROM signer_states WHERE reward_cycle = ?", + [u64_to_sql(reward_cycle)?], + ) } /// Insert the given state in the `signer_states` table for the given reward cycle - pub fn insert_signer_state( + pub fn insert_encrypted_signer_state( &self, reward_cycle: u64, - signer_state: &SignerState, + encrypted_signer_state: &[u8], ) -> Result<(), DBError> { - let serialized_state = serde_json::to_string(signer_state)?; self.db.execute( - "INSERT OR REPLACE INTO signer_states (reward_cycle, state) VALUES (?1, ?2)", - params![&u64_to_sql(reward_cycle)?, &serialized_state], + "INSERT OR REPLACE INTO signer_states (reward_cycle, encrypted_state) VALUES (?1, ?2)", + params![&u64_to_sql(reward_cycle)?, &encrypted_signer_state], )?; Ok(()) } @@ -126,30 +126,29 @@ impl SignerDb { /// Insert a block into the database. /// `hash` is the `signer_signature_hash` of the block. 
- pub fn insert_block( - &mut self, - reward_cycle: u64, - block_info: &BlockInfo, - ) -> Result<(), DBError> { + pub fn insert_block(&mut self, block_info: &BlockInfo) -> Result<(), DBError> { let block_json = serde_json::to_string(&block_info).expect("Unable to serialize block info"); let hash = &block_info.signer_signature_hash(); let block_id = &block_info.block.block_id(); let signed_over = &block_info.signed_over; - debug!( - "Inserting block_info: reward_cycle = {reward_cycle}, sighash = {hash}, block_id = {block_id}, signed = {signed_over} vote = {:?}", - block_info.vote.as_ref().map(|v| { - if v.rejected { - "REJECT" - } else { - "ACCEPT" - } - }) + let vote = block_info + .vote + .as_ref() + .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" }); + + debug!("Inserting block_info."; + "reward_cycle" => %block_info.reward_cycle, + "burn_block_height" => %block_info.burn_block_height, + "sighash" => %hash, + "block_id" => %block_id, + "signed" => %signed_over, + "vote" => vote ); self.db .execute( - "INSERT OR REPLACE INTO blocks (reward_cycle, signer_signature_hash, block_info) VALUES (?1, ?2, ?3)", - params![&u64_to_sql(reward_cycle)?, hash.to_string(), &block_json], + "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info) VALUES (?1, ?2, ?3, ?4)", + params![u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), &block_json], )?; Ok(()) @@ -170,8 +169,8 @@ where pub fn test_signer_db(db_path: &str) -> SignerDb { use std::fs; - if fs::metadata(&db_path).is_ok() { - fs::remove_file(&db_path).unwrap(); + if fs::metadata(db_path).is_ok() { + fs::remove_file(db_path).unwrap(); } SignerDb::new(db_path).expect("Failed to create signer db") } @@ -184,16 +183,7 @@ mod tests { use blockstack_lib::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoBlockVote, }; - use blockstack_lib::chainstate::stacks::ThresholdSignature; - use num_traits::identities::Zero; - use 
polynomial::Polynomial; - use stacks_common::bitvec::BitVec; - use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; - use stacks_common::util::secp256k1::MessageSignature; - use wsts::common::Nonce; - use wsts::curve::point::Point; - use wsts::curve::scalar::Scalar; - use wsts::traits::PartyState; + use libsigner::BlockProposal; use super::*; @@ -204,53 +194,23 @@ mod tests { } fn create_block_override( - overrides: impl FnOnce(&mut NakamotoBlock), - ) -> (BlockInfo, NakamotoBlock) { - let header = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; - let mut block = NakamotoBlock { + overrides: impl FnOnce(&mut BlockProposal), + ) -> (BlockInfo, BlockProposal) { + let header = NakamotoBlockHeader::empty(); + let block = NakamotoBlock { header, txs: vec![], }; - overrides(&mut block); - (BlockInfo::new(block.clone()), block) - } - - fn create_signer_state(id: u32) -> SignerState { - let ps1 = PartyState { - polynomial: Some(Polynomial::new(vec![1.into(), 2.into(), 3.into()])), - private_keys: vec![(1, 45.into()), (2, 56.into())], - nonce: Nonce::zero(), - }; - - let ps2 = PartyState { - polynomial: Some(Polynomial::new(vec![1.into(), 2.into(), 3.into()])), - private_keys: vec![(1, 45.into()), (2, 56.into())], - nonce: Nonce::zero(), + let mut block_proposal = BlockProposal { + block, + burn_height: 7, + reward_cycle: 42, }; - - SignerState { - id, - key_ids: vec![2, 4], - num_keys: 12, - num_parties: 10, - threshold: 7, - group_key: Point::from(Scalar::from(42)), - parties: vec![(2, ps1), (4, ps2)], - } + overrides(&mut block_proposal); + (BlockInfo::from(block_proposal.clone()), block_proposal) } - 
fn create_block() -> (BlockInfo, NakamotoBlock) { + fn create_block() -> (BlockInfo, BlockProposal) { create_block_override(|_| {}) } @@ -263,21 +223,26 @@ mod tests { fn test_basic_signer_db_with_path(db_path: impl AsRef) { let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); - let reward_cycle = 1; - let (block_info, block) = create_block(); - db.insert_block(reward_cycle, &block_info) + let (block_info, block_proposal) = create_block(); + let reward_cycle = block_info.reward_cycle; + db.insert_block(&block_info) .expect("Unable to insert block into db"); - let block_info = db - .block_lookup(reward_cycle, &block.header.signer_signature_hash()) + .block_lookup( + reward_cycle, + &block_proposal.block.header.signer_signature_hash(), + ) .unwrap() .expect("Unable to get block from db"); - assert_eq!(BlockInfo::new(block.clone()), block_info); + assert_eq!(BlockInfo::from(block_proposal.clone()), block_info); // Test looking up a block from a different reward cycle let block_info = db - .block_lookup(reward_cycle + 1, &block.header.signer_signature_hash()) + .block_lookup( + reward_cycle + 1, + &block_proposal.block.header.signer_signature_hash(), + ) .unwrap(); assert!(block_info.is_none()); } @@ -297,23 +262,27 @@ mod tests { fn test_update_block() { let db_path = tmp_db_path(); let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); - let reward_cycle = 42; - let (block_info, block) = create_block(); - db.insert_block(reward_cycle, &block_info) + let (block_info, block_proposal) = create_block(); + let reward_cycle = block_info.reward_cycle; + db.insert_block(&block_info) .expect("Unable to insert block into db"); let block_info = db - .block_lookup(reward_cycle, &block.header.signer_signature_hash()) + .block_lookup( + reward_cycle, + &block_proposal.block.header.signer_signature_hash(), + ) .unwrap() .expect("Unable to get block from db"); - assert_eq!(BlockInfo::new(block.clone()), block_info); + 
assert_eq!(BlockInfo::from(block_proposal.clone()), block_info); let old_block_info = block_info; - let old_block = block; + let old_block_proposal = block_proposal; - let (mut block_info, block) = create_block_override(|b| { - b.header.signer_signature = old_block.header.signer_signature.clone(); + let (mut block_info, block_proposal) = create_block_override(|b| { + b.block.header.signer_signature = + old_block_proposal.block.header.signer_signature.clone(); }); assert_eq!( block_info.signer_signature_hash(), @@ -324,11 +293,14 @@ mod tests { rejected: false, }; block_info.vote = Some(vote.clone()); - db.insert_block(reward_cycle, &block_info) + db.insert_block(&block_info) .expect("Unable to insert block into db"); let block_info = db - .block_lookup(reward_cycle, &block.header.signer_signature_hash()) + .block_lookup( + reward_cycle, + &block_proposal.block.header.signer_signature_hash(), + ) .unwrap() .expect("Unable to get block from db"); @@ -340,35 +312,33 @@ mod tests { fn test_write_signer_state() { let db_path = tmp_db_path(); let db = SignerDb::new(db_path).expect("Failed to create signer db"); - let state_0 = create_signer_state(0); - let state_1 = create_signer_state(1); + let state_0 = vec![0]; + let state_1 = vec![1; 1024]; - db.insert_signer_state(10, &state_0) + db.insert_encrypted_signer_state(10, &state_0) .expect("Failed to insert signer state"); - db.insert_signer_state(11, &state_1) + db.insert_encrypted_signer_state(11, &state_1) .expect("Failed to insert signer state"); assert_eq!( - db.get_signer_state(10) + db.get_encrypted_signer_state(10) .expect("Failed to get signer state") - .unwrap() - .id, - state_0.id + .unwrap(), + state_0 ); assert_eq!( - db.get_signer_state(11) + db.get_encrypted_signer_state(11) .expect("Failed to get signer state") - .unwrap() - .id, - state_1.id + .unwrap(), + state_1 ); assert!(db - .get_signer_state(12) + .get_encrypted_signer_state(12) .expect("Failed to get signer state") .is_none()); assert!(db - 
.get_signer_state(9) + .get_encrypted_signer_state(9) .expect("Failed to get signer state") .is_none()); } diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 51084b5653..0247a54512 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -62,8 +62,8 @@ use crate::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; use crate::chainstate::stacks::boot::{POX_2_MAINNET_CODE, POX_2_TESTNET_CODE}; use crate::chainstate::stacks::StacksPublicKey; use crate::core::{ - StacksEpoch, StacksEpochId, MINING_COMMITMENT_WINDOW, NETWORK_ID_MAINNET, NETWORK_ID_TESTNET, - PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, STACKS_2_0_LAST_BLOCK_TO_PROCESS, + StacksEpoch, StacksEpochId, NETWORK_ID_MAINNET, NETWORK_ID_TESTNET, PEER_VERSION_MAINNET, + PEER_VERSION_TESTNET, STACKS_2_0_LAST_BLOCK_TO_PROCESS, }; use crate::deps; use crate::monitoring::update_burnchain_height; @@ -90,6 +90,59 @@ impl BurnchainStateTransition { burn_dist: vec![], accepted_ops: vec![], consumed_leader_keys: vec![], + windowed_block_commits: vec![], + windowed_missed_commits: vec![], + } + } + + /// Get the transaction IDs of all accepted burnchain operations in this block + pub fn txids(&self) -> Vec { + self.accepted_ops.iter().map(|ref op| op.txid()).collect() + } + + /// Get the sum of all burnchain tokens spent in this burnchain block's accepted operations + /// (i.e. applies to block commits). + /// Returns None on overflow. + pub fn total_burns(&self) -> Option { + self.accepted_ops.iter().try_fold(0u64, |acc, op| { + let bf = match op { + BlockstackOperationType::LeaderBlockCommit(ref op) => op.burn_fee, + _ => 0, + }; + acc.checked_add(bf) + }) + } + + /// Get the median block burn from the window. If the window length is even, then the average + /// of the two middle-most values will be returned. 
+ pub fn windowed_median_burns(&self) -> Option { + let block_total_burn_opts = self.windowed_block_commits.iter().map(|block_commits| { + block_commits + .iter() + .try_fold(0u64, |acc, op| acc.checked_add(op.burn_fee)) + }); + + let mut block_total_burns = vec![]; + for burn_opt in block_total_burn_opts.into_iter() { + block_total_burns.push(burn_opt?); + } + + block_total_burns.sort(); + + if block_total_burns.len() == 0 { + return Some(0); + } else if block_total_burns.len() == 1 { + return Some(block_total_burns[0]); + } else if block_total_burns.len() % 2 != 0 { + let idx = block_total_burns.len() / 2; + return block_total_burns.get(idx).map(|b| *b); + } else { + // NOTE: the `- 1` is safe because block_total_burns.len() >= 2 + let idx_left = block_total_burns.len() / 2 - 1; + let idx_right = block_total_burns.len() / 2; + let burn_left = block_total_burns.get(idx_left)?; + let burn_right = block_total_burns.get(idx_right)?; + return Some((burn_left + burn_right) / 2); } } @@ -158,10 +211,26 @@ impl BurnchainStateTransition { }) .epoch_id; + // what was the epoch at the start of this window? + let window_start_epoch_id = SortitionDB::get_stacks_epoch( + sort_tx, + parent_snapshot + .block_height + .saturating_sub(epoch_id.mining_commitment_window().into()), + )? + .unwrap_or_else(|| { + panic!( + "FATAL: no epoch defined at burn height {}", + parent_snapshot.block_height - u64::from(epoch_id.mining_commitment_window()) + ) + }) + .epoch_id; + if !burnchain.is_in_prepare_phase(parent_snapshot.block_height + 1) && !burnchain .pox_constants .is_after_pox_sunset_end(parent_snapshot.block_height + 1, epoch_id) + && (epoch_id < StacksEpochId::Epoch30 || window_start_epoch_id == epoch_id) { // PoX reward-phase is active! 
// build a map of intended sortition -> missed commit for the missed commits @@ -177,11 +246,11 @@ impl BurnchainStateTransition { } } - for blocks_back in 0..(MINING_COMMITMENT_WINDOW - 1) { + for blocks_back in 0..(epoch_id.mining_commitment_window() - 1) { if parent_snapshot.block_height < (blocks_back as u64) { debug!("Mining commitment window shortened because block height is less than window size"; "block_height" => %parent_snapshot.block_height, - "window_size" => %MINING_COMMITMENT_WINDOW); + "window_size" => %epoch_id.mining_commitment_window()); break; } let block_height = parent_snapshot.block_height - (blocks_back as u64); @@ -202,10 +271,16 @@ impl BurnchainStateTransition { windowed_missed_commits.push(missed_commits_at_height); } + test_debug!( + "Block {} is in a reward phase with PoX. Miner commit window is {}: {:?}", + parent_snapshot.block_height + 1, + windowed_block_commits.len(), + &windowed_block_commits + ); } else { - // PoX reward-phase is not active + // PoX reward-phase is not active, or we're starting a new epoch debug!( - "Block {} is in a prepare phase or post-PoX sunset, so no windowing will take place", + "Block {} is in a prepare phase, in the post-PoX sunset, or in an epoch transition, so no windowing will take place", parent_snapshot.block_height + 1 ); @@ -244,8 +319,9 @@ impl BurnchainStateTransition { // calculate the burn distribution from these operations. 
// The resulting distribution will contain the user burns that match block commits let burn_dist = BurnSamplePoint::make_min_median_distribution( - windowed_block_commits, - windowed_missed_commits, + epoch_id.mining_commitment_window(), + windowed_block_commits.clone(), + windowed_missed_commits.clone(), burn_blocks, ); BurnSamplePoint::prometheus_update_miner_commitments(&burn_dist); @@ -276,6 +352,8 @@ impl BurnchainStateTransition { burn_dist, accepted_ops, consumed_leader_keys, + windowed_block_commits, + windowed_missed_commits, }) } } diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index f7da4a0ae9..3171ec3c98 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -1123,12 +1123,16 @@ impl BurnchainDB { Ok(res.is_some()) } - pub fn get_burnchain_header( + pub fn get_burnchain_header( conn: &DBConn, + indexer: &B, height: u64, ) -> Result, BurnchainError> { - let qry = "SELECT * FROM burnchain_db_block_headers WHERE block_height = ?1"; - let args = &[&u64_to_sql(height)?]; + let Some(hdr) = indexer.read_burnchain_header(height)? 
else { + return Ok(None); + }; + let qry = "SELECT * FROM burnchain_db_block_headers WHERE block_hash = ?1"; + let args = &[&hdr.block_hash]; let res: Option = query_row(conn, qry, args)?; Ok(res) } diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 26511e152c..23dc50f62c 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -37,9 +37,11 @@ use self::bitcoin::{ Error as btc_error, }; use crate::chainstate::burn::distribution::BurnSamplePoint; -use crate::chainstate::burn::operations::leader_block_commit::OUTPUTS_PER_COMMIT; +use crate::chainstate::burn::operations::leader_block_commit::{ + MissedBlockCommit, OUTPUTS_PER_COMMIT, +}; use crate::chainstate::burn::operations::{ - BlockstackOperationType, Error as op_error, LeaderKeyRegisterOp, + BlockstackOperationType, Error as op_error, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME}; @@ -646,6 +648,8 @@ pub struct BurnchainStateTransition { pub burn_dist: Vec, pub accepted_ops: Vec, pub consumed_leader_keys: Vec, + pub windowed_block_commits: Vec>, + pub windowed_missed_commits: Vec>, } /// The burnchain block's state transition's ops: diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index 97c9366fec..e9a54bd041 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -547,11 +547,12 @@ fn test_process_block_ops() { // everything will be included let block_opshash_124 = OpsHash::from_txids( - &block_ops_124 + block_ops_124 .clone() .into_iter() .map(|bo| bo.txid()) - .collect(), + .collect::>() + .as_slice(), ); let block_prev_chs_124 = vec![ block_123_snapshot.consensus_hash.clone(), diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index 9c3b5ee477..5a8d958f12 100644 --- 
a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -231,6 +231,15 @@ fn test_store_and_fetch() { } assert_eq!(&header, &non_canonical_block.header()); + // when we get a block header by its height, it's canonical + for (height, header) in headers.iter().enumerate() { + let hdr = BurnchainDB::get_burnchain_header(burnchain_db.conn(), &headers, height as u64) + .unwrap() + .unwrap(); + assert!(headers.iter().find(|h| **h == hdr).is_some()); + assert_ne!(hdr, non_canonical_block.header()); + } + let looked_up_canon = burnchain_db.get_canonical_chain_tip().unwrap(); assert_eq!(&looked_up_canon, &canonical_block.header()); diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index fc7f6993a8..10e83605b3 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -405,7 +405,6 @@ impl TestBurnchainBlock { new_seed: Option, epoch_marker: u8, ) -> LeaderBlockCommitOp { - let input = (Txid([0; 32]), 0); let pubks = miner .privks .iter() @@ -442,8 +441,22 @@ impl TestBurnchainBlock { &last_snapshot_with_sortition.sortition_id, ) .expect("FATAL: failed to read block commit"); + + let input = SortitionDB::get_last_block_commit_by_sender(ic.conn(), &apparent_sender) + .unwrap() + .map(|commit| (commit.txid.clone(), 1 + (commit.commit_outs.len() as u32))) + .unwrap_or((Txid([0x00; 32]), 0)); + + test_debug!("Last input from {} is {:?}", &apparent_sender, &input); + let mut txop = match get_commit_res { Some(parent) => { + test_debug!( + "Block-commit for {} (burn height {}) builds on leader block-commit {:?}", + block_hash, + self.block_height, + &parent + ); let txop = LeaderBlockCommitOp::new( block_hash, self.block_height, diff --git a/stackslib/src/chainstate/burn/atc.rs b/stackslib/src/chainstate/burn/atc.rs new file mode 100644 index 0000000000..510c5d2032 --- /dev/null +++ b/stackslib/src/chainstate/burn/atc.rs @@ -0,0 +1,1529 @@ +// Copyright (C) 2013-2020 
Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use stacks_common::util::uint::Uint256; + +use crate::stacks_common::util::uint::BitArray; + +/// A fixed-point numerical representation for ATC. The integer and fractional parts are both 64 +/// bits. Internally, this is a Uint256 so that safe addition and multiplication can be done. +/// +/// Bits 0-63 are the fraction. +/// Bits 64-127 are the integer. +/// Bits 128-255 are 0's to facilitate safe addition and multiplication. +/// +/// The reasons we use this instead of f64 for ATC calculations are as follows: +/// * This avoids unrepresentable states, like NaN or +/- INF +/// * This avoids ambiguous states, like +0.0 and -0.0. 
+/// * This integrates better into the sortition-sampling system, which uses a u256 to represent a +/// probability range (which is what this is going to be used for) +#[derive(Debug, Clone, PartialEq, Copy, Eq, Hash)] +pub(crate) struct AtcRational(pub(crate) Uint256); +impl AtcRational { + /// Construct from a fraction (numerator and denominator) + pub fn frac(num: u64, den: u64) -> Self { + Self((Uint256::from_u64(num) << 64) / Uint256::from_u64(den)) + } + + /// 0 value + pub fn zero() -> Self { + Self(Uint256::zero()) + } + + /// 1 value + pub fn one() -> Self { + Self(Uint256::one() << 64) + } + + /// largest value less than 1 + pub fn one_sup() -> Self { + Self((Uint256::one() << 64) - Uint256::from_u64(1)) + } + + /// Largest possible value (corresponds to u64::MAX.u64::MAX) + pub fn max() -> Self { + Self((Uint256::from_u64(u64::MAX) << 64) | Uint256::from_u64(u64::MAX)) + } + + /// Get integer part + pub fn ipart(&self) -> u64 { + (self.0 >> 64).low_u64() + } + + /// Is this value overflowed? 
+ pub fn is_overflowed(&self) -> bool { + self.0 > Self::max().0 + } + + /// Checked addition + pub fn add(&self, other: &AtcRational) -> Option { + // NOTE: this is always safe since u128::MAX + u128::MAX < Uint256::max() + let sum = AtcRational(self.0 + other.0); + if sum.is_overflowed() { + return None; + } + Some(sum) + } + + /// Checked subtraction + pub fn sub(&self, other: &AtcRational) -> Option { + if self.0 < other.0 { + return None; + } + Some(AtcRational(self.0 - other.0)) + } + + /// Checked multiplication + pub fn mul(&self, other: &AtcRational) -> Option { + // NOTE: this is always safe since u128::MAX * u128::MAX < Uint256::max() + let prod = AtcRational((self.0 * other.0) >> 64); + if prod.is_overflowed() { + return None; + } + Some(prod) + } + + /// Minimum of self and other + pub fn min(&self, other: &AtcRational) -> Self { + if self.0 < other.0 { + Self(self.0.clone()) + } else { + Self(other.0.clone()) + } + } + + /// Hex representation of the inner bits + pub fn to_hex(&self) -> String { + self.0.to_hex_be() + } + + /// Inner u256 ref + pub fn inner(&self) -> &Uint256 { + &self.0 + } + + /// Inner u256, for conversion to something a BurnSamplePoint can use + pub fn into_inner(self) -> Uint256 { + self.0 + } + + /// Convert to a BurnSamplePoint probability for use in calculating a sortition + pub fn into_sortition_probability(self) -> Uint256 { + // AtcRational's fractional part is only 64 bits, so we need to scale it up so that it occupies the + // upper 64 bits of the burn sample point ranges so as to accurately represent the fraction + // of mining power the null miner has. + let prob_u256 = if self.inner() >= Self::one().inner() { + // prevent left-shift overflow + Self::one_sup().into_inner() << 192 + } else { + self.into_inner() << 192 + }; + prob_u256 + } +} + +/// Pre-calculated 1024-member lookup table for the null miner advantage function, as AtcRational +/// fixed point integers. 
The first item corresponds to the value of the function at 0.0, and the +/// last item corresponds to the function at 1.0 - (1.0 / 1024.0). The input to a function is the +/// assumed total commit carryover -- the ratio between what the winning miner paid in this +/// block-commit to the median of what they historically paid (for an epoch-defined search window +/// size). A value greater than 1.0 means that the miner paid all of the assumed commit +/// carry-over, and the null miner has negligible chances of winning. A value less than 1.0 means +/// that the miner underpaid relative to their past performance, and the closer to 0.0 this ratio +/// is, the more likely the null miner wins and this miner loses. +/// +/// This table is generated with `make_null_miner_lookup_table()` above. +pub(crate) const ATC_LOOKUP: [AtcRational; 1024] = [ + AtcRational(Uint256([14665006693661589504, 0, 0, 0])), + AtcRational(Uint256([14663943061084833792, 0, 0, 0])), + AtcRational(Uint256([14662867262262108160, 0, 0, 0])), + AtcRational(Uint256([14661779159858638848, 0, 0, 0])), + AtcRational(Uint256([14660678615031697408, 0, 0, 0])), + AtcRational(Uint256([14659565487415023616, 0, 0, 0])), + AtcRational(Uint256([14658439635103131648, 0, 0, 0])), + AtcRational(Uint256([14657300914635431936, 0, 0, 0])), + AtcRational(Uint256([14656149180980262912, 0, 0, 0])), + AtcRational(Uint256([14654984287518758912, 0, 0, 0])), + AtcRational(Uint256([14653806086028572672, 0, 0, 0])), + AtcRational(Uint256([14652614426667460608, 0, 0, 0])), + AtcRational(Uint256([14651409157956749312, 0, 0, 0])), + AtcRational(Uint256([14650190126764625920, 0, 0, 0])), + AtcRational(Uint256([14648957178289305600, 0, 0, 0])), + AtcRational(Uint256([14647710156042049536, 0, 0, 0])), + AtcRational(Uint256([14646448901830051840, 0, 0, 0])), + AtcRational(Uint256([14645173255739158528, 0, 0, 0])), + AtcRational(Uint256([14643883056116467712, 0, 0, 0])), + AtcRational(Uint256([14642578139552755712, 0, 0, 0])), + 
AtcRational(Uint256([14641258340864796672, 0, 0, 0])), + AtcRational(Uint256([14639923493077501952, 0, 0, 0])), + AtcRational(Uint256([14638573427405920256, 0, 0, 0])), + AtcRational(Uint256([14637207973237102592, 0, 0, 0])), + AtcRational(Uint256([14635826958111819776, 0, 0, 0])), + AtcRational(Uint256([14634430207706118144, 0, 0, 0])), + AtcRational(Uint256([14633017545812742144, 0, 0, 0])), + AtcRational(Uint256([14631588794322399232, 0, 0, 0])), + AtcRational(Uint256([14630143773204873216, 0, 0, 0])), + AtcRational(Uint256([14628682300490010624, 0, 0, 0])), + AtcRational(Uint256([14627204192248543232, 0, 0, 0])), + AtcRational(Uint256([14625709262572754944, 0, 0, 0])), + AtcRational(Uint256([14624197323557009408, 0, 0, 0])), + AtcRational(Uint256([14622668185278134272, 0, 0, 0])), + AtcRational(Uint256([14621121655775633408, 0, 0, 0])), + AtcRational(Uint256([14619557541031794688, 0, 0, 0])), + AtcRational(Uint256([14617975644951588864, 0, 0, 0])), + AtcRational(Uint256([14616375769342470144, 0, 0, 0])), + AtcRational(Uint256([14614757713894002688, 0, 0, 0])), + AtcRational(Uint256([14613121276157339648, 0, 0, 0])), + AtcRational(Uint256([14611466251524579328, 0, 0, 0])), + AtcRational(Uint256([14609792433207912448, 0, 0, 0])), + AtcRational(Uint256([14608099612218703872, 0, 0, 0])), + AtcRational(Uint256([14606387577346342912, 0, 0, 0])), + AtcRational(Uint256([14604656115137021952, 0, 0, 0])), + AtcRational(Uint256([14602905009872304128, 0, 0, 0])), + AtcRational(Uint256([14601134043547590656, 0, 0, 0])), + AtcRational(Uint256([14599342995850407936, 0, 0, 0])), + AtcRational(Uint256([14597531644138579968, 0, 0, 0])), + AtcRational(Uint256([14595699763418222592, 0, 0, 0])), + AtcRational(Uint256([14593847126321623040, 0, 0, 0])), + AtcRational(Uint256([14591973503084957696, 0, 0, 0])), + AtcRational(Uint256([14590078661525866496, 0, 0, 0])), + AtcRational(Uint256([14588162367020904448, 0, 0, 0])), + AtcRational(Uint256([14586224382482810880, 0, 0, 0])), + 
AtcRational(Uint256([14584264468337694720, 0, 0, 0])), + AtcRational(Uint256([14582282382502025216, 0, 0, 0])), + AtcRational(Uint256([14580277880359520256, 0, 0, 0])), + AtcRational(Uint256([14578250714737874944, 0, 0, 0])), + AtcRational(Uint256([14576200635885367296, 0, 0, 0])), + AtcRational(Uint256([14574127391447336960, 0, 0, 0])), + AtcRational(Uint256([14572030726442487808, 0, 0, 0])), + AtcRational(Uint256([14569910383239120896, 0, 0, 0])), + AtcRational(Uint256([14567766101531174912, 0, 0, 0])), + AtcRational(Uint256([14565597618314184704, 0, 0, 0])), + AtcRational(Uint256([14563404667861078016, 0, 0, 0])), + AtcRational(Uint256([14561186981697867776, 0, 0, 0])), + AtcRational(Uint256([14558944288579205120, 0, 0, 0])), + AtcRational(Uint256([14556676314463823872, 0, 0, 0])), + AtcRational(Uint256([14554382782489843712, 0, 0, 0])), + AtcRational(Uint256([14552063412949977088, 0, 0, 0])), + AtcRational(Uint256([14549717923266603008, 0, 0, 0])), + AtcRational(Uint256([14547346027966732288, 0, 0, 0])), + AtcRational(Uint256([14544947438656860160, 0, 0, 0])), + AtcRational(Uint256([14542521863997716480, 0, 0, 0])), + AtcRational(Uint256([14540069009678876672, 0, 0, 0])), + AtcRational(Uint256([14537588578393323520, 0, 0, 0])), + AtcRational(Uint256([14535080269811841024, 0, 0, 0])), + AtcRational(Uint256([14532543780557377536, 0, 0, 0])), + AtcRational(Uint256([14529978804179232768, 0, 0, 0])), + AtcRational(Uint256([14527385031127242752, 0, 0, 0])), + AtcRational(Uint256([14524762148725782528, 0, 0, 0])), + AtcRational(Uint256([14522109841147760640, 0, 0, 0])), + AtcRational(Uint256([14519427789388460032, 0, 0, 0])), + AtcRational(Uint256([14516715671239366656, 0, 0, 0])), + AtcRational(Uint256([14513973161261858816, 0, 0, 0])), + AtcRational(Uint256([14511199930760869888, 0, 0, 0])), + AtcRational(Uint256([14508395647758436352, 0, 0, 0])), + AtcRational(Uint256([14505559976967245824, 0, 0, 0])), + AtcRational(Uint256([14502692579764047872, 0, 0, 0])), + 
AtcRational(Uint256([14499793114163054592, 0, 0, 0])), + AtcRational(Uint256([14496861234789287936, 0, 0, 0])), + AtcRational(Uint256([14493896592851855360, 0, 0, 0])), + AtcRational(Uint256([14490898836117196800, 0, 0, 0])), + AtcRational(Uint256([14487867608882292736, 0, 0, 0])), + AtcRational(Uint256([14484802551947833344, 0, 0, 0])), + AtcRational(Uint256([14481703302591363072, 0, 0, 0])), + AtcRational(Uint256([14478569494540392448, 0, 0, 0])), + AtcRational(Uint256([14475400757945503744, 0, 0, 0])), + AtcRational(Uint256([14472196719353440256, 0, 0, 0])), + AtcRational(Uint256([14468957001680179200, 0, 0, 0])), + AtcRational(Uint256([14465681224184016896, 0, 0, 0])), + AtcRational(Uint256([14462369002438653952, 0, 0, 0])), + AtcRational(Uint256([14459019948306282496, 0, 0, 0])), + AtcRational(Uint256([14455633669910710272, 0, 0, 0])), + AtcRational(Uint256([14452209771610484736, 0, 0, 0])), + AtcRational(Uint256([14448747853972076544, 0, 0, 0])), + AtcRational(Uint256([14445247513743073280, 0, 0, 0])), + AtcRational(Uint256([14441708343825438720, 0, 0, 0])), + AtcRational(Uint256([14438129933248808960, 0, 0, 0])), + AtcRational(Uint256([14434511867143868416, 0, 0, 0])), + AtcRational(Uint256([14430853726715774976, 0, 0, 0])), + AtcRational(Uint256([14427155089217667072, 0, 0, 0])), + AtcRational(Uint256([14423415527924258816, 0, 0, 0])), + AtcRational(Uint256([14419634612105521152, 0, 0, 0])), + AtcRational(Uint256([14415811907000477696, 0, 0, 0])), + AtcRational(Uint256([14411946973791092736, 0, 0, 0])), + AtcRational(Uint256([14408039369576282112, 0, 0, 0])), + AtcRational(Uint256([14404088647346073600, 0, 0, 0])), + AtcRational(Uint256([14400094355955869696, 0, 0, 0])), + AtcRational(Uint256([14396056040100884480, 0, 0, 0])), + AtcRational(Uint256([14391973240290742272, 0, 0, 0])), + AtcRational(Uint256([14387845492824211456, 0, 0, 0])), + AtcRational(Uint256([14383672329764151296, 0, 0, 0])), + AtcRational(Uint256([14379453278912624640, 0, 0, 0])), + 
AtcRational(Uint256([14375187863786246144, 0, 0, 0])), + AtcRational(Uint256([14370875603591677952, 0, 0, 0])), + AtcRational(Uint256([14366516013201418240, 0, 0, 0])), + AtcRational(Uint256([14362108603129778176, 0, 0, 0])), + AtcRational(Uint256([14357652879509125120, 0, 0, 0])), + AtcRational(Uint256([14353148344066387968, 0, 0, 0])), + AtcRational(Uint256([14348594494099806208, 0, 0, 0])), + AtcRational(Uint256([14343990822456012800, 0, 0, 0])), + AtcRational(Uint256([14339336817507360768, 0, 0, 0])), + AtcRational(Uint256([14334631963129606144, 0, 0, 0])), + AtcRational(Uint256([14329875738679891968, 0, 0, 0])), + AtcRational(Uint256([14325067618975068160, 0, 0, 0])), + AtcRational(Uint256([14320207074270386176, 0, 0, 0])), + AtcRational(Uint256([14315293570238543872, 0, 0, 0])), + AtcRational(Uint256([14310326567949113344, 0, 0, 0])), + AtcRational(Uint256([14305305523848388608, 0, 0, 0])), + AtcRational(Uint256([14300229889739610112, 0, 0, 0])), + AtcRational(Uint256([14295099112763666432, 0, 0, 0])), + AtcRational(Uint256([14289912635380201472, 0, 0, 0])), + AtcRational(Uint256([14284669895349196800, 0, 0, 0])), + AtcRational(Uint256([14279370325713045504, 0, 0, 0])), + AtcRational(Uint256([14274013354779123712, 0, 0, 0])), + AtcRational(Uint256([14268598406102849536, 0, 0, 0])), + AtcRational(Uint256([14263124898471307264, 0, 0, 0])), + AtcRational(Uint256([14257592245887395840, 0, 0, 0])), + AtcRational(Uint256([14251999857554575360, 0, 0, 0])), + AtcRational(Uint256([14246347137862176768, 0, 0, 0])), + AtcRational(Uint256([14240633486371330048, 0, 0, 0])), + AtcRational(Uint256([14234858297801515008, 0, 0, 0])), + AtcRational(Uint256([14229020962017785856, 0, 0, 0])), + AtcRational(Uint256([14223120864018599936, 0, 0, 0])), + AtcRational(Uint256([14217157383924420608, 0, 0, 0])), + AtcRational(Uint256([14211129896966959104, 0, 0, 0])), + AtcRational(Uint256([14205037773479176192, 0, 0, 0])), + AtcRational(Uint256([14198880378886055936, 0, 0, 0])), + 
AtcRational(Uint256([14192657073696112640, 0, 0, 0])), + AtcRational(Uint256([14186367213493727232, 0, 0, 0])), + AtcRational(Uint256([14180010148932296704, 0, 0, 0])), + AtcRational(Uint256([14173585225728227328, 0, 0, 0])), + AtcRational(Uint256([14167091784655794176, 0, 0, 0])), + AtcRational(Uint256([14160529161542889472, 0, 0, 0])), + AtcRational(Uint256([14153896687267710976, 0, 0, 0])), + AtcRational(Uint256([14147193687756355584, 0, 0, 0])), + AtcRational(Uint256([14140419483981410304, 0, 0, 0])), + AtcRational(Uint256([14133573391961522176, 0, 0, 0])), + AtcRational(Uint256([14126654722761990144, 0, 0, 0])), + AtcRational(Uint256([14119662782496409600, 0, 0, 0])), + AtcRational(Uint256([14112596872329363456, 0, 0, 0])), + AtcRational(Uint256([14105456288480262144, 0, 0, 0])), + AtcRational(Uint256([14098240322228244480, 0, 0, 0])), + AtcRational(Uint256([14090948259918305280, 0, 0, 0])), + AtcRational(Uint256([14083579382968543232, 0, 0, 0])), + AtcRational(Uint256([14076132967878658048, 0, 0, 0])), + AtcRational(Uint256([14068608286239690752, 0, 0, 0])), + AtcRational(Uint256([14061004604745011200, 0, 0, 0])), + AtcRational(Uint256([14053321185202620416, 0, 0, 0])), + AtcRational(Uint256([14045557284548792320, 0, 0, 0])), + AtcRational(Uint256([14037712154863056896, 0, 0, 0])), + AtcRational(Uint256([14029785043384606720, 0, 0, 0])), + AtcRational(Uint256([14021775192530079744, 0, 0, 0])), + AtcRational(Uint256([14013681839912861696, 0, 0, 0])), + AtcRational(Uint256([14005504218363817984, 0, 0, 0])), + AtcRational(Uint256([13997241555953580032, 0, 0, 0])), + AtcRational(Uint256([13988893076016375808, 0, 0, 0])), + AtcRational(Uint256([13980457997175449600, 0, 0, 0])), + AtcRational(Uint256([13971935533370089472, 0, 0, 0])), + AtcRational(Uint256([13963324893884334080, 0, 0, 0])), + AtcRational(Uint256([13954625283377340416, 0, 0, 0])), + AtcRational(Uint256([13945835901915490304, 0, 0, 0])), + AtcRational(Uint256([13936955945006243840, 0, 0, 0])), + 
AtcRational(Uint256([13927984603633807360, 0, 0, 0])), + AtcRational(Uint256([13918921064296585216, 0, 0, 0])), + AtcRational(Uint256([13909764509046546432, 0, 0, 0])), + AtcRational(Uint256([13900514115530459136, 0, 0, 0])), + AtcRational(Uint256([13891169057033058304, 0, 0, 0])), + AtcRational(Uint256([13881728502522195968, 0, 0, 0])), + AtcRational(Uint256([13872191616696016896, 0, 0, 0])), + AtcRational(Uint256([13862557560032120832, 0, 0, 0])), + AtcRational(Uint256([13852825488838891520, 0, 0, 0])), + AtcRational(Uint256([13842994555308853248, 0, 0, 0])), + AtcRational(Uint256([13833063907574269952, 0, 0, 0])), + AtcRational(Uint256([13823032689764870144, 0, 0, 0])), + AtcRational(Uint256([13812900042067845120, 0, 0, 0])), + AtcRational(Uint256([13802665100790099968, 0, 0, 0])), + AtcRational(Uint256([13792326998422816768, 0, 0, 0])), + AtcRational(Uint256([13781884863708366848, 0, 0, 0])), + AtcRational(Uint256([13771337821709592576, 0, 0, 0])), + AtcRational(Uint256([13760684993881540608, 0, 0, 0])), + AtcRational(Uint256([13749925498145615872, 0, 0, 0])), + AtcRational(Uint256([13739058448966279168, 0, 0, 0])), + AtcRational(Uint256([13728082957430233088, 0, 0, 0])), + AtcRational(Uint256([13716998131328233472, 0, 0, 0])), + AtcRational(Uint256([13705803075239473152, 0, 0, 0])), + AtcRational(Uint256([13694496890618648576, 0, 0, 0])), + AtcRational(Uint256([13683078675885682688, 0, 0, 0])), + AtcRational(Uint256([13671547526518214656, 0, 0, 0])), + AtcRational(Uint256([13659902535146829824, 0, 0, 0])), + AtcRational(Uint256([13648142791653093376, 0, 0, 0])), + AtcRational(Uint256([13636267383270436864, 0, 0, 0])), + AtcRational(Uint256([13624275394687913984, 0, 0, 0])), + AtcRational(Uint256([13612165908156874752, 0, 0, 0])), + AtcRational(Uint256([13599938003600584704, 0, 0, 0])), + AtcRational(Uint256([13587590758726844416, 0, 0, 0])), + AtcRational(Uint256([13575123249143625728, 0, 0, 0])), + AtcRational(Uint256([13562534548477763584, 0, 0, 0])), + 
AtcRational(Uint256([13549823728496742400, 0, 0, 0])), + AtcRational(Uint256([13536989859233630208, 0, 0, 0])), + AtcRational(Uint256([13524032009115150336, 0, 0, 0])), + AtcRational(Uint256([13510949245092962304, 0, 0, 0])), + AtcRational(Uint256([13497740632778186752, 0, 0, 0])), + AtcRational(Uint256([13484405236579164160, 0, 0, 0])), + AtcRational(Uint256([13470942119842529280, 0, 0, 0])), + AtcRational(Uint256([13457350344997619712, 0, 0, 0])), + AtcRational(Uint256([13443628973704206336, 0, 0, 0])), + AtcRational(Uint256([13429777067003654144, 0, 0, 0])), + AtcRational(Uint256([13415793685473462272, 0, 0, 0])), + AtcRational(Uint256([13401677889385263104, 0, 0, 0])), + AtcRational(Uint256([13387428738866302976, 0, 0, 0])), + AtcRational(Uint256([13373045294064392192, 0, 0, 0])), + AtcRational(Uint256([13358526615316400128, 0, 0, 0])), + AtcRational(Uint256([13343871763320287232, 0, 0, 0])), + AtcRational(Uint256([13329079799310704640, 0, 0, 0])), + AtcRational(Uint256([13314149785238190080, 0, 0, 0])), + AtcRational(Uint256([13299080783951974400, 0, 0, 0])), + AtcRational(Uint256([13283871859386413056, 0, 0, 0])), + AtcRational(Uint256([13268522076751075328, 0, 0, 0])), + AtcRational(Uint256([13253030502724497408, 0, 0, 0])), + AtcRational(Uint256([13237396205651617792, 0, 0, 0])), + AtcRational(Uint256([13221618255744899072, 0, 0, 0])), + AtcRational(Uint256([13205695725289166848, 0, 0, 0])), + AtcRational(Uint256([13189627688850184192, 0, 0, 0])), + AtcRational(Uint256([13173413223486908416, 0, 0, 0])), + AtcRational(Uint256([13157051408967542784, 0, 0, 0])), + AtcRational(Uint256([13140541327989270528, 0, 0, 0])), + AtcRational(Uint256([13123882066401785856, 0, 0, 0])), + AtcRational(Uint256([13107072713434537984, 0, 0, 0])), + AtcRational(Uint256([13090112361927747584, 0, 0, 0])), + AtcRational(Uint256([13073000108567144448, 0, 0, 0])), + AtcRational(Uint256([13055735054122481664, 0, 0, 0])), + AtcRational(Uint256([13038316303689742336, 0, 0, 0])), + 
AtcRational(Uint256([13020742966937124864, 0, 0, 0])), + AtcRational(Uint256([13003014158354718720, 0, 0, 0])), + AtcRational(Uint256([12985128997507874816, 0, 0, 0])), + AtcRational(Uint256([12967086609294301184, 0, 0, 0])), + AtcRational(Uint256([12948886124204806144, 0, 0, 0])), + AtcRational(Uint256([12930526678587715584, 0, 0, 0])), + AtcRational(Uint256([12912007414916904960, 0, 0, 0])), + AtcRational(Uint256([12893327482063446016, 0, 0, 0])), + AtcRational(Uint256([12874486035570843648, 0, 0, 0])), + AtcRational(Uint256([12855482237933809664, 0, 0, 0])), + AtcRational(Uint256([12836315258880561152, 0, 0, 0])), + AtcRational(Uint256([12816984275658594304, 0, 0, 0])), + AtcRational(Uint256([12797488473323913216, 0, 0, 0])), + AtcRational(Uint256([12777827045033641984, 0, 0, 0])), + AtcRational(Uint256([12757999192342022144, 0, 0, 0])), + AtcRational(Uint256([12738004125499680768, 0, 0, 0])), + AtcRational(Uint256([12717841063756201984, 0, 0, 0])), + AtcRational(Uint256([12697509235665854464, 0, 0, 0])), + AtcRational(Uint256([12677007879396530176, 0, 0, 0])), + AtcRational(Uint256([12656336243041691648, 0, 0, 0])), + AtcRational(Uint256([12635493584935419904, 0, 0, 0])), + AtcRational(Uint256([12614479173970364416, 0, 0, 0])), + AtcRational(Uint256([12593292289918617600, 0, 0, 0])), + AtcRational(Uint256([12571932223755370496, 0, 0, 0])), + AtcRational(Uint256([12550398277985329152, 0, 0, 0])), + AtcRational(Uint256([12528689766971766784, 0, 0, 0])), + AtcRational(Uint256([12506806017268160512, 0, 0, 0])), + AtcRational(Uint256([12484746367952306176, 0, 0, 0])), + AtcRational(Uint256([12462510170962810880, 0, 0, 0])), + AtcRational(Uint256([12440096791437899776, 0, 0, 0])), + AtcRational(Uint256([12417505608056395776, 0, 0, 0])), + AtcRational(Uint256([12394736013380814848, 0, 0, 0])), + AtcRational(Uint256([12371787414202433536, 0, 0, 0])), + AtcRational(Uint256([12348659231888226304, 0, 0, 0])), + AtcRational(Uint256([12325350902729566208, 0, 0, 0])), + 
AtcRational(Uint256([12301861878292580352, 0, 0, 0])), + AtcRational(Uint256([12278191625770014720, 0, 0, 0])), + AtcRational(Uint256([12254339628334479360, 0, 0, 0])), + AtcRational(Uint256([12230305385492973568, 0, 0, 0])), + AtcRational(Uint256([12206088413442545664, 0, 0, 0])), + AtcRational(Uint256([12181688245426927616, 0, 0, 0])), + AtcRational(Uint256([12157104432094023680, 0, 0, 0])), + AtcRational(Uint256([12132336541854107648, 0, 0, 0])), + AtcRational(Uint256([12107384161238581248, 0, 0, 0])), + AtcRational(Uint256([12082246895259109376, 0, 0, 0])), + AtcRational(Uint256([12056924367767033856, 0, 0, 0])), + AtcRational(Uint256([12031416221812840448, 0, 0, 0])), + AtcRational(Uint256([12005722120005560320, 0, 0, 0])), + AtcRational(Uint256([11979841744871907328, 0, 0, 0])), + AtcRational(Uint256([11953774799215020032, 0, 0, 0])), + AtcRational(Uint256([11927521006472566784, 0, 0, 0])), + AtcRational(Uint256([11901080111074107392, 0, 0, 0])), + AtcRational(Uint256([11874451878797459456, 0, 0, 0])), + AtcRational(Uint256([11847636097123960832, 0, 0, 0])), + AtcRational(Uint256([11820632575592335360, 0, 0, 0])), + AtcRational(Uint256([11793441146151079936, 0, 0, 0])), + AtcRational(Uint256([11766061663509092352, 0, 0, 0])), + AtcRational(Uint256([11738494005484369920, 0, 0, 0])), + AtcRational(Uint256([11710738073350592512, 0, 0, 0])), + AtcRational(Uint256([11682793792181340160, 0, 0, 0])), + AtcRational(Uint256([11654661111191783424, 0, 0, 0])), + AtcRational(Uint256([11626340004077604864, 0, 0, 0])), + AtcRational(Uint256([11597830469350934528, 0, 0, 0])), + AtcRational(Uint256([11569132530673096704, 0, 0, 0])), + AtcRational(Uint256([11540246237183952896, 0, 0, 0])), + AtcRational(Uint256([11511171663827582976, 0, 0, 0])), + AtcRational(Uint256([11481908911674114048, 0, 0, 0])), + AtcRational(Uint256([11452458108237473792, 0, 0, 0])), + AtcRational(Uint256([11422819407788793856, 0, 0, 0])), + AtcRational(Uint256([11392992991665272832, 0, 0, 0])), + 
AtcRational(Uint256([11362979068574269440, 0, 0, 0])), + AtcRational(Uint256([11332777874892353536, 0, 0, 0])), + AtcRational(Uint256([11302389674959124480, 0, 0, 0])), + AtcRational(Uint256([11271814761365499904, 0, 0, 0])), + AtcRational(Uint256([11241053455236325376, 0, 0, 0])), + AtcRational(Uint256([11210106106506956800, 0, 0, 0])), + AtcRational(Uint256([11178973094193678336, 0, 0, 0])), + AtcRational(Uint256([11147654826657650688, 0, 0, 0])), + AtcRational(Uint256([11116151741862152192, 0, 0, 0])), + AtcRational(Uint256([11084464307622914048, 0, 0, 0])), + AtcRational(Uint256([11052593021851269120, 0, 0, 0])), + AtcRational(Uint256([11020538412789880832, 0, 0, 0])), + AtcRational(Uint256([10988301039240828928, 0, 0, 0])), + AtcRational(Uint256([10955881490785785856, 0, 0, 0])), + AtcRational(Uint256([10923280387998085120, 0, 0, 0])), + AtcRational(Uint256([10890498382646384640, 0, 0, 0])), + AtcRational(Uint256([10857536157889769472, 0, 0, 0])), + AtcRational(Uint256([10824394428463968256, 0, 0, 0])), + AtcRational(Uint256([10791073940858529792, 0, 0, 0])), + AtcRational(Uint256([10757575473484689408, 0, 0, 0])), + AtcRational(Uint256([10723899836833691648, 0, 0, 0])), + AtcRational(Uint256([10690047873625384960, 0, 0, 0])), + AtcRational(Uint256([10656020458946807808, 0, 0, 0])), + AtcRational(Uint256([10621818500380600320, 0, 0, 0])), + AtcRational(Uint256([10587442938122995712, 0, 0, 0])), + AtcRational(Uint256([10552894745091184640, 0, 0, 0])), + AtcRational(Uint256([10518174927019845632, 0, 0, 0])), + AtcRational(Uint256([10483284522546655232, 0, 0, 0])), + AtcRational(Uint256([10448224603286523904, 0, 0, 0])), + AtcRational(Uint256([10412996273894438912, 0, 0, 0])), + AtcRational(Uint256([10377600672116664320, 0, 0, 0])), + AtcRational(Uint256([10342038968830132224, 0, 0, 0])), + AtcRational(Uint256([10306312368069857280, 0, 0, 0])), + AtcRational(Uint256([10270422107044188160, 0, 0, 0])), + AtcRational(Uint256([10234369456137705472, 0, 0, 0])), + 
AtcRational(Uint256([10198155718901680128, 0, 0, 0])), + AtcRational(Uint256([10161782232031832064, 0, 0, 0])), + AtcRational(Uint256([10125250365333327872, 0, 0, 0])), + AtcRational(Uint256([10088561521672830976, 0, 0, 0])), + AtcRational(Uint256([10051717136917477376, 0, 0, 0])), + AtcRational(Uint256([10014718679860666368, 0, 0, 0])), + AtcRational(Uint256([9977567652134516736, 0, 0, 0])), + AtcRational(Uint256([9940265588108912640, 0, 0, 0])), + AtcRational(Uint256([9902814054777008128, 0, 0, 0])), + AtcRational(Uint256([9865214651627091968, 0, 0, 0])), + AtcRational(Uint256([9827469010500773888, 0, 0, 0])), + AtcRational(Uint256([9789578795437342720, 0, 0, 0])), + AtcRational(Uint256([9751545702504284160, 0, 0, 0])), + AtcRational(Uint256([9713371459613874176, 0, 0, 0])), + AtcRational(Uint256([9675057826325798912, 0, 0, 0])), + AtcRational(Uint256([9636606593635780608, 0, 0, 0])), + AtcRational(Uint256([9598019583750131712, 0, 0, 0])), + AtcRational(Uint256([9559298649846272000, 0, 0, 0])), + AtcRational(Uint256([9520445675819153408, 0, 0, 0])), + AtcRational(Uint256([9481462576013621248, 0, 0, 0])), + AtcRational(Uint256([9442351294942703616, 0, 0, 0])), + AtcRational(Uint256([9403113806991841280, 0, 0, 0])), + AtcRational(Uint256([9363752116109119488, 0, 0, 0])), + AtcRational(Uint256([9324268255481511936, 0, 0, 0])), + AtcRational(Uint256([9284664287197179904, 0, 0, 0])), + AtcRational(Uint256([9244942301893949440, 0, 0, 0])), + AtcRational(Uint256([9205104418393949184, 0, 0, 0])), + AtcRational(Uint256([9165152783324563456, 0, 0, 0])), + AtcRational(Uint256([9125089570725771264, 0, 0, 0])), + AtcRational(Uint256([9084916981643961344, 0, 0, 0])), + AtcRational(Uint256([9044637243712360448, 0, 0, 0])), + AtcRational(Uint256([9004252610718200832, 0, 0, 0])), + AtcRational(Uint256([8963765362156744704, 0, 0, 0])), + AtcRational(Uint256([8923177802772338688, 0, 0, 0])), + AtcRational(Uint256([8882492262086646784, 0, 0, 0])), + 
AtcRational(Uint256([8841711093914219520, 0, 0, 0])), + AtcRational(Uint256([8800836675865615360, 0, 0, 0])), + AtcRational(Uint256([8759871408838231040, 0, 0, 0])), + AtcRational(Uint256([8718817716495054848, 0, 0, 0])), + AtcRational(Uint256([8677678044731567104, 0, 0, 0])), + AtcRational(Uint256([8636454861130998784, 0, 0, 0])), + AtcRational(Uint256([8595150654408180736, 0, 0, 0])), + AtcRational(Uint256([8553767933842236416, 0, 0, 0])), + AtcRational(Uint256([8512309228698363904, 0, 0, 0])), + AtcRational(Uint256([8470777087638975488, 0, 0, 0])), + AtcRational(Uint256([8429174078124461056, 0, 0, 0])), + AtcRational(Uint256([8387502785803874304, 0, 0, 0])), + AtcRational(Uint256([8345765813895795712, 0, 0, 0])), + AtcRational(Uint256([8303965782559726592, 0, 0, 0])), + AtcRational(Uint256([8262105328258275328, 0, 0, 0])), + AtcRational(Uint256([8220187103110477824, 0, 0, 0])), + AtcRational(Uint256([8178213774236573696, 0, 0, 0])), + AtcRational(Uint256([8136188023094564864, 0, 0, 0])), + AtcRational(Uint256([8094112544808916992, 0, 0, 0])), + AtcRational(Uint256([8051990047491715072, 0, 0, 0])), + AtcRational(Uint256([8009823251556677632, 0, 0, 0])), + AtcRational(Uint256([7967614889026356224, 0, 0, 0])), + AtcRational(Uint256([7925367702832887808, 0, 0, 0])), + AtcRational(Uint256([7883084446112715776, 0, 0, 0])), + AtcRational(Uint256([7840767881495595008, 0, 0, 0])), + AtcRational(Uint256([7798420780388343808, 0, 0, 0])), + AtcRational(Uint256([7756045922253651968, 0, 0, 0])), + AtcRational(Uint256([7713646093884422144, 0, 0, 0])), + AtcRational(Uint256([7671224088673970176, 0, 0, 0])), + AtcRational(Uint256([7628782705882552320, 0, 0, 0])), + AtcRational(Uint256([7586324749900575744, 0, 0, 0])), + AtcRational(Uint256([7543853029508941824, 0, 0, 0])), + AtcRational(Uint256([7501370357136906240, 0, 0, 0])), + AtcRational(Uint256([7458879548117898240, 0, 0, 0])), + AtcRational(Uint256([7416383419943693312, 0, 0, 0])), + 
AtcRational(Uint256([7373884791517374464, 0, 0, 0])), + AtcRational(Uint256([7331386482405493760, 0, 0, 0])), + AtcRational(Uint256([7288891312089871360, 0, 0, 0])), + AtcRational(Uint256([7246402099219427328, 0, 0, 0])), + AtcRational(Uint256([7203921660862483456, 0, 0, 0])), + AtcRational(Uint256([7161452811759982592, 0, 0, 0])), + AtcRational(Uint256([7118998363579975680, 0, 0, 0])), + AtcRational(Uint256([7076561124173879296, 0, 0, 0])), + AtcRational(Uint256([7034143896834856960, 0, 0, 0])), + AtcRational(Uint256([6991749479558778880, 0, 0, 0])), + AtcRational(Uint256([6949380664308144128, 0, 0, 0])), + AtcRational(Uint256([6907040236279402496, 0, 0, 0])), + AtcRational(Uint256([6864730973174070272, 0, 0, 0])), + AtcRational(Uint256([6822455644474029056, 0, 0, 0])), + AtcRational(Uint256([6780217010721434624, 0, 0, 0])), + AtcRational(Uint256([6738017822803616768, 0, 0, 0])), + AtcRational(Uint256([6695860821243351040, 0, 0, 0])), + AtcRational(Uint256([6653748735494901760, 0, 0, 0])), + AtcRational(Uint256([6611684283246219264, 0, 0, 0])), + AtcRational(Uint256([6569670169727631360, 0, 0, 0])), + AtcRational(Uint256([6527709087027459072, 0, 0, 0])), + AtcRational(Uint256([6485803713414843392, 0, 0, 0])), + AtcRational(Uint256([6443956712670195712, 0, 0, 0])), + AtcRational(Uint256([6402170733423590400, 0, 0, 0])), + AtcRational(Uint256([6360448408501444608, 0, 0, 0])), + AtcRational(Uint256([6318792354281820160, 0, 0, 0])), + AtcRational(Uint256([6277205170058672128, 0, 0, 0])), + AtcRational(Uint256([6235689437415347200, 0, 0, 0])), + AtcRational(Uint256([6194247719607663616, 0, 0, 0])), + AtcRational(Uint256([6152882560956841984, 0, 0, 0])), + AtcRational(Uint256([6111596486252597248, 0, 0, 0])), + AtcRational(Uint256([6070392000166668288, 0, 0, 0])), + AtcRational(Uint256([6029271586677042176, 0, 0, 0])), + AtcRational(Uint256([5988237708503158784, 0, 0, 0])), + AtcRational(Uint256([5947292806552320000, 0, 0, 0])), + 
AtcRational(Uint256([5906439299377565696, 0, 0, 0])), + AtcRational(Uint256([5865679582647235584, 0, 0, 0])), + AtcRational(Uint256([5825016028626446336, 0, 0, 0])), + AtcRational(Uint256([5784450985670685696, 0, 0, 0])), + AtcRational(Uint256([5743986777731734528, 0, 0, 0])), + AtcRational(Uint256([5703625703876088832, 0, 0, 0])), + AtcRational(Uint256([5663370037816086528, 0, 0, 0])), + AtcRational(Uint256([5623222027453882368, 0, 0, 0])), + AtcRational(Uint256([5583183894438436864, 0, 0, 0])), + AtcRational(Uint256([5543257833735676928, 0, 0, 0])), + AtcRational(Uint256([5503446013211941888, 0, 0, 0])), + AtcRational(Uint256([5463750573230858240, 0, 0, 0])), + AtcRational(Uint256([5424173626263745536, 0, 0, 0])), + AtcRational(Uint256([5384717256513666048, 0, 0, 0])), + AtcRational(Uint256([5345383519553192960, 0, 0, 0])), + AtcRational(Uint256([5306174441976003584, 0, 0, 0])), + AtcRational(Uint256([5267092021062338560, 0, 0, 0])), + AtcRational(Uint256([5228138224458407936, 0, 0, 0])), + AtcRational(Uint256([5189314989869789184, 0, 0, 0])), + AtcRational(Uint256([5150624224768840704, 0, 0, 0])), + AtcRational(Uint256([5112067806116179968, 0, 0, 0])), + AtcRational(Uint256([5073647580096222208, 0, 0, 0])), + AtcRational(Uint256([5035365361866804224, 0, 0, 0])), + AtcRational(Uint256([4997222935322875904, 0, 0, 0])), + AtcRational(Uint256([4959222052874251264, 0, 0, 0])), + AtcRational(Uint256([4921364435237403648, 0, 0, 0])), + AtcRational(Uint256([4883651771241251840, 0, 0, 0])), + AtcRational(Uint256([4846085717646911488, 0, 0, 0])), + AtcRational(Uint256([4808667898981359616, 0, 0, 0])), + AtcRational(Uint256([4771399907384928256, 0, 0, 0])), + AtcRational(Uint256([4734283302472590336, 0, 0, 0])), + AtcRational(Uint256([4697319611208928256, 0, 0, 0])), + AtcRational(Uint256([4660510327796715520, 0, 0, 0])), + AtcRational(Uint256([4623856913578997760, 0, 0, 0])), + AtcRational(Uint256([4587360796954596352, 0, 0, 0])), + 
AtcRational(Uint256([4551023373306879488, 0, 0, 0])), + AtcRational(Uint256([4514846004945721344, 0, 0, 0])), + AtcRational(Uint256([4478830021062493696, 0, 0, 0])), + AtcRational(Uint256([4442976717697962496, 0, 0, 0])), + AtcRational(Uint256([4407287357722949632, 0, 0, 0])), + AtcRational(Uint256([4371763170831599616, 0, 0, 0])), + AtcRational(Uint256([4336405353547112960, 0, 0, 0])), + AtcRational(Uint256([4301215069239754752, 0, 0, 0])), + AtcRational(Uint256([4266193448156999680, 0, 0, 0])), + AtcRational(Uint256([4231341587465614848, 0, 0, 0])), + AtcRational(Uint256([4196660551305514496, 0, 0, 0])), + AtcRational(Uint256([4162151370855192064, 0, 0, 0])), + AtcRational(Uint256([4127815044408539136, 0, 0, 0])), + AtcRational(Uint256([4093652537462862336, 0, 0, 0])), + AtcRational(Uint256([4059664782817884160, 0, 0, 0])), + AtcRational(Uint256([4025852680685536768, 0, 0, 0])), + AtcRational(Uint256([3992217098810330624, 0, 0, 0])), + AtcRational(Uint256([3958758872600086528, 0, 0, 0])), + AtcRational(Uint256([3925478805266815488, 0, 0, 0])), + AtcRational(Uint256([3892377667977526784, 0, 0, 0])), + AtcRational(Uint256([3859456200014740992, 0, 0, 0])), + AtcRational(Uint256([3826715108946479616, 0, 0, 0])), + AtcRational(Uint256([3794155070805506048, 0, 0, 0])), + AtcRational(Uint256([3761776730277590016, 0, 0, 0])), + AtcRational(Uint256([3729580700898548736, 0, 0, 0])), + AtcRational(Uint256([3697567565259854336, 0, 0, 0])), + AtcRational(Uint256([3665737875222543872, 0, 0, 0])), + AtcRational(Uint256([3634092152139219456, 0, 0, 0])), + AtcRational(Uint256([3602630887083875840, 0, 0, 0])), + AtcRational(Uint256([3571354541089344000, 0, 0, 0])), + AtcRational(Uint256([3540263545392078336, 0, 0, 0])), + AtcRational(Uint256([3509358301684075008, 0, 0, 0])), + AtcRational(Uint256([3478639182371662336, 0, 0, 0])), + AtcRational(Uint256([3448106530840935936, 0, 0, 0])), + AtcRational(Uint256([3417760661729580032, 0, 0, 0])), + 
AtcRational(Uint256([3387601861204853760, 0, 0, 0])), + AtcRational(Uint256([3357630387247493120, 0, 0, 0])), + AtcRational(Uint256([3327846469941282304, 0, 0, 0])), + AtcRational(Uint256([3298250311768075776, 0, 0, 0])), + AtcRational(Uint256([3268842087908014080, 0, 0, 0])), + AtcRational(Uint256([3239621946544709632, 0, 0, 0])), + AtcRational(Uint256([3210590009175161344, 0, 0, 0])), + AtcRational(Uint256([3181746370924176384, 0, 0, 0])), + AtcRational(Uint256([3153091100863047680, 0, 0, 0])), + AtcRational(Uint256([3124624242332286464, 0, 0, 0])), + AtcRational(Uint256([3096345813268148736, 0, 0, 0])), + AtcRational(Uint256([3068255806532773376, 0, 0, 0])), + AtcRational(Uint256([3040354190247658496, 0, 0, 0])), + AtcRational(Uint256([3012640908130307584, 0, 0, 0])), + AtcRational(Uint256([2985115879833786880, 0, 0, 0])), + AtcRational(Uint256([2957779001289008640, 0, 0, 0])), + AtcRational(Uint256([2930630145049504256, 0, 0, 0])), + AtcRational(Uint256([2903669160638502400, 0, 0, 0])), + AtcRational(Uint256([2876895874898083840, 0, 0, 0])), + AtcRational(Uint256([2850310092340229632, 0, 0, 0])), + AtcRational(Uint256([2823911595499543552, 0, 0, 0])), + AtcRational(Uint256([2797700145287476736, 0, 0, 0])), + AtcRational(Uint256([2771675481347832320, 0, 0, 0])), + AtcRational(Uint256([2745837322413390848, 0, 0, 0])), + AtcRational(Uint256([2720185366663441408, 0, 0, 0])), + AtcRational(Uint256([2694719292082065408, 0, 0, 0])), + AtcRational(Uint256([2669438756816964096, 0, 0, 0])), + AtcRational(Uint256([2644343399538680832, 0, 0, 0])), + AtcRational(Uint256([2619432839800029696, 0, 0, 0])), + AtcRational(Uint256([2594706678395571200, 0, 0, 0])), + AtcRational(Uint256([2570164497720961536, 0, 0, 0])), + AtcRational(Uint256([2545805862132034048, 0, 0, 0])), + AtcRational(Uint256([2521630318303431168, 0, 0, 0])), + AtcRational(Uint256([2497637395586657792, 0, 0, 0])), + AtcRational(Uint256([2473826606367389696, 0, 0, 0])), + 
AtcRational(Uint256([2450197446421903360, 0, 0, 0])), + AtcRational(Uint256([2426749395272486912, 0, 0, 0])), + AtcRational(Uint256([2403481916541677568, 0, 0, 0])), + AtcRational(Uint256([2380394458305224704, 0, 0, 0])), + AtcRational(Uint256([2357486453443613696, 0, 0, 0])), + AtcRational(Uint256([2334757319992054272, 0, 0, 0])), + AtcRational(Uint256([2312206461488791552, 0, 0, 0])), + AtcRational(Uint256([2289833267321639936, 0, 0, 0])), + AtcRational(Uint256([2267637113072605440, 0, 0, 0])), + AtcRational(Uint256([2245617360860510720, 0, 0, 0])), + AtcRational(Uint256([2223773359681494528, 0, 0, 0])), + AtcRational(Uint256([2202104445747299072, 0, 0, 0])), + AtcRational(Uint256([2180609942821237760, 0, 0, 0])), + AtcRational(Uint256([2159289162551755520, 0, 0, 0])), + AtcRational(Uint256([2138141404803482112, 0, 0, 0])), + AtcRational(Uint256([2117165957985701120, 0, 0, 0])), + AtcRational(Uint256([2096362099378140928, 0, 0, 0])), + AtcRational(Uint256([2075729095454018048, 0, 0, 0])), + AtcRational(Uint256([2055266202200243968, 0, 0, 0])), + AtcRational(Uint256([2034972665434736128, 0, 0, 0])), + AtcRational(Uint256([2014847721120749056, 0, 0, 0])), + AtcRational(Uint256([1994890595678173952, 0, 0, 0])), + AtcRational(Uint256([1975100506291729152, 0, 0, 0])), + AtcRational(Uint256([1955476661215996672, 0, 0, 0])), + AtcRational(Uint256([1936018260077233664, 0, 0, 0])), + AtcRational(Uint256([1916724494171921152, 0, 0, 0])), + AtcRational(Uint256([1897594546761984256, 0, 0, 0])), + AtcRational(Uint256([1878627593366651904, 0, 0, 0])), + AtcRational(Uint256([1859822802050898432, 0, 0, 0])), + AtcRational(Uint256([1841179333710439168, 0, 0, 0])), + AtcRational(Uint256([1822696342353232640, 0, 0, 0])), + AtcRational(Uint256([1804372975377456640, 0, 0, 0])), + AtcRational(Uint256([1786208373845930240, 0, 0, 0])), + AtcRational(Uint256([1768201672756947200, 0, 0, 0])), + AtcRational(Uint256([1750352001311492352, 0, 0, 0])), + 
AtcRational(Uint256([1732658483176829696, 0, 0, 0])), + AtcRational(Uint256([1715120236746417152, 0, 0, 0])), + AtcRational(Uint256([1697736375396155136, 0, 0, 0])), + AtcRational(Uint256([1680506007736934400, 0, 0, 0])), + AtcRational(Uint256([1663428237863474432, 0, 0, 0])), + AtcRational(Uint256([1646502165599439872, 0, 0, 0])), + AtcRational(Uint256([1629726886738828032, 0, 0, 0])), + AtcRational(Uint256([1613101493283616512, 0, 0, 0])), + AtcRational(Uint256([1596625073677668096, 0, 0, 0])), + AtcRational(Uint256([1580296713036883968, 0, 0, 0])), + AtcRational(Uint256([1564115493375614976, 0, 0, 0])), + AtcRational(Uint256([1548080493829320192, 0, 0, 0])), + AtcRational(Uint256([1532190790873484288, 0, 0, 0])), + AtcRational(Uint256([1516445458538788608, 0, 0, 0])), + AtcRational(Uint256([1500843568622554368, 0, 0, 0])), + AtcRational(Uint256([1485384190896454656, 0, 0, 0])), + AtcRational(Uint256([1470066393310513152, 0, 0, 0])), + AtcRational(Uint256([1454889242193393664, 0, 0, 0])), + AtcRational(Uint256([1439851802449000192, 0, 0, 0])), + AtcRational(Uint256([1424953137749395968, 0, 0, 0])), + AtcRational(Uint256([1410192310724064000, 0, 0, 0])), + AtcRational(Uint256([1395568383145516288, 0, 0, 0])), + AtcRational(Uint256([1381080416111280128, 0, 0, 0])), + AtcRational(Uint256([1366727470222276864, 0, 0, 0])), + AtcRational(Uint256([1352508605757614592, 0, 0, 0])), + AtcRational(Uint256([1338422882845812992, 0, 0, 0])), + AtcRational(Uint256([1324469361632493312, 0, 0, 0])), + AtcRational(Uint256([1310647102444549376, 0, 0, 0])), + AtcRational(Uint256([1296955165950824704, 0, 0, 0])), + AtcRational(Uint256([1283392613319330816, 0, 0, 0])), + AtcRational(Uint256([1269958506371023872, 0, 0, 0])), + AtcRational(Uint256([1256651907730177536, 0, 0, 0])), + AtcRational(Uint256([1243471880971367936, 0, 0, 0])), + AtcRational(Uint256([1230417490763117312, 0, 0, 0])), + AtcRational(Uint256([1217487803008212736, 0, 0, 0])), + 
AtcRational(Uint256([1204681884980741632, 0, 0, 0])), + AtcRational(Uint256([1191998805459865088, 0, 0, 0])), + AtcRational(Uint256([1179437634860375808, 0, 0, 0])), + AtcRational(Uint256([1166997445360058880, 0, 0, 0])), + AtcRational(Uint256([1154677311023903744, 0, 0, 0])), + AtcRational(Uint256([1142476307925186944, 0, 0, 0])), + AtcRational(Uint256([1130393514263474816, 0, 0, 0])), + AtcRational(Uint256([1118428010479570176, 0, 0, 0])), + AtcRational(Uint256([1106578879367446784, 0, 0, 0])), + AtcRational(Uint256([1094845206183200000, 0, 0, 0])), + AtcRational(Uint256([1083226078751057536, 0, 0, 0])), + AtcRational(Uint256([1071720587566481536, 0, 0, 0])), + AtcRational(Uint256([1060327825896404608, 0, 0, 0])), + AtcRational(Uint256([1049046889876627200, 0, 0, 0])), + AtcRational(Uint256([1037876878606426112, 0, 0, 0])), + AtcRational(Uint256([1026816894240403456, 0, 0, 0])), + AtcRational(Uint256([1015866042077617536, 0, 0, 0])), + AtcRational(Uint256([1005023430648028800, 0, 0, 0])), + AtcRational(Uint256([994288171796307968, 0, 0, 0])), + AtcRational(Uint256([983659380763034624, 0, 0, 0])), + AtcRational(Uint256([973136176263332992, 0, 0, 0])), + AtcRational(Uint256([962717680562974336, 0, 0, 0])), + AtcRational(Uint256([952403019551993984, 0, 0, 0])), + AtcRational(Uint256([942191322815853056, 0, 0, 0])), + AtcRational(Uint256([932081723704189696, 0, 0, 0])), + AtcRational(Uint256([922073359397190528, 0, 0, 0])), + AtcRational(Uint256([912165370969629056, 0, 0, 0])), + AtcRational(Uint256([902356903452603136, 0, 0, 0])), + AtcRational(Uint256([892647105893010176, 0, 0, 0])), + AtcRational(Uint256([883035131410800384, 0, 0, 0])), + AtcRational(Uint256([873520137254044800, 0, 0, 0])), + AtcRational(Uint256([864101284851852928, 0, 0, 0])), + AtcRational(Uint256([854777739865181312, 0, 0, 0])), + AtcRational(Uint256([845548672235568384, 0, 0, 0])), + AtcRational(Uint256([836413256231831552, 0, 0, 0])), + AtcRational(Uint256([827370670494766720, 0, 0, 0])), + 
AtcRational(Uint256([818420098079881728, 0, 0, 0])), + AtcRational(Uint256([809560726498204800, 0, 0, 0])), + AtcRational(Uint256([800791747755200896, 0, 0, 0])), + AtcRational(Uint256([792112358387835392, 0, 0, 0])), + AtcRational(Uint256([783521759499814016, 0, 0, 0])), + AtcRational(Uint256([775019156795042816, 0, 0, 0])), + AtcRational(Uint256([766603760609335424, 0, 0, 0])), + AtcRational(Uint256([758274785940408960, 0, 0, 0])), + AtcRational(Uint256([750031452476196608, 0, 0, 0])), + AtcRational(Uint256([741872984621515008, 0, 0, 0])), + AtcRational(Uint256([733798611523120256, 0, 0, 0])), + AtcRational(Uint256([725807567093184512, 0, 0, 0])), + AtcRational(Uint256([717899090031224448, 0, 0, 0])), + AtcRational(Uint256([710072423844518784, 0, 0, 0])), + AtcRational(Uint256([702326816867043968, 0, 0, 0])), + AtcRational(Uint256([694661522276962432, 0, 0, 0])), + AtcRational(Uint256([687075798112689920, 0, 0, 0])), + AtcRational(Uint256([679568907287580672, 0, 0, 0])), + AtcRational(Uint256([672140117603256192, 0, 0, 0])), + AtcRational(Uint256([664788701761609984, 0, 0, 0])), + AtcRational(Uint256([657513937375516800, 0, 0, 0])), + AtcRational(Uint256([650315106978278272, 0, 0, 0])), + AtcRational(Uint256([643191498031836288, 0, 0, 0])), + AtcRational(Uint256([636142402933774464, 0, 0, 0])), + AtcRational(Uint256([629167119023148800, 0, 0, 0])), + AtcRational(Uint256([622264948585165440, 0, 0, 0])), + AtcRational(Uint256([615435198854739840, 0, 0, 0])), + AtcRational(Uint256([608677182018960512, 0, 0, 0])), + AtcRational(Uint256([601990215218487424, 0, 0, 0])), + AtcRational(Uint256([595373620547912192, 0, 0, 0])), + AtcRational(Uint256([588826725055103488, 0, 0, 0])), + AtcRational(Uint256([582348860739565568, 0, 0, 0])), + AtcRational(Uint256([575939364549835840, 0, 0, 0])), + AtcRational(Uint256([569597578379946176, 0, 0, 0])), + AtcRational(Uint256([563322849064973184, 0, 0, 0])), + AtcRational(Uint256([557114528375699392, 0, 0, 0])), + 
AtcRational(Uint256([550971973012414144, 0, 0, 0])), + AtcRational(Uint256([544894544597873792, 0, 0, 0])), + AtcRational(Uint256([538881609669446912, 0, 0, 0])), + AtcRational(Uint256([532932539670464960, 0, 0, 0])), + AtcRational(Uint256([527046710940803776, 0, 0, 0])), + AtcRational(Uint256([521223504706716480, 0, 0, 0])), + AtcRational(Uint256([515462307069940352, 0, 0, 0])), + AtcRational(Uint256([509762508996097024, 0, 0, 0])), + AtcRational(Uint256([504123506302410304, 0, 0, 0])), + AtcRational(Uint256([498544699644759936, 0, 0, 0])), + AtcRational(Uint256([493025494504093248, 0, 0, 0])), + AtcRational(Uint256([487565301172211520, 0, 0, 0])), + AtcRational(Uint256([482163534736955520, 0, 0, 0])), + AtcRational(Uint256([476819615066805056, 0, 0, 0])), + AtcRational(Uint256([471532966794915008, 0, 0, 0])), + AtcRational(Uint256([466303019302601600, 0, 0, 0])), + AtcRational(Uint256([461129206702303360, 0, 0, 0])), + AtcRational(Uint256([456010967820029760, 0, 0, 0])), + AtcRational(Uint256([450947746177316224, 0, 0, 0])), + AtcRational(Uint256([445938989972704576, 0, 0, 0])), + AtcRational(Uint256([440984152062762688, 0, 0, 0])), + AtcRational(Uint256([436082689942662912, 0, 0, 0])), + AtcRational(Uint256([431234065726332992, 0, 0, 0])), + AtcRational(Uint256([426437746126196672, 0, 0, 0])), + AtcRational(Uint256([421693202432519040, 0, 0, 0])), + AtcRational(Uint256([416999910492373440, 0, 0, 0])), + AtcRational(Uint256([412357350688240704, 0, 0, 0])), + AtcRational(Uint256([407765007916260352, 0, 0, 0])), + AtcRational(Uint256([403222371564144896, 0, 0, 0])), + AtcRational(Uint256([398728935488772800, 0, 0, 0])), + AtcRational(Uint256([394284197993471488, 0, 0, 0])), + AtcRational(Uint256([389887661805007040, 0, 0, 0])), + AtcRational(Uint256([385538834050291776, 0, 0, 0])), + AtcRational(Uint256([381237226232822592, 0, 0, 0])), + AtcRational(Uint256([376982354208862784, 0, 0, 0])), + AtcRational(Uint256([372773738163379840, 0, 0, 0])), + 
AtcRational(Uint256([368610902585751744, 0, 0, 0])), + AtcRational(Uint256([364493376245252288, 0, 0, 0])), + AtcRational(Uint256([360420692166327168, 0, 0, 0])), + AtcRational(Uint256([356392387603673216, 0, 0, 0])), + AtcRational(Uint256([352408004017130240, 0, 0, 0])), + AtcRational(Uint256([348467087046397696, 0, 0, 0])), + AtcRational(Uint256([344569186485583936, 0, 0, 0])), + AtcRational(Uint256([340713856257602176, 0, 0, 0])), + AtcRational(Uint256([336900654388419392, 0, 0, 0])), + AtcRational(Uint256([333129142981170944, 0, 0, 0])), + AtcRational(Uint256([329398888190146944, 0, 0, 0])), + AtcRational(Uint256([325709460194663424, 0, 0, 0])), + AtcRational(Uint256([322060433172825088, 0, 0, 0])), + AtcRational(Uint256([318451385275187776, 0, 0, 0])), + AtcRational(Uint256([314881898598331776, 0, 0, 0])), + AtcRational(Uint256([311351559158351808, 0, 0, 0])), + AtcRational(Uint256([307859956864274048, 0, 0, 0])), + AtcRational(Uint256([304406685491404992, 0, 0, 0])), + AtcRational(Uint256([300991342654624192, 0, 0, 0])), + AtcRational(Uint256([297613529781624320, 0, 0, 0])), + AtcRational(Uint256([294272852086108864, 0, 0, 0])), + AtcRational(Uint256([290968918540952256, 0, 0, 0])), + AtcRational(Uint256([287701341851331328, 0, 0, 0])), + AtcRational(Uint256([284469738427833696, 0, 0, 0])), + AtcRational(Uint256([281273728359550304, 0, 0, 0])), + AtcRational(Uint256([278112935387157216, 0, 0, 0])), + AtcRational(Uint256([274986986875995200, 0, 0, 0])), + AtcRational(Uint256([271895513789150592, 0, 0, 0])), + AtcRational(Uint256([268838150660545664, 0, 0, 0])), + AtcRational(Uint256([265814535568041440, 0, 0, 0])), + AtcRational(Uint256([262824310106561728, 0, 0, 0])), + AtcRational(Uint256([259867119361241024, 0, 0, 0])), + AtcRational(Uint256([256942611880603296, 0, 0, 0])), + AtcRational(Uint256([254050439649774752, 0, 0, 0])), + AtcRational(Uint256([251190258063738688, 0, 0, 0])), + AtcRational(Uint256([248361725900633600, 0, 0, 0])), + 
AtcRational(Uint256([245564505295100640, 0, 0, 0])), + AtcRational(Uint256([242798261711686880, 0, 0, 0])), + AtcRational(Uint256([240062663918305152, 0, 0, 0])), + AtcRational(Uint256([237357383959756160, 0, 0, 0])), + AtcRational(Uint256([234682097131319296, 0, 0, 0])), + AtcRational(Uint256([232036481952410816, 0, 0, 0])), + AtcRational(Uint256([229420220140319360, 0, 0, 0])), + AtcRational(Uint256([226832996584017152, 0, 0, 0])), + AtcRational(Uint256([224274499318053024, 0, 0, 0])), + AtcRational(Uint256([221744419496531680, 0, 0, 0])), + AtcRational(Uint256([219242451367179744, 0, 0, 0])), + AtcRational(Uint256([216768292245502976, 0, 0, 0])), + AtcRational(Uint256([214321642489039520, 0, 0, 0])), + AtcRational(Uint256([211902205471709248, 0, 0, 0])), + AtcRational(Uint256([209509687558263072, 0, 0, 0])), + AtcRational(Uint256([207143798078836928, 0, 0, 0])), + AtcRational(Uint256([204804249303609280, 0, 0, 0])), + AtcRational(Uint256([202490756417568736, 0, 0, 0])), + AtcRational(Uint256([200203037495391232, 0, 0, 0])), + AtcRational(Uint256([197940813476429664, 0, 0, 0])), + AtcRational(Uint256([195703808139820608, 0, 0, 0])), + AtcRational(Uint256([193491748079706688, 0, 0, 0])), + AtcRational(Uint256([191304362680578688, 0, 0, 0])), + AtcRational(Uint256([189141384092740352, 0, 0, 0])), + AtcRational(Uint256([187002547207894304, 0, 0, 0])), + AtcRational(Uint256([184887589634855776, 0, 0, 0])), + AtcRational(Uint256([182796251675390752, 0, 0, 0])), + AtcRational(Uint256([180728276300183808, 0, 0, 0])), + AtcRational(Uint256([178683409124936320, 0, 0, 0])), + AtcRational(Uint256([176661398386595648, 0, 0, 0])), + AtcRational(Uint256([174661994919716768, 0, 0, 0])), + AtcRational(Uint256([172684952132960128, 0, 0, 0])), + AtcRational(Uint256([170730025985722752, 0, 0, 0])), + AtcRational(Uint256([168796974964908128, 0, 0, 0])), + AtcRational(Uint256([166885560061832896, 0, 0, 0])), + AtcRational(Uint256([164995544749272480, 0, 0, 0])), + 
AtcRational(Uint256([163126694958648032, 0, 0, 0])), + AtcRational(Uint256([161278779057352736, 0, 0, 0])), + AtcRational(Uint256([159451567826220640, 0, 0, 0])), + AtcRational(Uint256([157644834437138816, 0, 0, 0])), + AtcRational(Uint256([155858354430802016, 0, 0, 0])), + AtcRational(Uint256([154091905694611360, 0, 0, 0])), + AtcRational(Uint256([152345268440719328, 0, 0, 0])), + AtcRational(Uint256([150618225184218048, 0, 0, 0])), + AtcRational(Uint256([148910560721475488, 0, 0, 0])), + AtcRational(Uint256([147222062108617056, 0, 0, 0])), + AtcRational(Uint256([145552518640153856, 0, 0, 0])), + AtcRational(Uint256([143901721827759536, 0, 0, 0])), + AtcRational(Uint256([142269465379193696, 0, 0, 0])), + AtcRational(Uint256([140655545177373184, 0, 0, 0])), + AtcRational(Uint256([139059759259593184, 0, 0, 0])), + AtcRational(Uint256([137481907796894496, 0, 0, 0])), + AtcRational(Uint256([135921793073581792, 0, 0, 0])), + AtcRational(Uint256([134379219466889200, 0, 0, 0])), + AtcRational(Uint256([132853993426794880, 0, 0, 0])), + AtcRational(Uint256([131345923455985760, 0, 0, 0])), + AtcRational(Uint256([129854820089970032, 0, 0, 0])), + AtcRational(Uint256([128380495877339056, 0, 0, 0])), + AtcRational(Uint256([126922765360178944, 0, 0, 0])), + AtcRational(Uint256([125481445054629696, 0, 0, 0])), + AtcRational(Uint256([124056353431594704, 0, 0, 0])), + AtcRational(Uint256([122647310897597840, 0, 0, 0])), + AtcRational(Uint256([121254139775789056, 0, 0, 0])), + AtcRational(Uint256([119876664287099296, 0, 0, 0])), + AtcRational(Uint256([118514710531542512, 0, 0, 0])), + AtcRational(Uint256([117168106469665536, 0, 0, 0])), + AtcRational(Uint256([115836681904146544, 0, 0, 0])), + AtcRational(Uint256([114520268461539280, 0, 0, 0])), + AtcRational(Uint256([113218699574165632, 0, 0, 0])), + AtcRational(Uint256([111931810462153952, 0, 0, 0])), + AtcRational(Uint256([110659438115623328, 0, 0, 0])), + AtcRational(Uint256([109401421277014816, 0, 0, 0])), + 
AtcRational(Uint256([108157600423566912, 0, 0, 0])), + AtcRational(Uint256([106927817749936160, 0, 0, 0])), + AtcRational(Uint256([105711917150963008, 0, 0, 0])), + AtcRational(Uint256([104509744204580720, 0, 0, 0])), + AtcRational(Uint256([103321146154867984, 0, 0, 0])), + AtcRational(Uint256([102145971895245168, 0, 0, 0])), + AtcRational(Uint256([100984071951811872, 0, 0, 0])), + AtcRational(Uint256([99835298466827488, 0, 0, 0])), + AtcRational(Uint256([98699505182332368, 0, 0, 0])), + AtcRational(Uint256([97576547423909568, 0, 0, 0])), + AtcRational(Uint256([96466282084587616, 0, 0, 0])), + AtcRational(Uint256([95368567608881936, 0, 0, 0])), + AtcRational(Uint256([94283263976975168, 0, 0, 0])), + AtcRational(Uint256([93210232689036528, 0, 0, 0])), + AtcRational(Uint256([92149336749677664, 0, 0, 0])), + AtcRational(Uint256([91100440652546432, 0, 0, 0])), + AtcRational(Uint256([90063410365056304, 0, 0, 0])), + AtcRational(Uint256([89038113313251152, 0, 0, 0])), + AtcRational(Uint256([88024418366805744, 0, 0, 0])), + AtcRational(Uint256([87022195824159632, 0, 0, 0])), + AtcRational(Uint256([86031317397784352, 0, 0, 0])), + AtcRational(Uint256([85051656199584336, 0, 0, 0])), + AtcRational(Uint256([84083086726428336, 0, 0, 0])), + AtcRational(Uint256([83125484845813488, 0, 0, 0])), + AtcRational(Uint256([82178727781658848, 0, 0, 0])), + AtcRational(Uint256([81242694100228816, 0, 0, 0])), + AtcRational(Uint256([80317263696186016, 0, 0, 0])), + AtcRational(Uint256([79402317778771824, 0, 0, 0])), + AtcRational(Uint256([78497738858114176, 0, 0, 0])), + AtcRational(Uint256([77603410731662624, 0, 0, 0])), + AtcRational(Uint256([76719218470748448, 0, 0, 0])), + AtcRational(Uint256([75845048407270416, 0, 0, 0])), + AtcRational(Uint256([74980788120504400, 0, 0, 0])), + AtcRational(Uint256([74126326424036208, 0, 0, 0])), + AtcRational(Uint256([73281553352817728, 0, 0, 0])), + AtcRational(Uint256([72446360150344240, 0, 0, 0])), + AtcRational(Uint256([71620639255952600, 0, 0, 
0])), + AtcRational(Uint256([70804284292240360, 0, 0, 0])), + AtcRational(Uint256([69997190052603488, 0, 0, 0])), + AtcRational(Uint256([69199252488892648, 0, 0, 0])), + AtcRational(Uint256([68410368699187752, 0, 0, 0])), + AtcRational(Uint256([67630436915688592, 0, 0, 0])), + AtcRational(Uint256([66859356492722160, 0, 0, 0])), + AtcRational(Uint256([66097027894864808, 0, 0, 0])), + AtcRational(Uint256([65343352685178616, 0, 0, 0])), + AtcRational(Uint256([64598233513561880, 0, 0, 0])), + AtcRational(Uint256([63861574105211760, 0, 0, 0])), + AtcRational(Uint256([63133279249198800, 0, 0, 0])), + AtcRational(Uint256([62413254787153008, 0, 0, 0])), + AtcRational(Uint256([61701407602059336, 0, 0, 0])), + AtcRational(Uint256([60997645607163304, 0, 0, 0])), + AtcRational(Uint256([60301877734984648, 0, 0, 0])), + AtcRational(Uint256([59614013926438576, 0, 0, 0])), + AtcRational(Uint256([58933965120064440, 0, 0, 0])), + AtcRational(Uint256([58261643241359936, 0, 0, 0])), + AtcRational(Uint256([57596961192220440, 0, 0, 0])), + AtcRational(Uint256([56939832840483304, 0, 0, 0])), + AtcRational(Uint256([56290173009574848, 0, 0, 0])), + AtcRational(Uint256([55647897468260864, 0, 0, 0])), + AtcRational(Uint256([55012922920498480, 0, 0, 0])), + AtcRational(Uint256([54385166995389032, 0, 0, 0])), + AtcRational(Uint256([53764548237231728, 0, 0, 0])), + AtcRational(Uint256([53150986095676152, 0, 0, 0])), + AtcRational(Uint256([52544400915973480, 0, 0, 0])), + AtcRational(Uint256([51944713929325792, 0, 0, 0])), + AtcRational(Uint256([51351847243332064, 0, 0, 0])), + AtcRational(Uint256([50765723832530176, 0, 0, 0])), + AtcRational(Uint256([50186267529034840, 0, 0, 0])), + AtcRational(Uint256([49613403013269352, 0, 0, 0])), + AtcRational(Uint256([49047055804791736, 0, 0, 0])), + AtcRational(Uint256([48487152253213424, 0, 0, 0])), + AtcRational(Uint256([47933619529210104, 0, 0, 0])), + AtcRational(Uint256([47386385615624248, 0, 0, 0])), + AtcRational(Uint256([46845379298657936, 0, 0, 
0])), + AtcRational(Uint256([46310530159155312, 0, 0, 0])), + AtcRational(Uint256([45781768563974600, 0, 0, 0])), + AtcRational(Uint256([45259025657447672, 0, 0, 0])), + AtcRational(Uint256([44742233352927632, 0, 0, 0])), + AtcRational(Uint256([44231324324422752, 0, 0, 0])), + AtcRational(Uint256([43726231998316280, 0, 0, 0])), + AtcRational(Uint256([43226890545171720, 0, 0, 0])), + AtcRational(Uint256([42733234871622224, 0, 0, 0])), + AtcRational(Uint256([42245200612343560, 0, 0, 0])), + AtcRational(Uint256([41762724122110312, 0, 0, 0])), + AtcRational(Uint256([41285742467933752, 0, 0, 0])), + AtcRational(Uint256([40814193421281544, 0, 0, 0])), + AtcRational(Uint256([40348015450377768, 0, 0, 0])), + AtcRational(Uint256([39887147712583024, 0, 0, 0])), + AtcRational(Uint256([39431530046853688, 0, 0, 0])), + AtcRational(Uint256([38981102966279480, 0, 0, 0])), + AtcRational(Uint256([38535807650699128, 0, 0, 0])), + AtcRational(Uint256([38095585939392688, 0, 0, 0])), + AtcRational(Uint256([37660380323850216, 0, 0, 0])), + AtcRational(Uint256([37230133940616360, 0, 0, 0])), + AtcRational(Uint256([36804790564209328, 0, 0, 0])), + AtcRational(Uint256([36384294600114552, 0, 0, 0])), + AtcRational(Uint256([35968591077851516, 0, 0, 0])), + AtcRational(Uint256([35557625644113388, 0, 0, 0])), + AtcRational(Uint256([35151344555979076, 0, 0, 0])), + AtcRational(Uint256([34749694674196404, 0, 0, 0])), + AtcRational(Uint256([34352623456536068, 0, 0, 0])), + AtcRational(Uint256([33960078951215948, 0, 0, 0])), + AtcRational(Uint256([33572009790394584, 0, 0, 0])), + AtcRational(Uint256([33188365183733360, 0, 0, 0])), + AtcRational(Uint256([32809094912027156, 0, 0, 0])), + AtcRational(Uint256([32434149320901908, 0, 0, 0])), + AtcRational(Uint256([32063479314579508, 0, 0, 0])), + AtcRational(Uint256([31697036349708460, 0, 0, 0])), + AtcRational(Uint256([31334772429260116, 0, 0, 0])), + AtcRational(Uint256([30976640096490016, 0, 0, 0])), + AtcRational(Uint256([30622592428963244, 0, 0, 
0])), + AtcRational(Uint256([30272583032643336, 0, 0, 0])), + AtcRational(Uint256([29926566036044560, 0, 0, 0])), + AtcRational(Uint256([29584496084446084, 0, 0, 0])), + AtcRational(Uint256([29246328334168376, 0, 0, 0])), + AtcRational(Uint256([28912018446910460, 0, 0, 0])), + AtcRational(Uint256([28581522584147772, 0, 0, 0])), + AtcRational(Uint256([28254797401590164, 0, 0, 0])), + AtcRational(Uint256([27931800043699132, 0, 0, 0])), + AtcRational(Uint256([27612488138263732, 0, 0, 0])), + AtcRational(Uint256([27296819791035000, 0, 0, 0])), + AtcRational(Uint256([26984753580417632, 0, 0, 0])), + AtcRational(Uint256([26676248552219052, 0, 0, 0])), + AtcRational(Uint256([26371264214454720, 0, 0, 0])), + AtcRational(Uint256([26069760532209384, 0, 0, 0])), + AtcRational(Uint256([25771697922553848, 0, 0, 0])), + AtcRational(Uint256([25477037249516400, 0, 0, 0])), + AtcRational(Uint256([25185739819108396, 0, 0, 0])), + AtcRational(Uint256([24897767374403864, 0, 0, 0])), + AtcRational(Uint256([24613082090671888, 0, 0, 0])), + AtcRational(Uint256([24331646570561924, 0, 0, 0])), + AtcRational(Uint256([24053423839341064, 0, 0, 0])), + AtcRational(Uint256([23778377340182780, 0, 0, 0])), + AtcRational(Uint256([23506470929506944, 0, 0, 0])), + AtcRational(Uint256([23237668872370196, 0, 0, 0])), + AtcRational(Uint256([22971935837906256, 0, 0, 0])), + AtcRational(Uint256([22709236894815996, 0, 0, 0])), + AtcRational(Uint256([22449537506906248, 0, 0, 0])), + AtcRational(Uint256([22192803528677148, 0, 0, 0])), + AtcRational(Uint256([21939001200957664, 0, 0, 0])), + AtcRational(Uint256([21688097146588316, 0, 0, 0])), + AtcRational(Uint256([21440058366151208, 0, 0, 0])), + AtcRational(Uint256([21194852233746400, 0, 0, 0])), + AtcRational(Uint256([20952446492814320, 0, 0, 0])), + AtcRational(Uint256([20712809252003940, 0, 0, 0])), + AtcRational(Uint256([20475908981085852, 0, 0, 0])), + AtcRational(Uint256([20241714506910040, 0, 0, 0])), + AtcRational(Uint256([20010195009407928, 0, 0, 
0])), + AtcRational(Uint256([19781320017637956, 0, 0, 0])), + AtcRational(Uint256([19555059405874636, 0, 0, 0])), + AtcRational(Uint256([19331383389740252, 0, 0, 0])), + AtcRational(Uint256([19110262522378940, 0, 0, 0])), + AtcRational(Uint256([18891667690672852, 0, 0, 0])), + AtcRational(Uint256([18675570111499620, 0, 0, 0])), + AtcRational(Uint256([18461941328030932, 0, 0, 0])), + AtcRational(Uint256([18250753206071836, 0, 0, 0])), + AtcRational(Uint256([18041977930440052, 0, 0, 0])), + AtcRational(Uint256([17835588001385282, 0, 0, 0])), +]; + +#[cfg(test)] +mod test { + use stacks_common::util::hash::to_hex; + use stacks_common::util::uint::Uint256; + + use crate::chainstate::burn::atc::AtcRational; + use crate::chainstate::burn::BlockSnapshot; + use crate::stacks_common::util::uint::BitArray; + + impl AtcRational { + /// Convert to f64, and panic on conversion failure + pub fn to_f64(&self) -> f64 { + let ipart = self.ipart() as f64; + let fpart = self.0.low_u64() as f64; + ipart + (fpart / (u64::MAX as f64)) + } + + /// Convert from f64 between 0 and 1, panicking on conversion failure. Scales up the f64 so that its + /// fractional parts reside in the lower 64 bits of the AtcRational. + pub fn from_f64_unit(value: f64) -> Self { + if value < 0.0 || value >= 1.0 { + panic!("only usable for values in [0.0, 1.0) range"); + } + + // NOTE: this only changes the exponent, not the mantissa. + // Moreover, u128::from(u64::MAX) + 1 has f64 representation 0x43f0000000000000, so these conversions are safe. 
+ let scaled_value = value * ((u128::from(u64::MAX) + 1) as f64); + + // this is safe, because 0.0 <= value < 1.0, so scaled_value <= u64::MAX + let value_u64 = scaled_value as u64; + Self(Uint256::from_u64(value_u64)) + } + } + + fn check_add(num_1: u64, den_1: u64, num_2: u64, den_2: u64) { + assert!( + (AtcRational::frac(num_1, den_1) + .add(&AtcRational::frac(num_2, den_2)) + .unwrap()) + .to_f64() + .abs() + - (num_1 as f64 / den_1 as f64 + num_2 as f64 / den_2 as f64).abs() + < (1.0 / (1024.0 * 1024.0)) + ); + } + + fn check_mul(num_1: u64, den_1: u64, num_2: u64, den_2: u64) { + assert!( + (AtcRational::frac(num_1, den_1) + .mul(&AtcRational::frac(num_2, den_2)) + .unwrap()) + .to_f64() + .abs() + - ((num_1 as f64 / den_1 as f64) * (num_2 as f64 / den_2 as f64)).abs() + < (1.0 / (1024.0 * 1024.0)) + ); + } + + #[test] + fn test_atc_rational() { + // zero + assert_eq!(AtcRational::zero().into_inner(), Uint256::from_u64(0)); + + // one + assert_eq!(AtcRational::one().into_inner(), Uint256::one() << 64); + + // one_sup + assert_eq!( + AtcRational::one_sup().into_inner(), + (Uint256::one() << 64) - Uint256::from_u64(1) + ); + + // max + assert_eq!( + AtcRational::max().into_inner(), + (Uint256::from_u64(u64::MAX) << 64) | Uint256::from_u64(u64::MAX) + ); + + // ipart + assert_eq!(AtcRational::one().ipart(), 1); + assert_eq!(AtcRational::frac(1, 2).ipart(), 0); + assert_eq!(AtcRational::frac(3, 2).ipart(), 1); + assert_eq!(AtcRational::frac(4, 2).ipart(), 2); + assert_eq!(AtcRational::frac(9999, 10000).ipart(), 0); + + // to_f64 + assert_eq!(AtcRational::one().to_f64(), 1.0); + assert_eq!(AtcRational::zero().to_f64(), 0.0); + assert_eq!(AtcRational::frac(1, 2).to_f64(), 0.5); + assert_eq!(AtcRational::frac(1, 32).to_f64(), 0.03125); + + // from_f64_unit + assert_eq!(AtcRational::from_f64_unit(0.0), AtcRational::zero()); + assert_eq!(AtcRational::from_f64_unit(0.5), AtcRational::frac(1, 2)); + assert_eq!( + AtcRational::from_f64_unit(0.03125), + 
AtcRational::frac(1, 32) + ); + + // is_overflowed + assert!(!AtcRational::max().is_overflowed()); + assert!( + AtcRational(AtcRational::max().into_inner() + Uint256::from_u64(1)).is_overflowed() + ); + assert!(AtcRational::max() + .add(&AtcRational(Uint256::from_u64(1))) + .is_none()); + + // frac constructor produces values between 0 and u64::MAX + assert_eq!(AtcRational::frac(1, 1), AtcRational::one()); + assert_eq!( + AtcRational::frac(1, 2).0, + Uint256::from_u64(u64::MAX / 2) + Uint256::from_u64(1) + ); + assert_eq!( + AtcRational::frac(1, 4).0, + Uint256::from_u64(u64::MAX / 4) + Uint256::from_u64(1) + ); + assert_eq!( + AtcRational::frac(1, 8).0, + Uint256::from_u64(u64::MAX / 8) + Uint256::from_u64(1) + ); + assert_eq!( + AtcRational::frac(1, 16).0, + Uint256::from_u64(u64::MAX / 16) + Uint256::from_u64(1) + ); + assert_eq!( + AtcRational::frac(1, 32).0, + Uint256::from_u64(u64::MAX / 32) + Uint256::from_u64(1) + ); + + // fractions auto-normalize + assert_eq!(AtcRational::frac(2, 4), AtcRational::frac(1, 2)); + assert_eq!(AtcRational::frac(100, 400), AtcRational::frac(1, 4)); + assert_eq!(AtcRational::frac(5, 25), AtcRational::frac(1, 5)); + + // fractions can be added + assert_eq!( + AtcRational::frac(1, 2) + .add(&AtcRational::frac(1, 2)) + .unwrap(), + AtcRational::one() + ); + assert_eq!( + AtcRational::frac(1, 4) + .add(&AtcRational::frac(1, 4)) + .unwrap(), + AtcRational::frac(1, 2) + ); + assert_eq!( + AtcRational::frac(1, 8) + .add(&AtcRational::frac(1, 8)) + .unwrap(), + AtcRational::frac(1, 4) + ); + assert_eq!( + AtcRational::frac(3, 8) + .add(&AtcRational::frac(3, 8)) + .unwrap(), + AtcRational::frac(3, 4) + ); + assert_eq!( + AtcRational::max().add(&AtcRational(Uint256::from_u64(1))), + None + ); + + // fractions can be subtracted + assert_eq!( + AtcRational::frac(1, 2) + .sub(&AtcRational::frac(1, 2)) + .unwrap(), + AtcRational::zero() + ); + assert_eq!( + AtcRational::one().sub(&AtcRational::frac(1, 2)).unwrap(), + AtcRational::frac(1, 2) + 
); + assert_eq!( + AtcRational::one().sub(&AtcRational::frac(1, 32)).unwrap(), + AtcRational::frac(31, 32) + ); + + // fractions can be multiplied + assert_eq!( + AtcRational::frac(1, 2) + .mul(&AtcRational::frac(1, 2)) + .unwrap(), + AtcRational::frac(1, 4) + ); + assert_eq!( + AtcRational::frac(5, 6) + .mul(&AtcRational::frac(7, 8)) + .unwrap(), + AtcRational::frac(35, 48) + ); + assert_eq!( + AtcRational::frac(100, 2) + .mul(&AtcRational::frac(200, 4)) + .unwrap(), + AtcRational::frac(20000, 8) + ); + assert_eq!( + AtcRational::frac(1, 2) + .mul(&AtcRational::frac(1024, 1)) + .unwrap(), + AtcRational::frac(512, 1) + ); + + assert_eq!( + AtcRational::frac(1, 2).min(&AtcRational::frac(15, 32)), + AtcRational::frac(15, 32) + ); + + // we only do stuff with an AtcRational in the range [0..1), since if the ATC-C is greater + // than 1.0, then the null miner never wins (and thus there's no need to compute the null + // miner probability). + // + // The only time an AtcRational is greater than 1.0 is when we scale it up to the lookup + // table index, which has 1024 items. We check that here as well. 
+ for num_1 in 0..=1 { + for den_1 in 1..=1024 { + test_debug!("{}/{}", num_1, den_1); + for num_2 in 0..=1 { + for den_2 in 1..=1024 { + check_add(num_1, den_1, num_2, den_2); + check_mul(num_1, den_1, num_2, den_2); + check_mul(num_1, den_1, 1024, 1); + check_mul(num_2, den_2, 1024, 1); + } + } + } + } + } + + #[test] + #[ignore] + fn print_functions() { + let mut grid: Vec> = vec![vec![' '; 100]; 102]; + for i in 0..100 { + let f_atc = (i as f64) / 100.0; + let atc = AtcRational::frac(i as u64, 100); + let l_atc = BlockSnapshot::null_miner_logistic(atc).to_f64(); + let p_atc = BlockSnapshot::null_miner_probability(atc).to_f64(); + + // NOTE: columns increase downwards, so flip this + let l_atc_100 = 100 - ((l_atc * 100.0) as usize); + let p_atc_100 = 100 - ((p_atc * 100.0) as usize); + let a_atc_100 = 100 - (((1.0 - f_atc) * 100.0) as usize); + grid[a_atc_100][i] = '$'; + grid[l_atc_100][i] = '#'; + grid[p_atc_100][i] = '^'; + } + for j in 0..100 { + grid[101][j] = '_'; + } + + println!(""); + for row in grid.iter() { + let grid_str: String = row.clone().into_iter().collect(); + println!("|{}", &grid_str); + } + } + + /// Calculate the logic advantage curve for the null miner. + /// This function's parameters are chosen such that: + /// * if the ATC carryover has diminished by less than 20%, the null miner has negligible + /// chances of winning. This is to avoid punishing honest miners when there are flash blocks. + /// * If the ATC carryover has diminished by between 20% and 80%, the null miner has a + /// better-than-linear probability of winning. That is, if the burnchain MEV miner pays less + /// than X% of the expected carryover (20% <= X < 80%), then their probability of winning is + /// (1) strictly less than X%, and (2) strictly less than any Pr[X% - c] for 0 < c < X. + /// * If the ATC carryover is less than 20%, the null miner has an overwhelmingly likely chance + /// of winning (>95%). 
+ /// + /// The logistic curve fits the points (atc=0.2, null_prob=0.75) and (atc=0.8, null_prob=0.01). + fn null_miner_logistic(atc: f64) -> f64 { + // recall the inverted logistic function: + // + // L + // f(x) = --------------------- + // -k * (x0 - x) + // 1 + e + // + // It is shaped like a *backwards* "S" -- it approaches L as `x` tends towards negative + // infinity, and it approaches 0 as `x` tends towards positive infinity. This function is + // the null miner advantage function, where `x` is the ATC carryover value. + // + // We need to drive x0 and k from our two points: + // + // (x1, y1) = (0.2, 0.75) + // (x2, y2) = (0.8, 0.01) + // + // to derive L, x0, and k: + // L = 0.8 + // z = ln(L/y1 - 1) / ln(L/y2 - 1) + // x0 = (x1 - z * x2) / (1 - z) + // k = ln(L/y1 - 1) / (x1 - x0) + // + // The values for x0 and k were generated with the following GNU bc script: + // ``` + // $ cat /tmp/variables.bc + // scale=32 + // supremum=0.8 /* this is L */ + // x1=0.2 + // y1=0.75 + // x2=0.8 + // y2=0.01 + // z=l(supremum/y1 - 1)/l(supremum/y2 -1) + // x0=(x1 - z * x2)/(1 - z) + // k=l(supremum/y1 - 1)/(x1 - x0) + // print "x0 = "; x0 + // print "k = "; k + // ``` + // + // This script evaluates to: + // ``` + // $ bc -l < /tmp/variables.bc + // x0 = .42957690816204645842320195118064 + // k = 11.79583008928205260028158351938437 + // ``` + + let L: f64 = 0.8; + + // truncated f64 + let x0: f64 = 0.42957690816204647; + let k: f64 = 11.795830089282052; + + // natural logarithm constant + let e: f64 = 2.718281828459045; + + let adv = L / (1.0 + e.powf(-k * (x0 - atc))); + adv + } + + #[test] + fn make_null_miner_lookup_table() { + use crate::chainstate::burn::atc::ATC_LOOKUP; + let mut lookup_table = Vec::with_capacity(1024); + for atc in 0..1024 { + let fatc = (atc as f64) / 1024.0; + let lgst_fatc = null_miner_logistic(fatc); + let lgst_rational = AtcRational::from_f64_unit(lgst_fatc); + assert_eq!(ATC_LOOKUP[atc], lgst_rational); + 
assert_eq!(ATC_LOOKUP[atc].to_f64(), lgst_fatc); + lookup_table.push(lgst_rational); + } + println!("["); + for lt in lookup_table.into_iter() { + let inner = lt.into_inner(); + println!(" AtcRational(Uint256({:?})),", &inner.0); + } + println!("]"); + } +} diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index 760188829c..d6c33ab608 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -130,23 +130,6 @@ impl<'a> SortitionHandleTx<'a> { e })?; - let total_burn = state_transition - .accepted_ops - .iter() - .try_fold(0u64, |acc, op| { - let bf = match op { - BlockstackOperationType::LeaderBlockCommit(ref op) => op.burn_fee, - _ => 0, - }; - acc.checked_add(bf) - }); - - let txids = state_transition - .accepted_ops - .iter() - .map(|ref op| op.txid()) - .collect(); - let next_pox = SortitionDB::make_next_pox_id(parent_pox.clone(), next_pox_info.as_ref()); let next_sortition_id = SortitionDB::make_next_sortition_id( parent_pox.clone(), @@ -162,9 +145,7 @@ impl<'a> SortitionHandleTx<'a> { &next_pox, parent_snapshot, block_header, - &state_transition.burn_dist, - &txids, - total_burn, + &state_transition, initial_mining_bonus_ustx, ) .map_err(|e| { diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 8ffda83719..e3802d6ec1 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -50,9 +50,9 @@ use crate::burnchains::affirmation::{AffirmationMap, AffirmationMapEntry}; use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::{ - Address, Burnchain, BurnchainBlockHeader, BurnchainRecipient, BurnchainStateTransition, - BurnchainStateTransitionOps, BurnchainTransaction, BurnchainView, Error as BurnchainError, - PoxConstants, PublicKey, Txid, + Address, Burnchain, 
BurnchainBlockHeader, BurnchainRecipient, BurnchainSigner, + BurnchainStateTransition, BurnchainStateTransitionOps, BurnchainTransaction, BurnchainView, + Error as BurnchainError, PoxConstants, PublicKey, Txid, }; use crate::chainstate::burn::operations::leader_block_commit::{ MissedBlockCommit, RewardSetInfo, OUTPUTS_PER_COMMIT, @@ -744,7 +744,7 @@ const SORTITION_DB_SCHEMA_8: &'static [&'static str] = &[ );"#, ]; -const LAST_SORTITION_DB_INDEX: &'static str = "index_vote_for_aggregate_key_burn_header_hash"; +const LAST_SORTITION_DB_INDEX: &'static str = "index_block_commits_by_sender"; const SORTITION_DB_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS snapshots_block_hashes ON snapshots(block_height,index_root,winning_stacks_block_hash);", "CREATE INDEX IF NOT EXISTS snapshots_block_stacks_hashes ON snapshots(num_sortitions,index_root,winning_stacks_block_hash);", @@ -766,14 +766,34 @@ const SORTITION_DB_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS index_burn_header_hash_pox_valid ON snapshots(burn_header_hash,pox_valid);", "CREATE INDEX IF NOT EXISTS index_delegate_stx_burn_header_hash ON delegate_stx(burn_header_hash);", "CREATE INDEX IF NOT EXISTS index_vote_for_aggregate_key_burn_header_hash ON vote_for_aggregate_key(burn_header_hash);", + "CREATE INDEX IF NOT EXISTS index_block_commits_by_burn_height ON block_commits(block_height);", + "CREATE INDEX IF NOT EXISTS index_block_commits_by_sender ON block_commits(apparent_sender);" ]; +/// Handle to the sortition database, a MARF'ed sqlite DB on disk. +/// It stores information pertaining to cryptographic sortitions performed in each Bitcoin block -- +/// either to select the next Stacks block (in epoch 2.5 and earlier), or to choose the next Stacks +/// miner (epoch 3.0 and later). pub struct SortitionDB { + /// Whether or not write operations are permitted. 
Pertains to whether or not transaction + /// objects can be created or schema migrations can happen on this SortitionDB instance. pub readwrite: bool, + /// If true, then while write operations will be permitted, they will not be committed (and may + /// even be skipped). This is not used in production; it's used in the `stacks-inspect` tool + /// to simulate what could happen (e.g. to replay sortitions with different anti-MEV strategies + /// without corrupting the underlying DB). + pub dryrun: bool, + /// Handle to the MARF which stores an index over each burnchain and PoX fork. pub marf: MARF, + /// First burnchain block height at which sortitions will be considered. All Stacks epochs + /// besides epoch 1.0 must start at or after this height. pub first_block_height: u64, + /// Hash of the first burnchain block at which sortitions will be considered. pub first_burn_header_hash: BurnchainHeaderHash, + /// PoX constants that pertain to this DB, for purposes of (but not limited to) evaluating PoX + /// reward cycles and evaluating block-commit validity within a PoX reward cycle pub pox_constants: PoxConstants, + /// Path on disk from which this DB was opened (caller-given; not resolved). 
pub path: String, } @@ -781,6 +801,7 @@ pub struct SortitionDB { pub struct SortitionDBTxContext { pub first_block_height: u64, pub pox_constants: PoxConstants, + pub dryrun: bool, } #[derive(Clone)] @@ -788,6 +809,7 @@ pub struct SortitionHandleContext { pub first_block_height: u64, pub pox_constants: PoxConstants, pub chain_tip: SortitionId, + pub dryrun: bool, } pub type SortitionDBConn<'a> = IndexDBConn<'a, SortitionDBTxContext, SortitionId>; @@ -1130,6 +1152,7 @@ impl<'a> SortitionHandleTx<'a> { chain_tip: parent_chain_tip.clone(), first_block_height: conn.first_block_height, pox_constants: conn.pox_constants.clone(), + dryrun: conn.dryrun, }, ); @@ -2019,6 +2042,7 @@ impl<'a> SortitionHandleConn<'a> { chain_tip: chain_tip.clone(), first_block_height: connection.context.first_block_height, pox_constants: connection.context.pox_constants.clone(), + dryrun: connection.context.dryrun, }, index: &connection.index, }) @@ -2586,6 +2610,7 @@ impl SortitionDB { SortitionDBTxContext { first_block_height: self.first_block_height, pox_constants: self.pox_constants.clone(), + dryrun: self.dryrun, }, ); Ok(index_tx) @@ -2598,6 +2623,7 @@ impl SortitionDB { SortitionDBTxContext { first_block_height: self.first_block_height, pox_constants: self.pox_constants.clone(), + dryrun: self.dryrun, }, ) } @@ -2609,6 +2635,7 @@ impl SortitionDB { first_block_height: self.first_block_height, chain_tip: chain_tip.clone(), pox_constants: self.pox_constants.clone(), + dryrun: self.dryrun, }, ) } @@ -2620,13 +2647,13 @@ impl SortitionDB { if !self.readwrite { return Err(db_error::ReadOnly); } - Ok(SortitionHandleTx::new( &mut self.marf, SortitionHandleContext { first_block_height: self.first_block_height, chain_tip: chain_tip.clone(), pox_constants: self.pox_constants.clone(), + dryrun: self.dryrun, }, )) } @@ -2666,6 +2693,7 @@ impl SortitionDB { path: path.to_string(), marf, readwrite, + dryrun: false, pox_constants, first_block_height, first_burn_header_hash, @@ -2723,6 +2751,7 @@ impl 
SortitionDB { path: path.to_string(), marf, readwrite, + dryrun: false, first_block_height, pox_constants, first_burn_header_hash: first_burn_hash.clone(), @@ -3428,6 +3457,7 @@ impl SortitionDB { path: path.to_string(), marf, readwrite: true, + dryrun: false, first_block_height: migrator.get_burnchain().first_block_height, first_burn_header_hash: migrator.get_burnchain().first_block_hash.clone(), pox_constants: migrator.get_burnchain().pox_constants.clone(), @@ -3585,6 +3615,23 @@ impl SortitionDB { Ok(rc_info) } + + pub fn get_preprocessed_reward_set_size(&self, tip: &SortitionId) -> Option { + let Ok(Some(reward_info)) = &self.get_preprocessed_reward_set_of(&tip) else { + return None; + }; + let Some(reward_set) = reward_info.known_selected_anchor_block() else { + return None; + }; + + reward_set + .signers + .clone() + .map(|x| x.len()) + .unwrap_or(0) + .try_into() + .ok() + } } impl<'a> SortitionDBTx<'a> { @@ -3616,6 +3663,7 @@ impl<'a> SortitionDBConn<'a> { first_block_height: self.context.first_block_height.clone(), chain_tip: chain_tip.clone(), pox_constants: self.context.pox_constants.clone(), + dryrun: self.context.dryrun, }, } } @@ -4091,6 +4139,7 @@ impl SortitionDB { next_pox_info: Option, announce_to: F, ) -> Result<(BlockSnapshot, BurnchainStateTransition), BurnchainError> { + let dryrun = self.dryrun; let parent_sort_id = self .get_sortition_id(&burn_header.parent_block_hash, from_tip)? .ok_or_else(|| { @@ -4170,14 +4219,19 @@ impl SortitionDB { initial_mining_bonus, )?; - sortition_db_handle.store_transition_ops(&new_snapshot.0.sortition_id, &new_snapshot.1)?; + if !dryrun { + sortition_db_handle + .store_transition_ops(&new_snapshot.0.sortition_id, &new_snapshot.1)?; + } announce_to(reward_set_info); - // commit everything! - sortition_db_handle.commit().expect( - "Failed to commit to sortition db after announcing reward set info, state corrupted.", - ); + if !dryrun { + // commit everything! 
+ sortition_db_handle.commit().expect( + "Failed to commit to sortition db after announcing reward set info, state corrupted.", + ); + } Ok((new_snapshot.0, new_snapshot.1)) } @@ -5243,6 +5297,11 @@ impl<'a> SortitionHandleTx<'a> { sn.canonical_stacks_tip_consensus_hash = parent_sn.canonical_stacks_tip_consensus_hash; } + if self.context.dryrun { + // don't do any inserts + return Ok(root_hash); + } + self.insert_block_snapshot(&sn, pox_payout)?; for block_op in block_ops { @@ -6050,19 +6109,23 @@ impl<'a> SortitionHandleTx<'a> { vec![] }; - // commit to all newly-arrived blocks - let (mut block_arrival_keys, mut block_arrival_values) = - self.process_new_block_arrivals(parent_snapshot)?; - keys.append(&mut block_arrival_keys); - values.append(&mut block_arrival_values); - // store each indexed field - let root_hash = self.put_indexed_all( - &parent_snapshot.sortition_id, - &snapshot.sortition_id, - &keys, - &values, - )?; + let root_hash = if !self.context.dryrun { + // commit to all newly-arrived blocks + let (mut block_arrival_keys, mut block_arrival_values) = + self.process_new_block_arrivals(parent_snapshot)?; + keys.append(&mut block_arrival_keys); + values.append(&mut block_arrival_values); + + self.put_indexed_all( + &parent_snapshot.sortition_id, + &snapshot.sortition_id, + &keys, + &values, + )? 
+ } else { + TrieHash([0x00; 32]) + }; // pox payout addrs must include burn addresses let num_pox_payouts = self.get_num_pox_payouts(snapshot.block_height); @@ -6503,6 +6566,7 @@ pub mod tests { path: path.to_string(), marf, readwrite, + dryrun: false, first_block_height, first_burn_header_hash: first_burn_hash.clone(), pox_constants: PoxConstants::test_default(), @@ -6712,6 +6776,18 @@ pub mod tests { } Ok(ret) } + + /// Get the last block-commit from a given sender + pub fn get_last_block_commit_by_sender( + conn: &DBConn, + sender: &BurnchainSigner, + ) -> Result, db_error> { + let apparent_sender_str = + serde_json::to_string(sender).map_err(|e| db_error::SerializationError(e))?; + let sql = "SELECT * FROM block_commits WHERE apparent_sender = ?1 ORDER BY block_height DESC LIMIT 1"; + let args = rusqlite::params![&apparent_sender_str]; + query_row(conn, sql, args) + } } #[test] diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index 2a16897100..8687de754d 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -31,15 +31,22 @@ use crate::chainstate::burn::operations::{ BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use crate::chainstate::stacks::StacksPublicKey; -use crate::core::MINING_COMMITMENT_WINDOW; use crate::monitoring; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct BurnSamplePoint { + /// min(median_burn, most_recent_burn) pub burns: u128, + /// median burn over the UTXO chain pub median_burn: u128, + /// how many times did this miner mine in the window (i.e. how long is the UTXO chain for this + /// candidate in this window). 
+ pub frequency: u8, + /// distribution range start in a [0, 2**256) interval pub range_start: Uint256, + /// distribution range end in a [0, 2**256) interval pub range_end: Uint256, + /// block-commit from the miner candidate pub candidate: LeaderBlockCommitOp, } @@ -94,11 +101,25 @@ impl LinkedCommitIdentifier { } impl BurnSamplePoint { + pub fn zero(candidate: LeaderBlockCommitOp) -> Self { + Self { + burns: 0, + median_burn: 0, + frequency: 0, + range_start: Uint256::zero(), + range_end: Uint256::zero(), + candidate, + } + } + fn sanity_check_window( + miner_commitment_window: u8, block_commits: &Vec>, missed_commits: &Vec>, ) { - assert!(block_commits.len() <= (MINING_COMMITMENT_WINDOW as usize)); + assert!( + block_commits.len() <= usize::try_from(miner_commitment_window).expect("infallible") + ); assert_eq!(missed_commits.len() + 1, block_commits.len()); let mut block_height_at_index = None; for (index, commits) in block_commits.iter().enumerate() { @@ -151,6 +172,7 @@ impl BurnSamplePoint { /// `OP_RETURN` payload. The length of this vector must be equal to the length of the /// `block_commits` vector. `burn_blocks[i]` is `true` if the `ith` block-commit must be PoB. 
pub fn make_min_median_distribution( + mining_commitment_window: u8, mut block_commits: Vec>, mut missed_commits: Vec>, burn_blocks: Vec, @@ -158,7 +180,11 @@ impl BurnSamplePoint { // sanity check let window_size = block_commits.len() as u8; assert!(window_size > 0); - BurnSamplePoint::sanity_check_window(&block_commits, &missed_commits); + BurnSamplePoint::sanity_check_window( + mining_commitment_window, + &block_commits, + &missed_commits, + ); assert_eq!(burn_blocks.len(), block_commits.len()); // first, let's link all of the current block commits to the priors @@ -268,6 +294,17 @@ impl BurnSamplePoint { }; let burns = cmp::min(median_burn, most_recent_burn); + + let frequency = linked_commits.iter().fold(0u8, |count, commit_opt| { + if commit_opt.is_some() { + count + .checked_add(1) + .expect("infallable -- commit window exceeds u8::MAX") + } else { + count + } + }); + let candidate = if let LinkedCommitIdentifier::Valid(op) = linked_commits.remove(0).unwrap().op { @@ -281,11 +318,13 @@ impl BurnSamplePoint { "txid" => %candidate.txid.to_string(), "most_recent_burn" => %most_recent_burn, "median_burn" => %median_burn, + "frequency" => frequency, "all_burns" => %format!("{:?}", all_burns)); BurnSamplePoint { burns, median_burn, + frequency, range_start: Uint256::zero(), // To be filled in range_end: Uint256::zero(), // To be filled in candidate, @@ -324,14 +363,6 @@ impl BurnSamplePoint { } } - #[cfg(test)] - pub fn make_distribution( - all_block_candidates: Vec, - _consumed_leader_keys: Vec, - ) -> Vec { - Self::make_min_median_distribution(vec![all_block_candidates], vec![], vec![true]) - } - /// Calculate the ranges between 0 and 2**256 - 1 over which each point in the burn sample /// applies, so we can later select which block to use. 
fn make_sortition_ranges(burn_sample: &mut Vec) -> () { @@ -423,6 +454,21 @@ mod tests { use crate::chainstate::stacks::StacksPublicKey; use crate::core::MINING_COMMITMENT_WINDOW; + impl BurnSamplePoint { + pub fn make_distribution( + mining_commitment_window: u8, + all_block_candidates: Vec, + _consumed_leader_keys: Vec, + ) -> Vec { + Self::make_min_median_distribution( + mining_commitment_window, + vec![all_block_candidates], + vec![], + vec![true], + ) + } + } + struct BurnDistFixture { consumed_leader_keys: Vec, block_commits: Vec, @@ -531,6 +577,7 @@ mod tests { ]; let mut result = BurnSamplePoint::make_min_median_distribution( + MINING_COMMITMENT_WINDOW, commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], vec![false, false, false, true, true, true], @@ -564,6 +611,7 @@ mod tests { // miner 2 => min = 1, median = 3, last_burn = 3 let mut result = BurnSamplePoint::make_min_median_distribution( + MINING_COMMITMENT_WINDOW, commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], vec![false, false, false, true, true, true], @@ -624,6 +672,7 @@ mod tests { ]; let mut result = BurnSamplePoint::make_min_median_distribution( + MINING_COMMITMENT_WINDOW, commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], vec![false, false, false, false, false, false], @@ -677,6 +726,7 @@ mod tests { ]; let mut result = BurnSamplePoint::make_min_median_distribution( + MINING_COMMITMENT_WINDOW, commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], vec![false, false, false, false, false, false], @@ -733,6 +783,7 @@ mod tests { ]; let mut result = BurnSamplePoint::make_min_median_distribution( + MINING_COMMITMENT_WINDOW, commits.clone(), missed_commits.clone(), vec![false, false, false, false, false, false], @@ -998,6 +1049,7 @@ mod tests { median_burn: block_commit_1.burn_fee.into(), range_start: Uint256::zero(), range_end: Uint256::max(), + frequency: 1, candidate: block_commit_1.clone(), }], }, @@ -1016,12 
+1068,14 @@ mod tests { 0xffffffffffffffff, 0x7fffffffffffffff, ]), + frequency: 1, candidate: block_commit_1.clone(), }, BurnSamplePoint { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1041,6 +1095,7 @@ mod tests { burns: block_commit_1.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0xffffffffffffffff, @@ -1054,6 +1109,7 @@ mod tests { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1073,6 +1129,7 @@ mod tests { burns: block_commit_1.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0xffffffffffffffff, @@ -1086,6 +1143,7 @@ mod tests { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1105,6 +1163,7 @@ mod tests { burns: block_commit_1.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0xffffffffffffffff, @@ -1118,6 +1177,7 @@ mod tests { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1137,6 +1197,7 @@ mod tests { burns: block_commit_1.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0xffffffffffffffff, @@ 
-1150,6 +1211,7 @@ mod tests { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1169,6 +1231,7 @@ mod tests { burns: block_commit_1.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0xffffffffffffffff, @@ -1182,6 +1245,7 @@ mod tests { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1208,6 +1272,7 @@ mod tests { BurnSamplePoint { burns: block_commit_1.burn_fee.into(), median_burn: block_commit_2.burn_fee.into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0x3ed94d3cb0a84709, @@ -1220,6 +1285,7 @@ mod tests { BurnSamplePoint { burns: block_commit_2.burn_fee.into(), median_burn: block_commit_2.burn_fee.into(), + frequency: 1, range_start: Uint256([ 0x3ed94d3cb0a84709, 0x0963dded799a7c1a, @@ -1237,6 +1303,7 @@ mod tests { BurnSamplePoint { burns: (block_commit_3.burn_fee).into(), median_burn: block_commit_3.burn_fee.into(), + frequency: 1, range_start: Uint256([ 0x7db29a7961508e12, 0x12c7bbdaf334f834, @@ -1254,6 +1321,7 @@ mod tests { let f = &fixtures[i]; eprintln!("Fixture #{}", i); let dist = BurnSamplePoint::make_distribution( + MINING_COMMITMENT_WINDOW, f.block_commits.iter().cloned().collect(), f.consumed_leader_keys.iter().cloned().collect(), ); diff --git a/stackslib/src/chainstate/burn/mod.rs b/stackslib/src/chainstate/burn/mod.rs index b764344eb6..be92c3088f 100644 --- a/stackslib/src/chainstate/burn/mod.rs +++ b/stackslib/src/chainstate/burn/mod.rs @@ -38,6 +38,7 @@ use crate::chainstate::burn::db::sortdb::SortitionHandleTx; use crate::core::SYSTEM_FORK_SET_VERSION; use crate::util_lib::db::Error as db_error; +pub mod 
atc; /// This module contains the code for processing the burn chain state database pub mod db; pub mod distribution; @@ -223,7 +224,7 @@ impl Opcodes { } impl OpsHash { - pub fn from_txids(txids: &Vec) -> OpsHash { + pub fn from_txids(txids: &[Txid]) -> OpsHash { // NOTE: unlike stacks v1, we calculate the ops hash simply // from a hash-chain of txids. There is no weird serialization // of operations, and we don't construct a merkle tree over diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index 90f7f79291..5417a3a7c9 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -41,7 +41,6 @@ use crate::util_lib::db::{DBConn, DBTx, Error as db_error}; pub mod delegate_stx; pub mod leader_block_commit; -/// This module contains all burn-chain operations pub mod leader_key_register; pub mod stack_stx; pub mod transfer_stx; @@ -50,6 +49,8 @@ pub mod vote_for_aggregate_key; #[cfg(test)] mod test; +/// This module contains all burn-chain operations + #[derive(Debug)] pub enum Error { /// Failed to parse the operation from the burnchain transaction @@ -269,7 +270,7 @@ pub struct DelegateStxOp { pub sender: StacksAddress, pub delegate_to: StacksAddress, /// a tuple representing the output index of the reward address in the BTC transaction, - // and the actual PoX reward address. + /// and the actual PoX reward address. /// NOTE: the address in .pox-2 will be tagged as either p2pkh or p2sh; it's impossible to tell /// if it's a segwit-p2sh since that looks identical to a p2sh address. 
pub reward_addr: Option<(u32, PoxAddress)>, diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index 7d86f84803..9f3bc5d5ea 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ b/stackslib/src/chainstate/burn/sortition.rs @@ -26,9 +26,11 @@ use stacks_common::util::log; use stacks_common::util::uint::{BitArray, Uint256, Uint512}; use crate::burnchains::{ - Address, Burnchain, BurnchainBlock, BurnchainBlockHeader, PublicKey, Txid, + Address, Burnchain, BurnchainBlock, BurnchainBlockHeader, BurnchainSigner, + BurnchainStateTransition, PublicKey, Txid, }; -use crate::chainstate::burn::db::sortdb::SortitionHandleTx; +use crate::chainstate::burn::atc::{AtcRational, ATC_LOOKUP}; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleTx}; use crate::chainstate::burn::distribution::BurnSamplePoint; use crate::chainstate::burn::operations::{ BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, @@ -143,8 +145,8 @@ impl BlockSnapshot { for i in 0..dist.len() { if (dist[i].range_start <= index) && (index < dist[i].range_end) { debug!( - "Sampled {}: sortition index = {}", - dist[i].candidate.block_header_hash, &index + "Sampled {}: i = {}, sortition index = {}", + dist[i].candidate.block_header_hash, i, &index ); return Some(i); } @@ -154,30 +156,21 @@ impl BlockSnapshot { panic!("FATAL ERROR: unable to map {} to a range", index); } - /// Select the next Stacks block header hash using cryptographic sortition. - /// Go through all block commits at this height, find out how any burn tokens - /// were spent for them, and select one at random using the relative burn amounts - /// to weight the sample. Use HASH(sortition_hash ++ last_VRF_seed) to pick the - /// winning block commit, and by extension, the next VRF seed. - /// - /// If there are no block commits outstanding, then no winner is picked. - /// - /// Note that the VRF seed is not guaranteed to be the hash of a valid VRF - /// proof. 
Miners would only build off of leader block commits for which they - /// (1) have the associated block data and (2) the proof in that block is valid. - fn select_winning_block( + /// Get the last winning miner's VRF seed in this block's fork. + /// Returns Ok(VRF seed) on success + /// Returns Err(..) on DB error + /// An initial VRF seed value will be returned if there are no prior commits. + fn get_last_vrf_seed( sort_tx: &mut SortitionHandleTx, block_header: &BurnchainBlockHeader, - sortition_hash: &SortitionHash, - burn_dist: &[BurnSamplePoint], - ) -> Result, db_error> { + ) -> Result { let burn_block_height = block_header.block_height; // get the last winner's VRF seed in this block's fork let last_sortition_snapshot = sort_tx.get_last_snapshot_with_sortition(burn_block_height - 1)?; - let VRF_seed = if last_sortition_snapshot.is_initial() { + let vrf_seed = if last_sortition_snapshot.is_initial() { // this is the sentinal "first-sortition" block VRFSeed::initial() } else { @@ -190,10 +183,31 @@ impl BlockSnapshot { .expect("FATAL ERROR: no winning block commits in database (indicates corruption)") .new_seed }; + Ok(vrf_seed) + } + + /// Select the next Stacks block header hash using cryptographic sortition. + /// Go through all block commits at this height, find out how many burn tokens + /// were spent for them, and select one at random using the relative burn amounts + /// to weight the sample. Use HASH(sortition_hash ++ last_VRF_seed) to pick the + /// winning block commit, and by extension, the next VRF seed. + /// + /// If there are no block commits outstanding, then no winner is picked. + /// + /// Note that the VRF seed is not guaranteed to be the hash of a valid VRF + /// proof. Miners would only build off of leader block commits for which they + /// (1) have the associated block data and (2) the proof in that block is valid. 
+ fn select_winning_block( + sort_tx: &mut SortitionHandleTx, + block_header: &BurnchainBlockHeader, + sortition_hash: &SortitionHash, + burn_dist: &[BurnSamplePoint], + ) -> Result, db_error> { + let vrf_seed = Self::get_last_vrf_seed(sort_tx, block_header)?; // pick the next winner let win_idx_opt = - BlockSnapshot::sample_burn_distribution(burn_dist, &VRF_seed, sortition_hash); + BlockSnapshot::sample_burn_distribution(burn_dist, &vrf_seed, sortition_hash); match win_idx_opt { None => { // no winner @@ -201,7 +215,7 @@ impl BlockSnapshot { } Some(win_idx) => { // winner! - Ok(Some(burn_dist[win_idx].candidate.clone())) + Ok(Some((burn_dist[win_idx].candidate.clone(), win_idx))) } } } @@ -216,7 +230,7 @@ impl BlockSnapshot { first_block_height: u64, burn_total: u64, sortition_hash: &SortitionHash, - txids: &Vec, + txids: &[Txid], accumulated_coinbase_ustx: u128, ) -> Result { let block_height = block_header.block_height; @@ -269,6 +283,210 @@ impl BlockSnapshot { }) } + /// Determine if we need to reject a block-commit due to miner inactivity. + /// Return true if the miner is sufficiently active. + /// Return false if not. + fn check_miner_is_active( + epoch_id: StacksEpochId, + sampled_window_len: usize, + winning_block_sender: &BurnchainSigner, + miner_frequency: u8, + ) -> bool { + // miner frequency only applies if the window is at least as long as the commit window + // sampled from the chain state (e.g. 
because this window can be 1 during the prepare + // phase) + let epoch_frequency_usize = + usize::try_from(epoch_id.mining_commitment_frequency()).expect("Infallible"); + if usize::from(miner_frequency) < epoch_frequency_usize.min(sampled_window_len) { + // this miner didn't mine often enough to win anyway + info!("Miner did not mine often enough to win"; + "miner_sender" => %winning_block_sender, + "miner_frequency" => miner_frequency, + "minimum_frequency" => epoch_id.mining_commitment_frequency(), + "window_length" => sampled_window_len); + + return false; + } + + true + } + + /// Determine the miner's assumed total commit carryover. + /// + /// total-block-spend + /// This is ATC = min(1, ----------------------------------- ) + /// median-windowed-total-block-spend + /// + /// Now, this value is 1.0 in the "happy path" case where miners commit the same BTC in this + /// block as they had done so over the majority of the windowed burnchain blocks. + /// + /// It's also 1.0 if miners spend _more_ than this median. + /// + /// It's between 0.0 and 1.0 only if miners spend _less_ than this median. At this point, it's + /// possible that the "null miner" can win sortition, and the probability of that null miner + /// winning is a function of (1.0 - ATC). + /// + /// Returns the ATC value, and whether or not it decreased. If the ATC decreased, then we must + /// invoke the null miner. + fn get_miner_commit_carryover( + total_burns: Option, + windowed_median_burns: Option, + ) -> (AtcRational, bool) { + let Some(block_burn_total) = total_burns else { + // overflow + return (AtcRational::zero(), false); + }; + + let Some(windowed_median_burns) = windowed_median_burns else { + // overflow + return (AtcRational::zero(), false); + }; + + if windowed_median_burns == 0 { + // no carried commit, so null miner wins by default. 
+ return (AtcRational::zero(), true); + } + + if block_burn_total >= windowed_median_burns { + // clamp to 1.0, and ATC increased + return (AtcRational::one(), false); + } + + ( + AtcRational::frac(block_burn_total, windowed_median_burns), + true, + ) + } + + /// Evaluate the advantage logistic function on the given ATC value. + /// The ATC value will be used to index a lookup table of AtcRationals. + pub(crate) fn null_miner_logistic(atc: AtcRational) -> AtcRational { + let atc_clamp = atc.min(&AtcRational::one()); + let index_max = + u64::try_from(ATC_LOOKUP.len() - 1).expect("infallible -- u64 can't hold 1023usize"); + let index_u64 = if let Some(index_rational) = atc_clamp.mul(&AtcRational::frac(1024, 1)) { + // extract integer part + index_rational.ipart().min(index_max) + } else { + index_max + }; + let index = usize::try_from(index_u64) + .expect("infallible -- usize can't hold u64 integers in [0, 1024)"); + ATC_LOOKUP + .get(index) + .cloned() + .unwrap_or_else(|| ATC_LOOKUP.last().cloned().expect("infallible")) + } + + /// Determine the probability that the null miner will win, given the atc shortage. + /// + /// This is NullP(atc) = (1 - atc) + atc * adv(atc). + /// + /// Where adv(x) is an "advantage function", such that the null miner is more heavily favored + /// to win based on how comparatively little commit carryover there is. Here, adv(x) is a + /// logistic function. + /// + /// In a linear setting -- i.e. the probability of the null miner winning being proportional to + /// the missing carryover -- the probability would simply be (1 - atc). If miners spent only + /// X% of the assumed total commit, then the null miner ought to win with probability (1 - X)%. + /// However, the null miner is advantaged more if the missing carryover is smaller. This is + /// captured with the extra `atc * adv(atc)` term. 
+ pub(crate) fn null_miner_probability(atc: AtcRational) -> AtcRational { + // compute min(1.0, (1.0 - atc) + (atc * adv)) + let adv = Self::null_miner_logistic(atc); + let Some(one_minus_atc) = AtcRational::one().sub(&atc) else { + // somehow, ATC > 1.0, then miners spent more than they did in the last sortition. + // So, the null miner loses. + warn!("ATC > 1.0 ({})", &atc.to_hex()); + return AtcRational::zero(); + }; + + let Some(atc_prod_adv) = atc.mul(&adv) else { + // if this is somehow too big (impossible), it would otherwise imply that the null + // miner advantage is overwhelming + warn!("ATC * ADV == INF ({} * {})", &atc.to_hex(), &adv.to_hex()); + return AtcRational::one(); + }; + + let Some(sum) = one_minus_atc.add(&atc_prod_adv) else { + // if this is somehow too big (impossible), it would otherwise imply that the null + // miner advantage is overwhelming + warn!( + "(1.0 - ATC) + (ATC * ADV) == INF ({} * {})", + &one_minus_atc.to_hex(), + &atc_prod_adv.to_hex() + ); + return AtcRational::one(); + }; + sum.min(&AtcRational::one()) + } + + /// Determine whether or not the null miner has won sortition. + /// This works by creating a second burn distribution: one with the winning block-commit, and + /// one with the null miner. The null miner's mining power will be computed as a function of + /// their ATC advantage. + fn null_miner_wins( + sort_tx: &mut SortitionHandleTx, + block_header: &BurnchainBlockHeader, + sortition_hash: &SortitionHash, + commit_winner: &LeaderBlockCommitOp, + atc: AtcRational, + ) -> Result { + let vrf_seed = Self::get_last_vrf_seed(sort_tx, block_header)?; + + let mut null_winner = commit_winner.clone(); + null_winner.block_header_hash = { + // make the block header hash different, to render it different from the winner. + // Just flip the block header bits. 
+ let mut bhh_bytes = null_winner.block_header_hash.0.clone(); + for byte in bhh_bytes.iter_mut() { + *byte = !*byte; + } + BlockHeaderHash(bhh_bytes) + }; + + let mut null_sample_winner = BurnSamplePoint::zero(null_winner.clone()); + let mut burn_sample_winner = BurnSamplePoint::zero(commit_winner.clone()); + + let null_prob = Self::null_miner_probability(atc); + let null_prob_u256 = null_prob.into_sortition_probability(); + + test_debug!( + "atc = {}, null_prob = {}, null_prob_u256 = {}, sortition_hash: {}", + atc.to_hex(), + null_prob.to_hex(), + null_prob_u256.to_hex_be(), + sortition_hash + ); + null_sample_winner.range_start = Uint256::zero(); + null_sample_winner.range_end = null_prob_u256; + + burn_sample_winner.range_start = null_prob_u256; + burn_sample_winner.range_end = Uint256::max(); + + let burn_dist = [ + // the only fields that matter here are: + // * range_start + // * range_end + // * candidate + null_sample_winner, + burn_sample_winner, + ]; + + // pick the next winner + let Some(win_idx) = + BlockSnapshot::sample_burn_distribution(&burn_dist, &vrf_seed, sortition_hash) + else { + // miner wins by default if there's no winner index + return Ok(false); + }; + + test_debug!("win_idx = {}", win_idx); + + // null miner is index 0 + Ok(win_idx == 0) + } + /// Make a block snapshot from is block's data and the previous block. /// This process will: /// * calculate the new consensus hash @@ -286,10 +504,42 @@ impl BlockSnapshot { my_pox_id: &PoxId, parent_snapshot: &BlockSnapshot, block_header: &BurnchainBlockHeader, - burn_dist: &[BurnSamplePoint], - txids: &Vec, - block_burn_total: Option, + state_transition: &BurnchainStateTransition, + initial_mining_bonus_ustx: u128, + ) -> Result { + // what epoch will this snapshot be in? + let epoch_id = SortitionDB::get_stacks_epoch(sort_tx, parent_snapshot.block_height + 1)? 
+ .unwrap_or_else(|| { + panic!( + "FATAL: no epoch defined at burn height {}", + parent_snapshot.block_height + 1 + ) + }) + .epoch_id; + + Self::make_snapshot_in_epoch( + sort_tx, + burnchain, + my_sortition_id, + my_pox_id, + parent_snapshot, + block_header, + state_transition, + initial_mining_bonus_ustx, + epoch_id, + ) + } + + pub fn make_snapshot_in_epoch( + sort_tx: &mut SortitionHandleTx, + burnchain: &Burnchain, + my_sortition_id: &SortitionId, + my_pox_id: &PoxId, + parent_snapshot: &BlockSnapshot, + block_header: &BurnchainBlockHeader, + state_transition: &BurnchainStateTransition, initial_mining_bonus_ustx: u128, + epoch_id: StacksEpochId, ) -> Result { assert_eq!( parent_snapshot.burn_header_hash, @@ -332,12 +582,12 @@ impl BlockSnapshot { first_block_height, last_burn_total, &next_sortition_hash, - &txids, + &state_transition.txids(), accumulated_coinbase_ustx, ) }; - if burn_dist.len() == 0 { + if state_transition.burn_dist.len() == 0 { // no burns happened debug!( "No burns happened in block"; @@ -350,7 +600,7 @@ impl BlockSnapshot { // NOTE: this only counts burns from leader block commits and user burns that match them. // It ignores user burns that don't match any block. - let block_burn_total = match block_burn_total { + let block_burn_total = match state_transition.total_burns() { Some(total) => { if total == 0 { // no one burned, so no sortition @@ -384,18 +634,76 @@ impl BlockSnapshot { }; // Try to pick a next block. - let winning_block = BlockSnapshot::select_winning_block( + let (winning_block, winning_block_burn_dist_index) = BlockSnapshot::select_winning_block( sort_tx, block_header, &next_sortition_hash, - burn_dist, + &state_transition.burn_dist, )? 
.expect("FATAL: there must be a winner if the burn distribution has 1 or more points"); + // in epoch 3.x and later (Nakamoto and later), there's two additional changes: + // * if the winning miner didn't mine in more than k of n blocks of the window, then their chances of + // winning are 0. + // * There exists a "null miner" that can win sortition, in which case there is no + // sortition. This happens if the assumed total commit with carry-over is sufficiently low. + let mut reject_winner_reason = None; + if epoch_id >= StacksEpochId::Epoch30 { + if !Self::check_miner_is_active( + epoch_id, + state_transition.windowed_block_commits.len(), + &winning_block.apparent_sender, + state_transition.burn_dist[winning_block_burn_dist_index].frequency, + ) { + reject_winner_reason = Some("Miner did not mine often enough to win".to_string()); + } + let (atc, null_active) = Self::get_miner_commit_carryover( + state_transition.total_burns(), + state_transition.windowed_median_burns(), + ); + if null_active && reject_winner_reason.is_none() { + // there's a chance the null miner can win + if Self::null_miner_wins( + sort_tx, + block_header, + &next_sortition_hash, + &winning_block, + atc, + )? { + // null wins + reject_winner_reason = Some( + "Null miner defeats block winner due to insufficient commit carryover" + .to_string(), + ); + } + } + } + + if let Some(reject_winner_reason) = reject_winner_reason { + info!("SORTITION({}): WINNER REJECTED: {}", block_height, &reject_winner_reason; + "txid" => %winning_block.txid, + "block_hash" => %winning_block.block_header_hash); + + // N.B. can't use `make_snapshot_no_sortition()` helper here because then `sort_tx` + // would be mutably borrowed twice. 
+ return BlockSnapshot::make_snapshot_no_sortition( + sort_tx, + my_sortition_id, + my_pox_id, + parent_snapshot, + block_header, + first_block_height, + last_burn_total, + &next_sortition_hash, + &state_transition.txids(), + accumulated_coinbase_ustx, + ); + } + // mix in the winning block's VRF seed to the sortition hash. The next block commits must // prove on this final sortition hash. let final_sortition_hash = next_sortition_hash.mix_VRF_seed(&winning_block.new_seed); - let next_ops_hash = OpsHash::from_txids(&txids); + let next_ops_hash = OpsHash::from_txids(&state_transition.txids()); let next_ch = ConsensusHash::from_parent_block_data( sort_tx, &next_ops_hash, @@ -406,7 +714,7 @@ impl BlockSnapshot { my_pox_id, )?; - debug!( + info!( "SORTITION({}): WINNER IS {:?} (from {:?})", block_height, &winning_block.block_header_hash, &winning_block.txid ); @@ -461,8 +769,11 @@ mod test { use super::*; use crate::burnchains::tests::*; - use crate::burnchains::*; + use crate::burnchains::{BurnchainSigner, *}; + use crate::chainstate::burn::atc::AtcRational; + use crate::chainstate::burn::db::sortdb::tests::test_append_snapshot_with_winner; use crate::chainstate::burn::db::sortdb::*; + use crate::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; use crate::chainstate::burn::operations::*; use crate::chainstate::stacks::*; @@ -473,10 +784,8 @@ mod test { my_pox_id: &PoxId, parent_snapshot: &BlockSnapshot, block_header: &BurnchainBlockHeader, - burn_dist: &[BurnSamplePoint], - txids: &Vec, + burnchain_state_transition: &BurnchainStateTransition, ) -> Result { - let total_burn = BurnSamplePoint::get_total_burns(burn_dist); BlockSnapshot::make_snapshot( sort_tx, burnchain, @@ -484,9 +793,7 @@ mod test { my_pox_id, parent_snapshot, block_header, - burn_dist, - txids, - total_burn, + burnchain_state_transition, 0, ) } @@ -540,8 +847,7 @@ mod test { &pox_id, &initial_snapshot, &empty_block_header, - &vec![], - &vec![], + 
&BurnchainStateTransition::noop(), ) .unwrap(); sn @@ -567,6 +873,7 @@ mod test { 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, ]), + frequency: 10, candidate: LeaderBlockCommitOp::initial( &BlockHeaderHash([1u8; 32]), first_block_height + 1, @@ -594,8 +901,11 @@ mod test { &pox_id, &initial_snapshot, &empty_block_header, - &vec![empty_burn_point.clone()], - &vec![key.txid.clone()], + &BurnchainStateTransition { + burn_dist: vec![empty_burn_point.clone()], + accepted_ops: vec![BlockstackOperationType::LeaderKeyRegister(key.clone())], + ..BurnchainStateTransition::noop() + }, ) .unwrap(); sn @@ -604,4 +914,259 @@ mod test { assert!(!snapshot_no_burns.sortition); assert_eq!(snapshot_no_transactions.total_burn, 0); } + + #[test] + fn test_check_is_miner_active() { + assert_eq!(StacksEpochId::Epoch30.mining_commitment_frequency(), 3); + assert_eq!(StacksEpochId::Epoch25.mining_commitment_frequency(), 0); + + // reward phase + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 6, + &BurnchainSigner("".to_string()), + 6 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 6, + &BurnchainSigner("".to_string()), + 5 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 6, + &BurnchainSigner("".to_string()), + 4 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 6, + &BurnchainSigner("".to_string()), + 3 + )); + assert!(!BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 6, + &BurnchainSigner("".to_string()), + 2 + )); + + // prepare phase + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 5 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 4 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 3 + )); + assert!(BlockSnapshot::check_miner_is_active( + 
StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 2 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 1 + )); + assert!(!BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 0 + )); + } + + #[test] + fn test_get_miner_commit_carryover() { + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(None, None), + (AtcRational::zero(), false) + ); + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(None, Some(1)), + (AtcRational::zero(), false) + ); + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(Some(1), None), + (AtcRational::zero(), false) + ); + + // ATC increased + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(Some(1), Some(1)), + (AtcRational::one(), false) + ); + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(Some(2), Some(1)), + (AtcRational::one(), false) + ); + + // no carried commit + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(Some(2), Some(0)), + (AtcRational::zero(), true) + ); + + // assumed carryover + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(Some(2), Some(4)), + (AtcRational::frac(2, 4), true) + ); + } + + #[test] + fn test_null_miner_logistic() { + for i in 0..1024 { + let atc_u256 = ATC_LOOKUP[i]; + let null_miner_lgst = + BlockSnapshot::null_miner_logistic(AtcRational::frac(i as u64, 1024)); + assert_eq!(null_miner_lgst, atc_u256); + } + assert_eq!( + BlockSnapshot::null_miner_logistic(AtcRational::zero()), + ATC_LOOKUP[0] + ); + assert_eq!( + BlockSnapshot::null_miner_logistic(AtcRational::one()), + *ATC_LOOKUP.last().as_ref().cloned().unwrap() + ); + assert_eq!( + BlockSnapshot::null_miner_logistic(AtcRational::frac(100, 1)), + *ATC_LOOKUP.last().as_ref().cloned().unwrap() + ); + } + + /// This test runs 100 sortitions, and in each sortition, it verifies that the null miner will + /// win for the range of ATC-C values which put the sortition 
index into the null miner's + /// BurnSamplePoint range. The ATC-C values directly influence the null miner's + /// BurnSamplePoint range, so given a fixed sortition index, we can verify that the + /// `null_miner_wins()` function returns `true` exactly when the sortition index falls into the + /// null miner's range. The ATC-C values are sampled through linear interpolation between 0.0 + /// and 1.0 in steps of 0.01. + #[test] + fn test_null_miner_wins() { + let first_burn_hash = BurnchainHeaderHash([0xfe; 32]); + let parent_first_burn_hash = BurnchainHeaderHash([0xff; 32]); + let first_block_height = 120; + + let mut prev_block_header = BurnchainBlockHeader { + block_height: first_block_height, + block_hash: first_burn_hash.clone(), + parent_block_hash: parent_first_burn_hash.clone(), + num_txs: 0, + timestamp: 12345, + }; + + let burnchain = Burnchain { + pox_constants: PoxConstants::test_default(), + peer_version: 0x012345678, + network_id: 0x9abcdef0, + chain_name: "bitcoin".to_string(), + network_name: "testnet".to_string(), + working_dir: "/nope".to_string(), + consensus_hash_lifetime: 24, + stable_confirmations: 7, + first_block_timestamp: 0, + first_block_height, + initial_reward_start_block: first_block_height, + first_block_hash: first_burn_hash.clone(), + }; + + let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap(); + + for i in 0..100 { + let header = BurnchainBlockHeader { + block_height: prev_block_header.block_height + 1, + block_hash: BurnchainHeaderHash([i as u8; 32]), + parent_block_hash: prev_block_header.block_hash.clone(), + num_txs: 0, + timestamp: prev_block_header.timestamp + (i as u64) + 1, + }; + + let sortition_hash = SortitionHash([i as u8; 32]); + + let commit_winner = LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: BlockHeaderHash([i as u8; 32]), + new_seed: VRFSeed([i as u8; 32]), + parent_block_ptr: 0, + parent_vtxindex: 0, + key_block_ptr: 0, + key_vtxindex: 0, + memo: vec![0x80], + 
commit_outs: vec![], + + burn_fee: 100, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner(format!("signer {}", i)), + txid: Txid([i as u8; 32]), + vtxindex: 0, + block_height: header.block_height, + burn_parent_modulus: (i % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: header.block_hash.clone(), + }; + + let tip = SortitionDB::get_canonical_burn_chain_tip(db.conn()).unwrap(); + test_append_snapshot_with_winner( + &mut db, + header.block_hash.clone(), + &vec![BlockstackOperationType::LeaderBlockCommit( + commit_winner.clone(), + )], + Some(tip), + Some(commit_winner.clone()), + ); + + let mut sort_tx = db.tx_begin_at_tip(); + + for j in 0..100 { + let atc = AtcRational::from_f64_unit((j as f64) / 100.0); + let null_prob = BlockSnapshot::null_miner_probability(atc); + + // NOTE: this tests .into_sortition_probability() + let null_prob_u256 = if null_prob.inner() >= AtcRational::one().inner() { + // prevent left-shift overflow + AtcRational::one_sup().into_inner() << 192 + } else { + null_prob.into_inner() << 192 + }; + + let null_wins = BlockSnapshot::null_miner_wins( + &mut sort_tx, + &header, + &sortition_hash, + &commit_winner, + atc, + ) + .unwrap(); + debug!("null_wins: {},{}: {}", i, j, null_wins); + + let vrf_seed = BlockSnapshot::get_last_vrf_seed(&mut sort_tx, &header).unwrap(); + let index = sortition_hash.mix_VRF_seed(&vrf_seed).to_uint256(); + + if index < null_prob_u256 { + assert!(null_wins); + } else { + assert!(!null_wins); + } + } + + prev_block_header = header.clone(); + } + } } diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index e54e9f0205..96eae44641 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -1603,6 +1603,7 @@ impl< /// block can be re-processed in that event. 
fn undo_stacks_block_orphaning( burnchain_conn: &DBConn, + burnchain_indexer: &B, ic: &SortitionDBConn, chainstate_db_tx: &mut DBTx, first_invalidate_start_block: u64, @@ -1613,8 +1614,11 @@ impl< first_invalidate_start_block, last_invalidate_start_block ); for burn_height in first_invalidate_start_block..(last_invalidate_start_block + 1) { - let burn_header = match BurnchainDB::get_burnchain_header(burnchain_conn, burn_height)? - { + let burn_header = match BurnchainDB::get_burnchain_header( + burnchain_conn, + burnchain_indexer, + burn_height, + )? { Some(hdr) => hdr, None => { continue; @@ -1840,6 +1844,7 @@ impl< // sortitions let revalidated_burn_header = BurnchainDB::get_burnchain_header( self.burnchain_blocks_db.conn(), + &self.burnchain_indexer, first_invalidate_start_block - 1, ) .expect("FATAL: failed to read burnchain DB") @@ -1854,6 +1859,7 @@ impl< // invalidate all descendant sortitions, no matter what. let invalidated_burn_header = BurnchainDB::get_burnchain_header( self.burnchain_blocks_db.conn(), + &self.burnchain_indexer, last_invalidate_start_block - 1, ) .expect("FATAL: failed to read burnchain DB") @@ -2045,6 +2051,7 @@ impl< // un-orphan blocks that had been orphaned but were tied to this now-revalidated sortition history Self::undo_stacks_block_orphaning( &self.burnchain_blocks_db.conn(), + &self.burnchain_indexer, &ic, &mut chainstate_db_tx, first_invalidate_start_block, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index fffa64da3c..0f3abe5c29 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -42,6 +42,7 @@ use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::address::PoxAddress; +use 
crate::chainstate::stacks::boot::pox_4_tests::{get_stacking_minimum, get_tip}; use crate::chainstate::stacks::boot::signers_tests::{readonly_call, readonly_call_with_sortdb}; use crate::chainstate::stacks::boot::test::{ key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, @@ -823,15 +824,19 @@ fn test_nakamoto_chainstate_getters() { .unwrap() .is_some()); - // this should fail, since it's not idempotent -- the highest tenure _is_ this tenure - assert!(NakamotoChainState::check_nakamoto_tenure( - chainstate.db(), - &mut sort_tx, - &blocks[0].header, - &tenure_change_payload, - ) - .unwrap() - .is_none()); + // this should return the previous tenure + assert_eq!( + NakamotoChainState::check_nakamoto_tenure( + chainstate.db(), + &mut sort_tx, + &blocks[0].header, + &tenure_change_payload, + ) + .unwrap() + .unwrap() + .tenure_id_consensus_hash, + tenure_change_payload.prev_tenure_consensus_hash + ); let cur_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sort_tx.sqlite()).unwrap(); let (cur_stacks_ch, cur_stacks_bhh, cur_stacks_height) = @@ -853,14 +858,18 @@ fn test_nakamoto_chainstate_getters() { .unwrap(); // check works (this would be the first tenure) - assert!(NakamotoChainState::check_nakamoto_tenure( - chainstate.db(), - &mut sort_tx, - &blocks[0].header, - &tenure_change_payload, - ) - .unwrap() - .is_some()); + assert_eq!( + NakamotoChainState::check_nakamoto_tenure( + chainstate.db(), + &mut sort_tx, + &blocks[0].header, + &tenure_change_payload, + ) + .unwrap() + .unwrap() + .tenure_id_consensus_hash, + tenure_change_payload.prev_tenure_consensus_hash + ); // restore sort_tx @@ -1056,24 +1065,32 @@ fn test_nakamoto_chainstate_getters() { ) .unwrap(); - assert!(NakamotoChainState::check_nakamoto_tenure( - chainstate.db(), - &mut sort_tx, - &new_blocks[0].header, - &tenure_change_payload, - ) - .unwrap() - .is_some()); + assert_eq!( + NakamotoChainState::check_nakamoto_tenure( + chainstate.db(), + &mut sort_tx, + &new_blocks[0].header, + 
&tenure_change_payload, + ) + .unwrap() + .unwrap() + .tenure_id_consensus_hash, + tenure_change_payload.prev_tenure_consensus_hash + ); - // checks on older confired tenures continue to fail - assert!(NakamotoChainState::check_nakamoto_tenure( - chainstate.db(), - &mut sort_tx, - &blocks[0].header, - &old_tenure_change_payload, - ) - .unwrap() - .is_none()); + // checks on older confired tenures return the prev tenure + assert_eq!( + NakamotoChainState::check_nakamoto_tenure( + chainstate.db(), + &mut sort_tx, + &blocks[0].header, + &old_tenure_change_payload, + ) + .unwrap() + .unwrap() + .tenure_id_consensus_hash, + old_tenure_change_payload.prev_tenure_consensus_hash + ); // restore sort_tx diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 33ee265369..ab9ae6a5f9 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -176,6 +176,7 @@ impl NakamotoBlockBuilder { total_burn: u64, tenure_change: Option<&StacksTransaction>, coinbase: Option<&StacksTransaction>, + bitvec_len: u16, ) -> Result { let next_height = parent_stacks_header .anchored_header @@ -208,6 +209,7 @@ impl NakamotoBlockBuilder { total_burn, tenure_id_consensus_hash.clone(), parent_stacks_header.index_block_hash(), + bitvec_len, ), }) } @@ -406,6 +408,7 @@ impl NakamotoBlockBuilder { settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, signer_transactions: Vec, + signer_bitvec_len: u16, ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec), Error> { let (tip_consensus_hash, tip_block_hash, tip_height) = ( parent_stacks_header.consensus_hash.clone(), @@ -426,6 +429,7 @@ impl NakamotoBlockBuilder { total_burn, tenure_info.tenure_change_tx(), tenure_info.coinbase_tx(), + signer_bitvec_len, )?; let ts_start = get_epoch_time_ms(); @@ -507,43 +511,6 @@ impl NakamotoBlockBuilder { pub fn get_bytes_so_far(&self) -> u64 { self.bytes_so_far } - - /// Make a StackerDB 
chunk message containing a proposed block. - /// Sign it with the miner's private key. - /// Automatically determine which StackerDB slot and version number to use. - /// Returns Some(chunk) if the given key corresponds to one of the expected miner slots - /// Returns None if not - /// Returns an error on signing or DB error - pub fn make_stackerdb_block_proposal( - sortdb: &SortitionDB, - tip: &BlockSnapshot, - stackerdbs: &StackerDBs, - block: &T, - miner_privkey: &StacksPrivateKey, - miners_contract_id: &QualifiedContractIdentifier, - ) -> Result, Error> { - let miner_pubkey = StacksPublicKey::from_private(&miner_privkey); - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &miner_pubkey)? - else { - // No slot exists for this miner - return Ok(None); - }; - // proposal slot is the first slot. - let slot_id = slot_range.start; - // Get the LAST slot version number written to the DB. If not found, use 0. - // Add 1 to get the NEXT version number - // Note: we already check above for the slot's existence - let slot_version = stackerdbs - .get_slot_version(&miners_contract_id, slot_id)? - .unwrap_or(0) - .saturating_add(1); - let block_bytes = block.serialize_to_vec(); - let mut chunk = StackerDBChunkData::new(slot_id, slot_version, block_bytes); - chunk - .sign(miner_privkey) - .map_err(|_| net_error::SigningError("Failed to sign StackerDB chunk".into()))?; - Ok(Some(chunk)) - } } impl BlockBuilder for NakamotoBlockBuilder { diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 724f3681cd..2cdf93eef5 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -152,7 +152,7 @@ lazy_static! 
{ block_height INTEGER NOT NULL, -- root hash of the internal, not-consensus-critical MARF that allows us to track chainstate/fork metadata index_root TEXT NOT NULL, - -- burn header hash corresponding to the consensus hash (NOT guaranteed to be unique, since we can + -- burn header hash corresponding to the consensus hash (NOT guaranteed to be unique, since we can -- have 2+ blocks per burn block if there's a PoX fork) burn_header_hash TEXT NOT NULL, -- height of the burnchain block header that generated this consensus hash @@ -188,7 +188,7 @@ lazy_static! { header_type TEXT NOT NULL, -- hash of the block block_hash TEXT NOT NULL, - -- index_block_hash is the hash of the block hash and consensus hash of the burn block that selected it, + -- index_block_hash is the hash of the block hash and consensus hash of the burn block that selected it, -- and is guaranteed to be globally unique (across all Stacks forks and across all PoX forks). -- index_block_hash is the block hash fed into the MARF index. index_block_hash TEXT NOT NULL, @@ -503,6 +503,7 @@ impl NakamotoBlockHeader { burn_spent: u64, consensus_hash: ConsensusHash, parent_block_id: StacksBlockId, + bitvec_len: u16, ) -> NakamotoBlockHeader { NakamotoBlockHeader { version: NAKAMOTO_BLOCK_VERSION, @@ -514,7 +515,8 @@ impl NakamotoBlockHeader { state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), + signer_bitvec: BitVec::ones(bitvec_len) + .expect("BUG: bitvec of length-1 failed to construct"), } } @@ -1826,6 +1828,11 @@ impl NakamotoChainState { .ok_or(ChainstateError::DBError(DBError::NotFoundError))?; let aggregate_key_block_header = Self::get_canonical_block_header(chainstate.db(), sortdb)?.unwrap(); + let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), block_sn.block_height)? 
+ .ok_or(ChainstateError::InvalidStacksBlock( + "Failed to get epoch ID".into(), + ))? + .epoch_id; let aggregate_public_key = Self::load_aggregate_public_key( sortdb, @@ -1833,7 +1840,7 @@ impl NakamotoChainState { chainstate, block_sn.block_height, &aggregate_key_block_header.index_block_hash(), - true, + epoch_id >= StacksEpochId::Epoch30, )?; Ok(aggregate_public_key) } @@ -2586,6 +2593,27 @@ impl NakamotoChainState { "parent_header_hash" => %parent_header_hash, ); + if new_tenure { + clarity_tx + .connection() + .as_free_transaction(|clarity_tx_conn| { + clarity_tx_conn.with_clarity_db(|db| { + db.set_tenure_height( + coinbase_height + .try_into() + .expect("Tenure height overflowed 32-bit range"), + )?; + Ok(()) + }) + }) + .map_err(|e| { + error!("Failed to set tenure height during block setup"; + "error" => ?e, + ); + e + })?; + } + let evaluated_epoch = clarity_tx.get_epoch(); let auto_unlock_events = if evaluated_epoch >= StacksEpochId::Epoch21 { @@ -2982,6 +3010,7 @@ impl NakamotoChainState { debug!( "Append nakamoto block"; "block" => format!("{}/{block_hash}", block.header.consensus_hash), + "block_id" => %block.header.block_id(), "parent_block" => %block.header.parent_block_id, "stacks_height" => next_block_height, "total_burns" => block.header.burn_spent, diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index 5049286908..e776ca41db 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -514,7 +514,7 @@ impl NakamotoSigners { return false; } if origin_nonce < *account_nonce { - debug!("valid_vote_transaction: Received a transaction with an outdated nonce ({account_nonce} < {origin_nonce})."); + debug!("valid_vote_transaction: Received a transaction with an outdated nonce ({origin_nonce} < {account_nonce})."); return false; } Self::parse_vote_for_aggregate_public_key(transaction).is_some() diff --git 
a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 7389c03337..c9e5c0cf59 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -528,6 +528,17 @@ impl NakamotoChainState { } } + /// Get the nakamoto tenure by id + pub fn get_nakamoto_tenure_change_by_tenure_id( + headers_conn: &Connection, + tenure_consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; + let args: &[&dyn ToSql] = &[&tenure_consensus_hash]; + let tenure_opt: Option = query_row(headers_conn, sql, args)?; + Ok(tenure_opt) + } + /// Get a nakamoto tenure-change by its tenure ID consensus hash. /// Get the highest such record. It will be the last-processed BlockFound tenure /// for the given sortition consensus hash. @@ -544,7 +555,7 @@ impl NakamotoChainState { Ok(tenure_opt) } - /// Get the highest processed tenure on the canonical sortition history. + /// Get the highest non-empty processed tenure on the canonical sortition history. pub fn get_highest_nakamoto_tenure( headers_conn: &Connection, sortdb_conn: &Connection, @@ -555,10 +566,7 @@ impl NakamotoChainState { // no chain tip, so no tenure return Ok(None); } - let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = &[&tip_ch]; - let tenure_opt: Option = query_row(headers_conn, sql, args)?; - Ok(tenure_opt) + Self::get_nakamoto_tenure_change_by_tenure_id(headers_conn, &tip_ch) } /// Verify that a tenure change tx is a valid first-ever tenure change. 
It must connect to an @@ -655,7 +663,7 @@ impl NakamotoChainState { /// * previous_tenure_blocks /// * cause /// - /// Returns Ok(Some(highest-processed-tenure)) on success + /// Returns Ok(Some(processed-tenure)) on success /// Returns Ok(None) if the tenure change is invalid /// Returns Err(..) on DB error pub(crate) fn check_nakamoto_tenure( @@ -742,10 +750,13 @@ impl NakamotoChainState { return Ok(None); } - let Some(highest_processed_tenure) = - Self::get_highest_nakamoto_tenure(headers_conn, sort_handle.sqlite())? + // Note in the extend case, this will actually return the current tenure, not the parent as prev_tenure_consensus_hash will be the same as tenure_consensus_hash + let Some(tenure) = Self::get_nakamoto_tenure_change_by_tenure_id( + headers_conn, + &tenure_payload.prev_tenure_consensus_hash, + )? else { - // no previous tenures. This is the first tenure change. It should point to an epoch + // not building off of a previous Nakamoto tenure. This is the first tenure change. It should point to an epoch // 2.x block. return Self::check_first_nakamoto_tenure_change(headers_conn, tenure_payload); }; @@ -764,84 +775,34 @@ impl NakamotoChainState { ); return Ok(None); } - if tenure_payload.burn_view_consensus_hash - == highest_processed_tenure.burn_view_consensus_hash - { - // if we're extending tenure within the same sortition, then the tenure and - // prev_tenure consensus hashes must match that of the highest. 
- if highest_processed_tenure.tenure_id_consensus_hash - != tenure_payload.tenure_consensus_hash - || highest_processed_tenure.tenure_id_consensus_hash - != tenure_payload.prev_tenure_consensus_hash - { - warn!("Invalid tenure-change: tenure extension within the same sortition tries to override the highest sortition"; - "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash, - "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash, - "highest_processed_tenure.consensus_hash" => %highest_processed_tenure.tenure_id_consensus_hash, - "highest_processed_tenure.prev_consensus_hash" => %highest_processed_tenure.prev_tenure_id_consensus_hash - ); - return Ok(None); - } - } } - } - - let Some(last_tenure_finish_block_id) = Self::get_nakamoto_tenure_finish_block_header( - headers_conn, - &highest_processed_tenure.tenure_id_consensus_hash, - )? - .map(|hdr| hdr.index_block_hash()) else { - // last tenure doesn't exist (should be unreachable) - warn!("Invalid tenure-change: no blocks found for highest processed tenure"; - "consensus_hash" => %highest_processed_tenure.tenure_id_consensus_hash, - ); - return Ok(None); }; - // must build atop the highest-processed tenure. - // NOTE: for tenure-extensions, the second check is always false, since the tenure and - // prev-tenure consensus hashes must be the same per the above check. 
- if last_tenure_finish_block_id != tenure_payload.previous_tenure_end - || highest_processed_tenure.tenure_id_consensus_hash - != tenure_payload.prev_tenure_consensus_hash - { - // not continuous -- this tenure-change does not point to the end of the - // last-processed tenure, or does not point to the last-processed tenure's sortition - warn!("Invalid tenure-change: discontiguous"; - "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash, - "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash, - "highest_processed_tenure.consensus_hash" => %highest_processed_tenure.tenure_id_consensus_hash, - "last_tenure_finish_block_id" => %last_tenure_finish_block_id, - "tenure_payload.previous_tenure_end" => %tenure_payload.previous_tenure_end - ); - return Ok(None); - } - - // The tenure-change must report the number of blocks _so far_ in the current tenure. If - // there is a succession of tenure-extensions for a given tenure, then the reported tenure + // The tenure-change must report the number of blocks _so far_ in the previous tenure (note if this is a TenureChangeCause::Extended, then its parent tenure will be its own tenure). + // If there is a succession of tenure-extensions for a given tenure, then the reported tenure // length must report the number of blocks since the last _sortition-induced_ tenure // change. 
let tenure_len = Self::get_nakamoto_tenure_length( headers_conn, - &highest_processed_tenure.tenure_id_consensus_hash, + &tenure_payload.prev_tenure_consensus_hash, )?; if tenure_len != tenure_payload.previous_tenure_blocks { // invalid -- does not report the correct number of blocks in the past tenure warn!("Invalid tenure-change: wrong number of blocks"; "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash, - "highest_processed_tenure.consensus_hash" => %highest_processed_tenure.tenure_id_consensus_hash, + "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash, "tenure_len" => tenure_len, "tenure_payload.previous_tenure_blocks" => tenure_payload.previous_tenure_blocks ); return Ok(None); } - Ok(Some(highest_processed_tenure)) + Ok(Some(tenure)) } /// Advance the tenures table with a validated block's tenure data. /// This applies to both tenure-changes and tenure-extends. - /// Returns the highest tenure-change height (this is parent_coinbase_height + 1 if there was a + /// Returns the tenure-change height (this is parent_coinbase_height + 1 if there was a /// tenure-change tx, or just parent_coinbase_height if there was a tenure-extend tx or no tenure /// txs at all). /// TODO: unit test @@ -869,7 +830,7 @@ impl NakamotoChainState { } }; - let Some(highest_processed_tenure) = + let Some(processed_tenure) = Self::check_nakamoto_tenure(headers_tx, sort_tx, &block.header, tenure_payload)? 
else { return Err(ChainstateError::InvalidStacksTransaction( @@ -882,7 +843,7 @@ impl NakamotoChainState { headers_tx, &block.header, coinbase_height, - highest_processed_tenure + processed_tenure .tenure_index .checked_add(1) .expect("too many tenure-changes"), diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 9844fa7b74..f8d048aaf1 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -23,6 +23,7 @@ use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::StacksAddressExtensions; use clarity::vm::Value; +use libstackerdb::StackerDBChunkData; use rand::{thread_rng, RngCore}; use rusqlite::{Connection, ToSql}; use stacks_common::address::AddressHashMode; @@ -2019,31 +2020,26 @@ fn test_make_miners_stackerdb_config() { txs: vec![], }; let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let miner_privkey = &miner_keys[i]; + let miner_pubkey = StacksPublicKey::from_private(miner_privkey); + let slot_id = NakamotoChainState::get_miner_slot(&sort_db, &tip, &miner_pubkey) + .expect("Failed to get miner slot"); if sortition { - let chunk = NakamotoBlockBuilder::make_stackerdb_block_proposal( - &sort_db, - &tip, - &stackerdbs, - &block, - &miner_keys[i], - &miners_contract_id, - ) - .unwrap() - .unwrap(); + let slot_id = slot_id.expect("No miner slot exists for this miner").start; + let slot_version = stackerdbs + .get_slot_version(&miners_contract_id, slot_id) + .expect("Failed to get slot version") + .unwrap_or(0) + .saturating_add(1); + let block_bytes = block.serialize_to_vec(); + let mut chunk = StackerDBChunkData::new(slot_id, slot_version, block_bytes); + chunk.sign(&miner_keys[i]).expect("Failed to sign chunk"); assert_eq!(chunk.slot_version, 1); assert_eq!(chunk.data, block.serialize_to_vec()); stackerdb_chunks.push(chunk); } else { - 
assert!(NakamotoBlockBuilder::make_stackerdb_block_proposal( - &sort_db, - &tip, - &stackerdbs, - &block, - &miner_keys[i], - &miners_contract_id, - ) - .unwrap() - .is_none()); + // We are not a miner anymore and should not have any slot + assert!(slot_id.is_none()); } } // miners are "stable" across snapshots diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 570b0cc3d3..b2b275a0e1 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -547,6 +547,7 @@ impl TestStacksNode { } else { None }, + 1, ) .unwrap() } else { diff --git a/stackslib/src/chainstate/stacks/auth.rs b/stackslib/src/chainstate/stacks/auth.rs index d2981683c0..06cf64d037 100644 --- a/stackslib/src/chainstate/stacks/auth.rs +++ b/stackslib/src/chainstate/stacks/auth.rs @@ -23,19 +23,20 @@ use stacks_common::codec::{ read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, }; use stacks_common::types::chainstate::StacksAddress; -use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::types::{StacksEpochId, StacksPublicKeyBuffer}; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::retry::{BoundReader, RetryReader}; use stacks_common::util::secp256k1::{MessageSignature, MESSAGE_SIGNATURE_ENCODED_SIZE}; use crate::burnchains::{PrivateKey, PublicKey, Txid}; use crate::chainstate::stacks::{ - Error, MultisigHashMode, MultisigSpendingCondition, SinglesigHashMode, - SinglesigSpendingCondition, StacksPrivateKey, StacksPublicKey, TransactionAuth, - TransactionAuthField, TransactionAuthFieldID, TransactionAuthFlags, - TransactionPublicKeyEncoding, TransactionSpendingCondition, - C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + Error, MultisigHashMode, MultisigSpendingCondition, 
OrderIndependentMultisigHashMode, + OrderIndependentMultisigSpendingCondition, SinglesigHashMode, SinglesigSpendingCondition, + StacksPrivateKey, StacksPublicKey, TransactionAuth, TransactionAuthField, + TransactionAuthFieldID, TransactionAuthFlags, TransactionPublicKeyEncoding, + TransactionSpendingCondition, C32_ADDRESS_VERSION_MAINNET_MULTISIG, + C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_MULTISIG, + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use crate::net::{Error as net_error, STACKS_PUBLIC_KEY_ENCODED_SIZE}; @@ -314,6 +315,204 @@ impl MultisigSpendingCondition { } } +impl StacksMessageCodec for OrderIndependentMultisigSpendingCondition { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &(self.hash_mode.clone() as u8))?; + write_next(fd, &self.signer)?; + write_next(fd, &self.nonce)?; + write_next(fd, &self.tx_fee)?; + write_next(fd, &self.fields)?; + write_next(fd, &self.signatures_required)?; + Ok(()) + } + + fn consensus_deserialize( + fd: &mut R, + ) -> Result { + let hash_mode_u8: u8 = read_next(fd)?; + let hash_mode = OrderIndependentMultisigHashMode::from_u8(hash_mode_u8).ok_or( + codec_error::DeserializeError(format!( + "Failed to parse multisig spending condition: unknown hash mode {}", + hash_mode_u8 + )), + )?; + + let signer: Hash160 = read_next(fd)?; + let nonce: u64 = read_next(fd)?; + let tx_fee: u64 = read_next(fd)?; + let fields: Vec = { + let mut bound_read = BoundReader::from_reader(fd, MAX_MESSAGE_LEN as u64); + read_next(&mut bound_read) + }?; + + let signatures_required: u16 = read_next(fd)?; + + // read and decode _exactly_ num_signatures signature buffers + let mut num_sigs_given: u16 = 0; + let mut have_uncompressed = false; + for f in fields.iter() { + match *f { + TransactionAuthField::Signature(ref key_encoding, _) => { + num_sigs_given = + num_sigs_given + .checked_add(1) + .ok_or(codec_error::DeserializeError( + "Failed to parse order independent multisig spending 
condition: too many signatures" + .to_string(), + ))?; + if *key_encoding == TransactionPublicKeyEncoding::Uncompressed { + have_uncompressed = true; + } + } + TransactionAuthField::PublicKey(ref pubk) => { + if !pubk.compressed() { + have_uncompressed = true; + } + } + }; + } + + // must be given the right number of signatures + if num_sigs_given < signatures_required { + let msg = format!( + "Failed to deserialize order independent multisig spending condition: got {num_sigs_given} sigs, expected at least {signatures_required}" + ); + test_debug!("{msg}"); + return Err(codec_error::DeserializeError(msg)); + } + + // must all be compressed if we're using P2WSH + if have_uncompressed && hash_mode == OrderIndependentMultisigHashMode::P2WSH { + let msg = format!( + "Failed to deserialize order independent multisig spending condition: expected compressed keys only" + ); + test_debug!("{msg}"); + return Err(codec_error::DeserializeError(msg)); + } + + Ok(OrderIndependentMultisigSpendingCondition { + signer, + nonce, + tx_fee, + hash_mode, + fields, + signatures_required, + }) + } +} + +impl OrderIndependentMultisigSpendingCondition { + pub fn push_signature( + &mut self, + key_encoding: TransactionPublicKeyEncoding, + signature: MessageSignature, + ) -> () { + self.fields + .push(TransactionAuthField::Signature(key_encoding, signature)); + } + + pub fn push_public_key(&mut self, public_key: StacksPublicKey) -> () { + self.fields + .push(TransactionAuthField::PublicKey(public_key)); + } + + pub fn pop_auth_field(&mut self) -> Option { + self.fields.pop() + } + + pub fn address_mainnet(&self) -> StacksAddress { + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: self.signer.clone(), + } + } + + pub fn address_testnet(&self) -> StacksAddress { + StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_MULTISIG, + bytes: self.signer.clone(), + } + } + + /// Authenticate a spending condition against an initial sighash. 
+ /// In doing so, recover all public keys and verify that they hash to the signer + /// via the given hash mode. + pub fn verify( + &self, + initial_sighash: &Txid, + cond_code: &TransactionAuthFlags, + ) -> Result { + let mut pubkeys = vec![]; + let mut num_sigs: u16 = 0; + let mut have_uncompressed = false; + for field in self.fields.iter() { + let pubkey = match field { + TransactionAuthField::PublicKey(ref pubkey) => { + if !pubkey.compressed() { + have_uncompressed = true; + } + pubkey.clone() + } + TransactionAuthField::Signature(ref pubkey_encoding, ref sigbuf) => { + if *pubkey_encoding == TransactionPublicKeyEncoding::Uncompressed { + have_uncompressed = true; + } + + let (pubkey, _next_sighash) = TransactionSpendingCondition::next_verification( + &initial_sighash, + cond_code, + self.tx_fee, + self.nonce, + pubkey_encoding, + sigbuf, + )?; + num_sigs = num_sigs + .checked_add(1) + .ok_or(net_error::VerifyingError("Too many signatures".to_string()))?; + pubkey + } + }; + pubkeys.push(pubkey); + } + + if num_sigs < self.signatures_required { + return Err(net_error::VerifyingError(format!( + "Not enough signatures. 
Got {num_sigs}, expected at least {req}", + req = self.signatures_required + ))); + } + + if have_uncompressed && self.hash_mode == OrderIndependentMultisigHashMode::P2WSH { + return Err(net_error::VerifyingError( + "Uncompressed keys are not allowed in this hash mode".to_string(), + )); + } + + let addr_bytes = match StacksAddress::from_public_keys( + 0, + &self.hash_mode.to_address_hash_mode(), + self.signatures_required as usize, + &pubkeys, + ) { + Some(a) => a.bytes, + None => { + return Err(net_error::VerifyingError( + "Failed to generate address from public keys".to_string(), + )); + } + }; + + if addr_bytes != self.signer { + return Err(net_error::VerifyingError(format!( + "Signer hash does not equal hash of public key(s): {} != {}", + addr_bytes, self.signer + ))); + } + + Ok(initial_sighash.clone()) + } +} + impl StacksMessageCodec for SinglesigSpendingCondition { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &(self.hash_mode.clone() as u8))?; @@ -461,6 +660,9 @@ impl StacksMessageCodec for TransactionSpendingCondition { TransactionSpendingCondition::Multisig(ref data) => { data.consensus_serialize(fd)?; } + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + data.consensus_serialize(fd)?; + } } Ok(()) } @@ -479,6 +681,10 @@ impl StacksMessageCodec for TransactionSpendingCondition { } else if MultisigHashMode::from_u8(hash_mode_u8).is_some() { let cond = MultisigSpendingCondition::consensus_deserialize(&mut rrd)?; TransactionSpendingCondition::Multisig(cond) + } else if OrderIndependentMultisigHashMode::from_u8(hash_mode_u8).is_some() { + let cond = + OrderIndependentMultisigSpendingCondition::consensus_deserialize(&mut rrd)?; + TransactionSpendingCondition::OrderIndependentMultisig(cond) } else { test_debug!("Invalid address hash mode {}", hash_mode_u8); return Err(codec_error::DeserializeError(format!( @@ -504,11 +710,11 @@ impl TransactionSpendingCondition { 
Some(TransactionSpendingCondition::Singlesig( SinglesigSpendingCondition { - signer: signer_addr.bytes.clone(), + signer: signer_addr.bytes, nonce: 0, tx_fee: 0, hash_mode: SinglesigHashMode::P2PKH, - key_encoding: key_encoding, + key_encoding, signature: MessageSignature::empty(), }, )) @@ -524,7 +730,7 @@ impl TransactionSpendingCondition { Some(TransactionSpendingCondition::Singlesig( SinglesigSpendingCondition { - signer: signer_addr.bytes.clone(), + signer: signer_addr.bytes, nonce: 0, tx_fee: 0, hash_mode: SinglesigHashMode::P2WPKH, @@ -541,13 +747,13 @@ impl TransactionSpendingCondition { let signer_addr = StacksAddress::from_public_keys( 0, &AddressHashMode::SerializeP2SH, - num_sigs as usize, + usize::from(num_sigs), &pubkeys, )?; Some(TransactionSpendingCondition::Multisig( MultisigSpendingCondition { - signer: signer_addr.bytes.clone(), + signer: signer_addr.bytes, nonce: 0, tx_fee: 0, hash_mode: MultisigHashMode::P2SH, @@ -557,6 +763,52 @@ impl TransactionSpendingCondition { )) } + pub fn new_multisig_order_independent_p2sh( + num_sigs: u16, + pubkeys: Vec, + ) -> Option { + let signer_addr = StacksAddress::from_public_keys( + 0, + &AddressHashMode::SerializeP2SH, + usize::from(num_sigs), + &pubkeys, + )?; + + Some(TransactionSpendingCondition::OrderIndependentMultisig( + OrderIndependentMultisigSpendingCondition { + signer: signer_addr.bytes, + nonce: 0, + tx_fee: 0, + hash_mode: OrderIndependentMultisigHashMode::P2SH, + fields: vec![], + signatures_required: num_sigs, + }, + )) + } + + pub fn new_multisig_order_independent_p2wsh( + num_sigs: u16, + pubkeys: Vec, + ) -> Option { + let signer_addr = StacksAddress::from_public_keys( + 0, + &AddressHashMode::SerializeP2WSH, + usize::from(num_sigs), + &pubkeys, + )?; + + Some(TransactionSpendingCondition::OrderIndependentMultisig( + OrderIndependentMultisigSpendingCondition { + signer: signer_addr.bytes, + nonce: 0, + tx_fee: 0, + hash_mode: OrderIndependentMultisigHashMode::P2WSH, + fields: vec![], + 
signatures_required: num_sigs, + }, + )) + } + pub fn new_multisig_p2wsh( num_sigs: u16, pubkeys: Vec, @@ -564,13 +816,13 @@ impl TransactionSpendingCondition { let signer_addr = StacksAddress::from_public_keys( 0, &AddressHashMode::SerializeP2WSH, - num_sigs as usize, + usize::from(num_sigs), &pubkeys, )?; Some(TransactionSpendingCondition::Multisig( MultisigSpendingCondition { - signer: signer_addr.bytes.clone(), + signer: signer_addr.bytes, nonce: 0, tx_fee: 0, hash_mode: MultisigHashMode::P2WSH, @@ -614,6 +866,17 @@ impl TransactionSpendingCondition { } num_sigs } + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + let mut num_sigs: u16 = 0; + for field in data.fields.iter() { + if field.is_signature() { + num_sigs = num_sigs + .checked_add(1) + .expect("Unreasonable amount of signatures"); // something is seriously wrong if this fails + } + } + num_sigs + } } } @@ -623,6 +886,9 @@ impl TransactionSpendingCondition { TransactionSpendingCondition::Multisig(ref multisig_data) => { multisig_data.signatures_required } + TransactionSpendingCondition::OrderIndependentMultisig(ref multisig_data) => { + multisig_data.signatures_required + } } } @@ -630,6 +896,7 @@ impl TransactionSpendingCondition { match *self { TransactionSpendingCondition::Singlesig(ref data) => data.nonce, TransactionSpendingCondition::Multisig(ref data) => data.nonce, + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => data.nonce, } } @@ -637,6 +904,7 @@ impl TransactionSpendingCondition { match *self { TransactionSpendingCondition::Singlesig(ref data) => data.tx_fee, TransactionSpendingCondition::Multisig(ref data) => data.tx_fee, + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => data.tx_fee, } } @@ -648,6 +916,9 @@ impl TransactionSpendingCondition { TransactionSpendingCondition::Multisig(ref mut multisig_data) => { multisig_data.nonce = n; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut multisig_data) => { + 
multisig_data.nonce = n; + } } } @@ -659,6 +930,9 @@ impl TransactionSpendingCondition { TransactionSpendingCondition::Multisig(ref mut multisig_data) => { multisig_data.tx_fee = tx_fee; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut multisig_data) => { + multisig_data.tx_fee = tx_fee; + } } } @@ -666,6 +940,9 @@ impl TransactionSpendingCondition { match *self { TransactionSpendingCondition::Singlesig(ref singlesig_data) => singlesig_data.tx_fee, TransactionSpendingCondition::Multisig(ref multisig_data) => multisig_data.tx_fee, + TransactionSpendingCondition::OrderIndependentMultisig(ref multisig_data) => { + multisig_data.tx_fee + } } } @@ -674,6 +951,9 @@ impl TransactionSpendingCondition { match *self { TransactionSpendingCondition::Singlesig(ref data) => data.address_mainnet(), TransactionSpendingCondition::Multisig(ref data) => data.address_mainnet(), + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + data.address_mainnet() + } } } @@ -682,6 +962,9 @@ impl TransactionSpendingCondition { match *self { TransactionSpendingCondition::Singlesig(ref data) => data.address_testnet(), TransactionSpendingCondition::Multisig(ref data) => data.address_testnet(), + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + data.address_testnet() + } } } @@ -707,6 +990,11 @@ impl TransactionSpendingCondition { multisig_data.nonce = 0; multisig_data.fields.clear(); } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut multisig_data) => { + multisig_data.tx_fee = 0; + multisig_data.nonce = 0; + multisig_data.fields.clear(); + } } } @@ -842,6 +1130,9 @@ impl TransactionSpendingCondition { TransactionSpendingCondition::Multisig(ref data) => { data.verify(initial_sighash, cond_code) } + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + data.verify(initial_sighash, cond_code) + } } } } @@ -908,6 +1199,26 @@ impl TransactionAuth { } } + pub fn from_order_independent_p2sh( + privks: 
&[StacksPrivateKey], + num_sigs: u16, + ) -> Option { + let pubks = privks.iter().map(StacksPublicKey::from_private).collect(); + + TransactionSpendingCondition::new_multisig_order_independent_p2sh(num_sigs, pubks) + .map(TransactionAuth::Standard) + } + + pub fn from_order_independent_p2wsh( + privks: &[StacksPrivateKey], + num_sigs: u16, + ) -> Option { + let pubks = privks.iter().map(StacksPublicKey::from_private).collect(); + + TransactionSpendingCondition::new_multisig_order_independent_p2wsh(num_sigs, pubks) + .map(TransactionAuth::Standard) + } + pub fn from_p2wpkh(privk: &StacksPrivateKey) -> Option { match TransactionSpendingCondition::new_singlesig_p2wpkh(StacksPublicKey::from_private( privk, @@ -1076,10 +1387,40 @@ impl TransactionAuth { } } } + + /// Checks if this TransactionAuth is supported in the passed epoch + /// OrderIndependent multisig is not supported before epoch 3.0 + pub fn is_supported_in_epoch(&self, epoch_id: StacksEpochId) -> bool { + match &self { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + let origin_supported = match origin { + TransactionSpendingCondition::OrderIndependentMultisig(..) => { + epoch_id >= StacksEpochId::Epoch30 + } + _ => true, + }; + let sponsor_supported = match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(..) => { + epoch_id >= StacksEpochId::Epoch30 + } + _ => true, + }; + origin_supported && sponsor_supported + } + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(..) 
=> { + epoch_id >= StacksEpochId::Epoch30 + } + _ => true, + }, + } + } } +#[rustfmt::skip] #[cfg(test)] mod test { + use stacks_common::types::StacksEpochId::Epoch30; use super::*; use crate::chainstate::stacks::{StacksPublicKey as PubKey, *}; use crate::net::codec::test::check_codec_and_corruption; @@ -1102,112 +1443,15 @@ mod test { // hash mode SinglesigHashMode::P2PKH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x7b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // key encoding, TransactionPublicKeyEncoding::Uncompressed as u8, // signature - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, ]; let spending_condition_p2pkh_compressed = SinglesigSpendingCondition { @@ 
-1223,112 +1467,15 @@ mod test { // hash mode SinglesigHashMode::P2PKH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0x59, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x59, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // key encoding TransactionPublicKeyEncoding::Compressed as u8, // signature - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, ]; let spending_conditions = vec![ @@ -1368,224 +1515,27 @@ mod test { // hash mode MultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 
0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x7b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fields length - 0x00, - 0x00, - 0x00, - 0x03, + 0x00, 0x00, 0x00, 0x03, // field #1: signature TransactionAuthFieldID::SignatureUncompressed as u8, // field #1: signature - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // field #2: signature TransactionAuthFieldID::SignatureUncompressed as u8, // filed #2: signature - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, // field #3: public key TransactionAuthFieldID::PublicKeyUncompressed as u8, - // field #3: key (compressed) - 0x03, - 0xef, - 0x23, - 0x40, - 0x51, - 0x8b, - 0x58, - 0x67, - 0xb2, - 0x35, - 0x98, - 0xa9, - 0xcf, - 0x74, - 0x61, - 0x1f, - 0x8b, - 0x98, - 0x06, - 0x4f, - 0x7d, - 0x55, - 0xcd, - 0xb8, - 0xc1, - 0x07, - 0xc6, - 0x7b, - 0x5e, - 0xfc, - 0xbc, - 0x5c, - 0x77, + // field #3: key (uncompressed) + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, // number of signatures required - 0x00, - 0x02, + 0x00, 0x02, ]; let spending_condition_p2sh_compressed = MultisigSpendingCondition { @@ -1616,224 +1566,27 @@ mod test { // hash mode MultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // fields length - 0x00, - 0x00, - 0x00, - 0x03, + 0x00, 0x00, 0x00, 0x03, // field #1: signature TransactionAuthFieldID::SignatureCompressed as u8, // field #1: signature - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 
0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // field #2: signature TransactionAuthFieldID::SignatureCompressed as u8, // filed #2: signature - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, // field #3: public key TransactionAuthFieldID::PublicKeyCompressed as u8, // field #3: key (compressed) - 0x03, - 0xef, - 0x23, - 0x40, - 0x51, - 0x8b, - 0x58, - 0x67, - 0xb2, 
- 0x35, - 0x98, - 0xa9, - 0xcf, - 0x74, - 0x61, - 0x1f, - 0x8b, - 0x98, - 0x06, - 0x4f, - 0x7d, - 0x55, - 0xcd, - 0xb8, - 0xc1, - 0x07, - 0xc6, - 0x7b, - 0x5e, - 0xfc, - 0xbc, - 0x5c, - 0x77, + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, // number of signatures - 0x00, - 0x02, + 0x00, 0x02, ]; let spending_conditions = vec![ @@ -1854,144 +1607,51 @@ mod test { } #[test] - fn tx_stacks_spending_condition_p2wpkh() { - let spending_condition_p2wpkh_compressed = SinglesigSpendingCondition { + fn tx_stacks_spending_condition_order_independent_p2sh() { + // order independent p2sh + let spending_condition_order_independent_p2sh_uncompressed = OrderIndependentMultisigSpendingCondition { signer: Hash160([0x11; 20]), - hash_mode: SinglesigHashMode::P2WPKH, - key_encoding: TransactionPublicKeyEncoding::Compressed, - nonce: 345, - tx_fee: 567, - signature: MessageSignature::from_raw(&vec![0xfe; 65]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 123, + tx_fee: 456, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("04ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c771f112f919b00a6c6c5f51f7c63e1762fe9fac9b66ec75a053db7f51f4a52712b").unwrap()), + ], + signatures_required: 2 }; - let spending_condition_p2wpkh_compressed_bytes = vec![ + let spending_condition_order_independent_p2sh_uncompressed_bytes = vec![ // hash mode - SinglesigHashMode::P2WPKH as u8, + OrderIndependentMultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 
0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0x59, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, - // key encoding - TransactionPublicKeyEncoding::Compressed as u8, - // signature - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fields length + 0x00, 0x00, 0x00, 0x03, + // field #1: signature + TransactionAuthFieldID::SignatureUncompressed as u8, + // field #1: signature + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // field #2: signature + TransactionAuthFieldID::SignatureUncompressed as u8, + // filed #2: signature + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + // field #3: public key + TransactionAuthFieldID::PublicKeyUncompressed as u8, + // field #3: key (uncompressed) + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, + // number of signatures required + 0x00, 0x02, ]; - let spending_conditions = vec![spending_condition_p2wpkh_compressed]; - let spending_conditions_bytes = vec![spending_condition_p2wpkh_compressed_bytes]; - - for i in 0..spending_conditions.len() { - check_codec_and_corruption::( - &spending_conditions[i], - &spending_conditions_bytes[i], - ); - } - } - - #[test] - fn tx_stacks_spending_condition_p2wsh() { - let spending_condition_p2wsh = MultisigSpendingCondition { + let spending_condition_order_independent_p2sh_compressed = OrderIndependentMultisigSpendingCondition { signer: Hash160([0x11; 20]), - hash_mode: MultisigHashMode::P2WSH, + hash_mode: OrderIndependentMultisigHashMode::P2SH, nonce: 456, tx_fee: 567, fields: vec![ @@ -2007,234 +1667,144 @@ mod test { PubKey::from_hex( "03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77", ) - .unwrap(), + .unwrap(), ), ], signatures_required: 2, }; - let spending_condition_p2wsh_bytes = vec![ + let spending_condition_order_independent_p2sh_compressed_bytes = vec![ // hash mode - MultisigHashMode::P2WSH as u8, + OrderIndependentMultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // fields length - 0x00, - 0x00, - 0x00, - 0x03, + 0x00, 0x00, 0x00, 0x03, // field #1: signature TransactionAuthFieldID::SignatureCompressed as u8, // field #1: signature - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // field #2: signature TransactionAuthFieldID::SignatureCompressed as u8, // filed #2: signature - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, // field #3: public key TransactionAuthFieldID::PublicKeyCompressed as u8, // field #3: key (compressed) - 0x03, - 0xef, - 0x23, - 0x40, - 0x51, - 0x8b, - 0x58, - 0x67, - 0xb2, - 0x35, - 0x98, - 0xa9, - 0xcf, - 0x74, - 0x61, - 0x1f, - 0x8b, - 0x98, - 0x06, - 0x4f, - 0x7d, - 0x55, - 0xcd, - 0xb8, - 0xc1, - 0x07, - 0xc6, - 0x7b, - 0x5e, - 0xfc, - 0xbc, - 0x5c, - 0x77, + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, // number of signatures - 0x00, - 0x02, + 0x00, 0x02, + ]; + + let spending_conditions = vec![ + spending_condition_order_independent_p2sh_compressed, + spending_condition_order_independent_p2sh_uncompressed, + ]; + let spending_conditions_bytes = vec![ + spending_condition_order_independent_p2sh_compressed_bytes, + spending_condition_order_independent_p2sh_uncompressed_bytes, + ]; + + for i in 0..spending_conditions.len() { + check_codec_and_corruption::( + &spending_conditions[i], + &spending_conditions_bytes[i], + ); + } + } + + #[test] + fn tx_stacks_spending_condition_p2wpkh() { + let spending_condition_p2wpkh_compressed = SinglesigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: SinglesigHashMode::P2WPKH, + key_encoding: TransactionPublicKeyEncoding::Compressed, + nonce: 345, + tx_fee: 567, + signature: MessageSignature::from_raw(&vec![0xfe; 65]), + }; + + let spending_condition_p2wpkh_compressed_bytes = vec![ + // hash mode + SinglesigHashMode::P2WPKH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x59, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // key encoding + 
TransactionPublicKeyEncoding::Compressed as u8, + // signature + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + ]; + + let spending_conditions = vec![spending_condition_p2wpkh_compressed]; + let spending_conditions_bytes = vec![spending_condition_p2wpkh_compressed_bytes]; + + for i in 0..spending_conditions.len() { + check_codec_and_corruption::( + &spending_conditions[i], + &spending_conditions_bytes[i], + ); + } + } + + #[test] + fn tx_stacks_spending_condition_p2wsh() { + let spending_condition_p2wsh = MultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: MultisigHashMode::P2WSH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature( + TransactionPublicKeyEncoding::Compressed, + MessageSignature::from_raw(&vec![0xff; 65]), + ), + TransactionAuthField::Signature( + TransactionPublicKeyEncoding::Compressed, + MessageSignature::from_raw(&vec![0xfe; 65]), + ), + TransactionAuthField::PublicKey( + PubKey::from_hex( + "03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77", + ) + .unwrap(), + ), + ], + signatures_required: 2, + }; + + let spending_condition_p2wsh_bytes = vec![ + // hash mode + MultisigHashMode::P2WSH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // fields length + 0x00, 0x00, 0x00, 0x03, + // field #1: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // field #1: signature + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // field #2: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // filed #2: signature + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + // field #3: public key + TransactionAuthFieldID::PublicKeyCompressed as u8, + // field #3: key (compressed) + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, + // number of signatures + 0x00, 0x02, ]; let spending_conditions = vec![spending_condition_p2wsh]; @@ -2292,6 +1862,54 @@ mod test { ], signatures_required: 2 }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 123, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("04ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c771f112f919b00a6c6c5f51f7c63e1762fe9fac9b66ec75a053db7f51f4a52712b").unwrap()), + ], + 
signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) + ], + signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 123, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfd; 65])), + ], + signatures_required: 1 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfd; 65])), + ], + signatures_required: 1 + }), 
TransactionSpendingCondition::Singlesig(SinglesigSpendingCondition { signer: Hash160([0x11; 20]), hash_mode: SinglesigHashMode::P2WPKH, @@ -2311,6 +1929,30 @@ mod test { TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) ], signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2WSH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) + ], + signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2WSH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfd; 65])), + ], + signatures_required: 1 }) ]; @@ -2349,799 +1991,151 @@ mod test { // hash mode 0xff, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 
0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // key encoding, TransactionPublicKeyEncoding::Compressed as u8, // signature - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, + 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, ]; let bad_hash_mode_multisig_bytes = vec![ // hash mode MultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // key encoding, TransactionPublicKeyEncoding::Compressed as u8, // signature - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 
0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - ]; - - // this will parse into a singlesig spending condition, but data will still remain. - // the reason it parses is because the public keys length field encodes a valid 2-byte - // prefix of a public key, and the parser will lump it into a public key - let bad_hash_mode_singlesig_bytes_parseable = vec![ - // hash mode - SinglesigHashMode::P2PKH as u8, - // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - // nonce (embeds key encoding and part of the parsed nonce) - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, - // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, - // number of fields (embed part of the signature) - 0x00, - 0x00, - 0x00, - 0x01, - // field #1: signature - TransactionAuthFieldID::SignatureCompressed as u8, - // field #1: signature - 0x01, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - // number of signatures - 0x00, - 0x01, + 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 
0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, ]; - // wrong number of public keys (too many signatures) - let bad_public_key_count_bytes = vec![ + let bad_hash_mode_order_independent_multisig_bytes = vec![ // hash mode - MultisigHashMode::P2SH as u8, + OrderIndependentMultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, - // fields length - 0x00, - 0x00, - 0x00, - 0x03, - // field #1: signature - TransactionAuthFieldID::SignatureCompressed as u8, - // field #1: signature - 0x01, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - // field #2: signature - TransactionAuthFieldID::SignatureCompressed as u8, - // filed #2: signature - 0x02, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 
0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - // field #3: public key - TransactionAuthFieldID::PublicKeyCompressed as u8, - // field #3: key (compressed) - 0x03, - 0xef, - 0x23, - 0x40, - 0x51, - 0x8b, - 0x58, - 0x67, - 0xb2, - 0x35, - 0x98, - 0xa9, - 0xcf, - 0x74, - 0x61, - 0x1f, - 0x8b, - 0x98, - 0x06, - 0x4f, - 0x7d, - 0x55, - 0xcd, - 0xb8, - 0xc1, - 0x07, - 0xc6, - 0x7b, - 0x5e, - 0xfc, - 0xbc, - 0x5c, - 0x77, - // number of signatures - 0x00, - 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // key encoding, + TransactionPublicKeyEncoding::Compressed as u8, + // signature + 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, ]; - // wrong number of public keys (not enough signatures) - let bad_public_key_count_bytes_2 = vec![ - // hash mode - MultisigHashMode::P2SH as u8, - // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, - // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, - // fields length - 0x00, - 0x00, - 0x00, - 0x03, - // field #1: signature - TransactionAuthFieldID::SignatureCompressed as u8, - // field #1: signature - 0x01, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, 
- 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + // this will parse into a singlesig spending condition, but data will still remain. + // the reason it parses is because the public keys length field encodes a valid 2-byte + // prefix of a public key, and the parser will lump it into a public key + let bad_hash_mode_singlesig_bytes_parseable = vec![ + // hash mode + SinglesigHashMode::P2PKH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce (embeds key encoding and part of the parsed nonce) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // number of fields (embed part of the signature) + 0x00, 0x00, 0x00, 0x01, + // field #1: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // field #1: signature + 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signatures + 0x00, 0x01, + ]; + + // wrong number of public keys (too many signatures) + let bad_public_key_count_bytes = vec![ + // hash mode + MultisigHashMode::P2SH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 
0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // fields length + 0x00, 0x00, 0x00, 0x03, + // field #1: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // field #1: signature + 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // field #2: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // filed #2: signature + 0x02, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + // field #3: public key + TransactionAuthFieldID::PublicKeyCompressed as u8, + // field #3: key (compressed) + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, + // number of signatures + 0x00, 0x01, + ]; + + // wrong number of public keys (not enough signatures) + let bad_public_key_count_bytes_2 = vec![ + // hash mode + MultisigHashMode::P2SH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // fields length + 0x00, 0x00, 0x00, 
0x03, + // field #1: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // field #1: signature + 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // field #2: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // filed #2: signature + 0x02, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + // field #3: public key + TransactionAuthFieldID::PublicKeyCompressed as u8, + // field #3: key (compressed) + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, + // number of signatures + 0x00, 0x03, + ]; + + // wrong number of public keys (not enough signatures) + let bad_public_key_count_bytes_3 = vec![ + // hash mode + OrderIndependentMultisigHashMode::P2SH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // fields length + 0x00, 0x00, 0x00, 0x03, + // field #1: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // field #1: signature + 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // field #2: signature TransactionAuthFieldID::SignatureCompressed as u8, // filed #2: signature - 0x02, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0x02, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, // field #3: public key TransactionAuthFieldID::PublicKeyCompressed as u8, // field #3: key (compressed) - 0x03, - 0xef, - 0x23, - 0x40, - 0x51, - 0x8b, - 0x58, - 0x67, - 0xb2, - 0x35, - 0x98, - 0xa9, - 0xcf, - 0x74, - 0x61, - 0x1f, - 0x8b, - 0x98, - 0x06, - 0x4f, - 0x7d, - 0x55, - 0xcd, - 0xb8, - 0xc1, - 0x07, - 0xc6, - 0x7b, - 0x5e, - 0xfc, - 0xbc, - 0x5c, - 0x77, + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, // number of signatures - 0x00, - 0x03, + 0x00, 0x03, ]; // hashing mode doesn't allow uncompressed 
keys @@ -3159,112 +2153,15 @@ mod test { // hash mode SinglesigHashMode::P2WPKH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x7b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // public key uncompressed TransactionPublicKeyEncoding::Uncompressed as u8, // signature - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, ]; // hashing mode doesn't allow uncompressed keys @@ -3285,221 +2182,62 @@ mod test { // hash mode MultisigHashMode::P2WSH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 
0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // number of fields - 0x00, - 0x00, - 0x00, - 0x03, + 0x00, 0x00, 0x00, 0x03, // signature TransactionAuthFieldID::SignatureUncompressed as u8, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // signature TransactionAuthFieldID::SignatureUncompressed as u8, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, // key TransactionAuthFieldID::PublicKeyUncompressed as u8, - 0x02, - 0xb7, - 0xe1, - 0x0d, - 0xd2, - 0xc0, - 0x2d, - 0xec, - 0x64, - 0x88, - 0x80, - 0xea, - 0x34, - 0x6e, - 0xce, - 0x86, - 0xa7, - 0x82, - 0x0c, - 0x4f, - 0xa5, - 0x11, - 0x4f, - 0xb5, - 0x00, - 0xb2, - 0x64, - 0x5f, - 0x6c, - 0x97, - 0x20, - 0x92, - 0xdb, + 0x02, 0xb7, 0xe1, 0x0d, 0xd2, 0xc0, 0x2d, 0xec, 0x64, 0x88, 0x80, 0xea, 0x34, 0x6e, 0xce, 0x86, 0xa7, 0x82, 0x0c, 0x4f, 0xa5, 0x11, 0x4f, 0xb5, 0x00, 0xb2, 0x64, 0x5f, 0x6c, 0x97, 0x20, 0x92, 0xdb, // signatures - 0x00, - 0x02, + 0x00, 0x02, + ]; + + // hashing mode doesn't allow uncompressed keys + let bad_order_independent_p2wsh_uncompressed = TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2WSH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("04b7e10dd2c02dec648880ea346ece86a7820c4fa5114fb500b2645f6c972092dbe2334a653db0ab8d8ccffa6c35d3919e4cf8da3aeedafc7b9eb8235d0f2e7fdc").unwrap()), + ], + signatures_required: 2 + }); + + let bad_order_independent_p2wsh_uncompressed_bytes = vec![ + // hash mode + OrderIndependentMultisigHashMode::P2WSH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // number of fields + 0x00, 0x00, 0x00, 0x03, + // signature + TransactionAuthFieldID::SignatureUncompressed as u8, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // signature + TransactionAuthFieldID::SignatureUncompressed as u8, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + // key + TransactionAuthFieldID::PublicKeyUncompressed as u8, + 0x02, 0xb7, 0xe1, 0x0d, 0xd2, 0xc0, 0x2d, 0xec, 0x64, 0x88, 0x80, 0xea, 0x34, 0x6e, 0xce, 0x86, 0xa7, 0x82, 0x0c, 0x4f, 0xa5, 0x11, 0x4f, 0xb5, 0x00, 0xb2, 0x64, 0x5f, 0x6c, 0x97, 0x20, 0x92, 0xdb, + // signatures + 0x00, 0x02, ]; // we can serialize the invalid p2wpkh uncompressed condition, but we can't deserialize it @@ -3516,6 +2254,13 @@ mod test { .unwrap(); assert_eq!(actual_bytes, bad_p2wsh_uncompressed_bytes); + // we can serialize the invalid p2wsh uncompressed condition, but we can't deserialize it + let mut actual_bytes = vec![]; + bad_order_independent_p2wsh_uncompressed + .consensus_serialize(&mut actual_bytes) + .unwrap(); + assert_eq!(actual_bytes, bad_order_independent_p2wsh_uncompressed_bytes); + assert!(TransactionSpendingCondition::consensus_deserialize( &mut &bad_public_key_count_bytes[..] 
) @@ -3524,6 +2269,10 @@ mod test { &mut &bad_public_key_count_bytes_2[..] ) .is_err()); + assert!(TransactionSpendingCondition::consensus_deserialize( + &mut &bad_public_key_count_bytes_3[..] + ) + .is_err()); assert!( TransactionSpendingCondition::consensus_deserialize(&mut &bad_hash_mode_bytes[..]) .is_err() @@ -3532,6 +2281,10 @@ mod test { &mut &bad_hash_mode_multisig_bytes[..] ) .is_err()); + assert!(TransactionSpendingCondition::consensus_deserialize( + &mut &bad_hash_mode_order_independent_multisig_bytes[..] + ) + .is_err()); assert!(TransactionSpendingCondition::consensus_deserialize( &mut &bad_p2wpkh_uncompressed_bytes[..] ) @@ -3540,6 +2293,10 @@ mod test { &mut &bad_p2wsh_uncompressed_bytes[..] ) .is_err()); + assert!(TransactionSpendingCondition::consensus_deserialize( + &mut &bad_order_independent_p2wsh_uncompressed_bytes[..] + ) + .is_err()); // corrupt but will parse with trailing bits assert!(TransactionSpendingCondition::consensus_deserialize( @@ -3633,4 +2390,90 @@ mod test { assert_eq!(next_pubkey, StacksPublicKey::from_private(&keys[i])); } } + + fn tx_auth_check_all_epochs( + auth: TransactionAuth, + activation_epoch_id: Option, + ) { + let epoch_list = [ + StacksEpochId::Epoch10, + StacksEpochId::Epoch20, + StacksEpochId::Epoch2_05, + StacksEpochId::Epoch21, + StacksEpochId::Epoch22, + StacksEpochId::Epoch23, + StacksEpochId::Epoch24, + StacksEpochId::Epoch25, + StacksEpochId::Epoch30, + ]; + + for epoch_id in epoch_list.iter() { + if activation_epoch_id.is_none() { + assert_eq!(auth.is_supported_in_epoch(*epoch_id), true); + } else if activation_epoch_id.unwrap() > *epoch_id { + assert_eq!(auth.is_supported_in_epoch(*epoch_id), false); + } else { + assert_eq!(auth.is_supported_in_epoch(*epoch_id), true); + } + } + } + + #[test] + fn tx_auth_is_supported_in_epoch() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ).unwrap(); + + let privk_2 = StacksPrivateKey::from_hex( + 
"7e3af4db6af6b3c67e2c6c6d7d5983b519f4d9b3a6e00580ae96dcace3bde8bc01", + ).unwrap(); + + let auth_p2pkh = TransactionAuth::from_p2pkh(&privk_1).unwrap(); + let auth_sponsored_p2pkh = auth_p2pkh.clone().into_sponsored( + TransactionAuth::from_p2pkh(&privk_2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_p2pkh, None); + tx_auth_check_all_epochs(auth_sponsored_p2pkh, None); + + let auth_p2wpkh = TransactionAuth::from_p2wpkh(&privk_1).unwrap(); + let auth_sponsored_p2wpkh = auth_p2wpkh.clone().into_sponsored( + TransactionAuth::from_p2wpkh(&privk_2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_p2wpkh, None); + tx_auth_check_all_epochs(auth_sponsored_p2wpkh, None); + + let auth_p2sh = TransactionAuth::from_p2sh(&[privk_1, privk_2], 2).unwrap(); + let auth_sponsored_p2sh = auth_p2sh.clone().into_sponsored( + TransactionAuth::from_p2sh(&[privk_1, privk_2], 2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_p2sh, None); + tx_auth_check_all_epochs(auth_sponsored_p2sh, None); + + let auth_p2wsh = TransactionAuth::from_p2wsh(&[privk_1, privk_2], 2).unwrap(); + let auth_sponsored_p2wsh = auth_p2wsh.clone().into_sponsored( + TransactionAuth::from_p2wsh(&[privk_1, privk_2], 2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_p2wsh, None); + tx_auth_check_all_epochs(auth_sponsored_p2wsh, None); + + let auth_order_independent_p2sh = TransactionAuth::from_order_independent_p2sh(&[privk_1, privk_2], 2).unwrap(); + let auth_sponsored_order_independent_p2sh = auth_order_independent_p2sh.clone().into_sponsored( + TransactionAuth::from_order_independent_p2sh(&[privk_1, privk_2], 2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_order_independent_p2sh, Some(StacksEpochId::Epoch30)); + tx_auth_check_all_epochs(auth_sponsored_order_independent_p2sh, Some(StacksEpochId::Epoch30)); + + let auth_order_independent_p2wsh = TransactionAuth::from_order_independent_p2wsh(&[privk_1, privk_2], 2).unwrap(); + let auth_sponsored_order_independent_p2wsh 
= auth_order_independent_p2wsh.clone().into_sponsored( + TransactionAuth::from_order_independent_p2wsh(&[privk_1, privk_2], 2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_order_independent_p2wsh, Some(StacksEpochId::Epoch30)); + tx_auth_check_all_epochs(auth_sponsored_order_independent_p2wsh, Some(StacksEpochId::Epoch30)); + } } diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 2932231103..9827d28e9c 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -21,7 +21,8 @@ use std::io::{Read, Write}; use sha2::{Digest, Sha512_256}; use stacks_common::codec::{ - read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, + read_next, read_next_at_most, write_next, Error as codec_error, StacksMessageCodec, + MAX_MESSAGE_LEN, }; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksWorkScore, TrieHash, VRFSeed, @@ -308,7 +309,7 @@ impl StacksMessageCodec for StacksBlock { let header: StacksBlockHeader = read_next(fd)?; let txs: Vec = { let mut bound_read = BoundReader::from_reader(fd, MAX_MESSAGE_LEN as u64); - read_next(&mut bound_read) + read_next_at_most(&mut bound_read, u32::MAX) }?; // there must be at least one transaction (the coinbase) @@ -569,37 +570,52 @@ impl StacksBlock { epoch_id: StacksEpochId, ) -> bool { for tx in txs.iter() { - if let TransactionPayload::Coinbase(_, ref recipient_opt, ref proof_opt) = &tx.payload { - if proof_opt.is_some() && epoch_id < StacksEpochId::Epoch30 { - // not supported - error!("Coinbase with VRF proof not supported before Stacks 3.0"; "txid" => %tx.txid()); - return false; - } - if proof_opt.is_none() && epoch_id >= StacksEpochId::Epoch30 { - // not supported - error!("Coinbase with VRF proof is required in Stacks 3.0 and later"; "txid" => %tx.txid()); - return false; - } - if recipient_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { - // not 
supported - error!("Coinbase pay-to-alt-recipient not supported before Stacks 2.1"; "txid" => %tx.txid()); - return false; - } + if !StacksBlock::validate_transaction_static_epoch(tx, epoch_id) { + return false; } - if let TransactionPayload::SmartContract(_, ref version_opt) = &tx.payload { - if version_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { - // not supported - error!("Versioned smart contracts not supported before Stacks 2.1"); - return false; - } + } + return true; + } + + /// Verify that one transaction is supported in the given epoch, as indicated by `epoch_id` + pub fn validate_transaction_static_epoch( + tx: &StacksTransaction, + epoch_id: StacksEpochId, + ) -> bool { + if let TransactionPayload::Coinbase(_, ref recipient_opt, ref proof_opt) = &tx.payload { + if proof_opt.is_some() && epoch_id < StacksEpochId::Epoch30 { + // not supported + error!("Coinbase with VRF proof not supported before Stacks 3.0"; "txid" => %tx.txid()); + return false; } - if let TransactionPayload::TenureChange(..) = &tx.payload { - if epoch_id < StacksEpochId::Epoch30 { - error!("TenureChange transaction not supported before Stacks 3.0"; "txid" => %tx.txid()); - return false; - } + if proof_opt.is_none() && epoch_id >= StacksEpochId::Epoch30 { + // not supported + error!("Coinbase with VRF proof is required in Stacks 3.0 and later"; "txid" => %tx.txid()); + return false; + } + if recipient_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { + // not supported + error!("Coinbase pay-to-alt-recipient not supported before Stacks 2.1"; "txid" => %tx.txid()); + return false; + } + } + if let TransactionPayload::SmartContract(_, ref version_opt) = &tx.payload { + if version_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { + // not supported + error!("Versioned smart contracts not supported before Stacks 2.1"); + return false; } } + if let TransactionPayload::TenureChange(..) 
= &tx.payload { + if epoch_id < StacksEpochId::Epoch30 { + error!("TenureChange transaction not supported before Stacks 3.0"; "txid" => %tx.txid()); + return false; + } + } + if !tx.auth.is_supported_in_epoch(epoch_id) { + error!("Authentication mode not supported in Epoch {epoch_id}"); + return false; + } return true; } @@ -1061,7 +1077,7 @@ mod test { signature: MessageSignature([0x0cu8; 65]), }; - let mut block = make_codec_test_block(100000000); + let mut block = make_codec_test_block(100000000, StacksEpochId::latest()); block.header.version = 0x24; let ph = block.header.parent_block.as_bytes().to_vec(); @@ -1125,6 +1141,7 @@ mod test { 0x80000000, &TransactionAnchorMode::OffChainOnly, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); // remove all coinbases @@ -1672,6 +1689,103 @@ mod test { } } + fn verify_block_epoch_validation( + txs: &[StacksTransaction], + tx_coinbase_old: Option, + tx_coinbase_nakamoto: Option, + activation_epoch_id: StacksEpochId, + header: StacksBlockHeader, + deactivation_epoch_id: Option, + ) { + let epoch_list = [ + StacksEpochId::Epoch10, + StacksEpochId::Epoch20, + StacksEpochId::Epoch2_05, + StacksEpochId::Epoch21, + StacksEpochId::Epoch22, + StacksEpochId::Epoch23, + StacksEpochId::Epoch24, + StacksEpochId::Epoch25, + StacksEpochId::Epoch30, + ]; + let get_tx_root = |txs: &Vec| { + let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + + let merkle_tree = MerkleTree::::new(&txid_vecs); + let tx_merkle_root = merkle_tree.root(); + tx_merkle_root + }; + let mut block_header_dup_tx = header.clone(); + block_header_dup_tx.tx_merkle_root = get_tx_root(&txs.to_vec()); + + let block = StacksBlock { + header: block_header_dup_tx.clone(), + txs: txs.to_vec(), + }; + + let block_with_coinbase_tx = tx_coinbase_old.map(|coinbase| { + let mut txs_with_coinbase = txs.to_vec(); + txs_with_coinbase.insert(0, coinbase); + + let mut block_header_dup_tx_with_coinbase = header.clone(); + 
block_header_dup_tx_with_coinbase.tx_merkle_root = + get_tx_root(&txs_with_coinbase.to_vec()); + + StacksBlock { + header: block_header_dup_tx_with_coinbase.clone(), + txs: txs_with_coinbase, + } + }); + + let block_with_coinbase_tx_nakamoto = tx_coinbase_nakamoto.map(|coinbase| { + let mut txs_with_coinbase_nakamoto = txs.to_vec(); + txs_with_coinbase_nakamoto.insert(0, coinbase); + + let mut block_header_dup_tx_with_coinbase_nakamoto = header.clone(); + block_header_dup_tx_with_coinbase_nakamoto.tx_merkle_root = + get_tx_root(&txs_with_coinbase_nakamoto.to_vec()); + + StacksBlock { + header: block_header_dup_tx_with_coinbase_nakamoto.clone(), + txs: txs_with_coinbase_nakamoto, + } + }); + + for epoch_id in epoch_list.iter() { + let block_to_check = if *epoch_id >= StacksEpochId::Epoch30 + && block_with_coinbase_tx_nakamoto.is_some() + { + block_with_coinbase_tx_nakamoto.clone().unwrap() + } else if *epoch_id >= StacksEpochId::Epoch21 + && *epoch_id < StacksEpochId::Epoch30 + && block_with_coinbase_tx.is_some() + { + block_with_coinbase_tx.clone().unwrap() + } else { + block.clone() + }; + + let mut bytes: Vec = vec![]; + block_to_check.consensus_serialize(&mut bytes).unwrap(); + + if *epoch_id < activation_epoch_id { + assert!(!StacksBlock::validate_transactions_static_epoch( + &txs, + epoch_id.clone(), + )); + } else if deactivation_epoch_id.is_none() || deactivation_epoch_id.unwrap() > *epoch_id + { + assert!(StacksBlock::validate_transactions_static_epoch( + &txs, *epoch_id, + )); + } else { + assert!(!StacksBlock::validate_transactions_static_epoch( + &txs, *epoch_id, + )); + } + } + } + #[test] fn test_block_validate_transactions_static() { let header = StacksBlockHeader { @@ -1689,6 +1803,11 @@ mod test { microblock_pubkey_hash: Hash160([9u8; 20]), }; + let stx_address = StacksAddress { + version: 0, + bytes: Hash160([0u8; 20]), + }; + let privk = StacksPrivateKey::from_hex( "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", ) @@ -1699,6 
+1818,135 @@ mod test { )) .unwrap(), ); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let order_independent_multisig_condition_p2wsh = + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let order_independent_multisig_condition_p2sh = + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let order_independent_sponsored_auth_p2sh = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &privk, + )) + .unwrap(), + order_independent_multisig_condition_p2sh.clone(), + ); + + let order_independent_sponsored_auth_p2wsh = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &privk, + )) + .unwrap(), + order_independent_multisig_condition_p2wsh.clone(), + ); + let order_independent_origin_auth_p2sh = + TransactionAuth::Standard(order_independent_multisig_condition_p2sh.clone()); + + let order_independent_origin_auth_p2wsh = + TransactionAuth::Standard(order_independent_multisig_condition_p2wsh.clone()); + + let order_independent_multisig_tx_transfer_mainnet_p2sh = StacksTransaction::new( + TransactionVersion::Mainnet, + order_independent_origin_auth_p2sh.clone(), + TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + 
TokenTransferMemo([1u8; 34]), + ), + ); + + let order_independent_multisig_tx_transfer_mainnet_p2wsh = StacksTransaction::new( + TransactionVersion::Mainnet, + order_independent_origin_auth_p2wsh.clone(), + TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + TokenTransferMemo([1u8; 34]), + ), + ); + + let order_independent_sponsored_multisig_tx_transfer_mainnet_p2sh = StacksTransaction::new( + TransactionVersion::Mainnet, + order_independent_sponsored_auth_p2sh.clone(), + TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + TokenTransferMemo([1u8; 34]), + ), + ); + + let order_independent_sponsored_multisig_tx_transfer_mainnet_p2wsh = StacksTransaction::new( + TransactionVersion::Mainnet, + order_independent_sponsored_auth_p2wsh.clone(), + TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + TokenTransferMemo([1u8; 34]), + ), + ); + + let mut tx_signer = + StacksTransactionSigner::new(&order_independent_multisig_tx_transfer_mainnet_p2sh); + tx_signer.sign_origin(&privk_1).unwrap(); + tx_signer.sign_origin(&privk_2).unwrap(); + tx_signer.append_origin(&pubk_3).unwrap(); + let order_independent_multisig_tx_transfer_mainnet_p2sh_signed = + tx_signer.get_tx().unwrap(); + + let mut tx_signer = + StacksTransactionSigner::new(&order_independent_multisig_tx_transfer_mainnet_p2wsh); + tx_signer.sign_origin(&privk_1).unwrap(); + tx_signer.sign_origin(&privk_2).unwrap(); + tx_signer.append_origin(&pubk_3).unwrap(); + let order_independent_multisig_tx_transfer_mainnet_p2wsh_signed = + tx_signer.get_tx().unwrap(); + + let mut tx_signer = StacksTransactionSigner::new( + &order_independent_sponsored_multisig_tx_transfer_mainnet_p2sh, + ); + tx_signer.sign_origin(&privk).unwrap(); + tx_signer.sign_sponsor(&privk_1).unwrap(); + tx_signer.sign_sponsor(&privk_2).unwrap(); + tx_signer.append_sponsor(&pubk_3).unwrap(); + let order_independent_sponsored_multisig_tx_transfer_mainnet_p2sh_signed = + tx_signer.get_tx().unwrap(); + + let mut tx_signer 
= StacksTransactionSigner::new( + &order_independent_sponsored_multisig_tx_transfer_mainnet_p2wsh, + ); + tx_signer.sign_origin(&privk).unwrap(); + tx_signer.sign_sponsor(&privk_1).unwrap(); + tx_signer.sign_sponsor(&privk_2).unwrap(); + tx_signer.append_sponsor(&pubk_3).unwrap(); + let order_independent_sponsored_multisig_tx_transfer_mainnet_p2wsh_signed = + tx_signer.get_tx().unwrap(); + let tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), @@ -1810,6 +2058,12 @@ mod test { let nakamoto_coinbase = vec![tx_coinbase_proof.clone()]; let tenure_change_tx = vec![tx_tenure_change.clone()]; let nakamoto_txs = vec![tx_coinbase_proof.clone(), tx_tenure_change.clone()]; + let order_independent_multisig_txs = vec![ + order_independent_multisig_tx_transfer_mainnet_p2sh_signed.clone(), + order_independent_sponsored_multisig_tx_transfer_mainnet_p2sh_signed.clone(), + order_independent_multisig_tx_transfer_mainnet_p2wsh_signed.clone(), + order_independent_sponsored_multisig_tx_transfer_mainnet_p2wsh_signed.clone(), + ]; assert!(!StacksBlock::validate_transactions_unique(&dup_txs)); assert!(!StacksBlock::validate_transactions_network( @@ -1822,47 +2076,55 @@ mod test { )); assert!(!StacksBlock::validate_anchor_mode(&offchain_txs, true)); assert!(!StacksBlock::validate_coinbase(&no_coinbase, true)); - assert!(!StacksBlock::validate_transactions_static_epoch( - &coinbase_contract, - StacksEpochId::Epoch2_05 - )); - assert!(StacksBlock::validate_transactions_static_epoch( - &coinbase_contract, - StacksEpochId::Epoch21 - )); - assert!(!StacksBlock::validate_transactions_static_epoch( - &versioned_contract, - StacksEpochId::Epoch2_05 - )); - assert!(StacksBlock::validate_transactions_static_epoch( + verify_block_epoch_validation( &versioned_contract, - StacksEpochId::Epoch21 - )); - assert!(!StacksBlock::validate_transactions_static_epoch( - &nakamoto_coinbase, - StacksEpochId::Epoch21 - )); - 
assert!(StacksBlock::validate_transactions_static_epoch( - &nakamoto_coinbase, - StacksEpochId::Epoch30 - )); - assert!(!StacksBlock::validate_transactions_static_epoch( + Some(tx_coinbase.clone()), + Some(tx_coinbase_proof.clone()), + StacksEpochId::Epoch21, + header.clone(), + None, + ); + verify_block_epoch_validation( &coinbase_contract, - StacksEpochId::Epoch30 - )); - assert!(!StacksBlock::validate_transactions_static_epoch( - &tenure_change_tx, - StacksEpochId::Epoch21 - )); - assert!(StacksBlock::validate_transactions_static_epoch( - &nakamoto_txs, - StacksEpochId::Epoch30 - )); - assert!(!StacksBlock::validate_transactions_static_epoch( + None, + None, + StacksEpochId::Epoch21, + header.clone(), + Some(StacksEpochId::Epoch30), + ); + verify_block_epoch_validation( + &order_independent_multisig_txs, + Some(tx_coinbase.clone()), + Some(tx_coinbase_proof.clone()), + StacksEpochId::Epoch30, + header.clone(), + None, + ); + verify_block_epoch_validation( &nakamoto_txs, - StacksEpochId::Epoch21 - )); + Some(tx_coinbase.clone()), + None, + StacksEpochId::Epoch30, + header.clone(), + None, + ); + verify_block_epoch_validation( + &nakamoto_coinbase, + Some(tx_coinbase.clone()), + None, + StacksEpochId::Epoch30, + header.clone(), + None, + ); + verify_block_epoch_validation( + &tenure_change_tx, + Some(tx_coinbase.clone()), + Some(tx_coinbase_proof.clone()), + StacksEpochId::Epoch30, + header.clone(), + None, + ); } // TODO: diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 23f2c92008..5d8588836e 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -3,7 +3,7 @@ use std::collections::{HashMap, VecDeque}; use clarity::vm::analysis::arithmetic_checker::ArithmeticOnlyChecker; use clarity::vm::analysis::mem_type_check; use clarity::vm::ast::ASTRules; -use clarity::vm::clarity::TransactionConnection; +use 
clarity::vm::clarity::{ClarityConnection, TransactionConnection}; use clarity::vm::contexts::OwnedEnvironment; use clarity::vm::contracts::Contract; use clarity::vm::costs::CostOverflowingMath; @@ -84,7 +84,8 @@ lazy_static! { pub struct ClarityTestSim { marf: MarfedKV, - pub height: u64, + pub block_height: u64, + pub tenure_height: u64, fork: u64, /// This vec specifies the transitions for each epoch. /// It is a list of heights at which the simulated chain transitions @@ -134,33 +135,43 @@ impl ClarityTestSim { ClarityTestSim { marf, - height: 0, + block_height: 0, + tenure_height: 0, fork: 0, epoch_bounds: vec![0, u64::MAX], } } - pub fn execute_next_block_as_conn(&mut self, f: F) -> R + pub fn execute_next_block_as_conn_with_tenure(&mut self, new_tenure: bool, f: F) -> R where F: FnOnce(&mut ClarityBlockConnection) -> R, { let r = { let mut store = self.marf.begin( - &StacksBlockId(test_sim_height_to_hash(self.height, self.fork)), - &StacksBlockId(test_sim_height_to_hash(self.height + 1, self.fork)), + &StacksBlockId(test_sim_height_to_hash(self.block_height, self.fork)), + &StacksBlockId(test_sim_height_to_hash(self.block_height + 1, self.fork)), ); let headers_db = TestSimHeadersDB { - height: self.height + 1, + height: self.block_height + 1, }; let burn_db = TestSimBurnStateDB { epoch_bounds: self.epoch_bounds.clone(), pox_constants: PoxConstants::test_default(), - height: (self.height + 100).try_into().unwrap(), + height: (self.tenure_height + 100).try_into().unwrap(), }; let cur_epoch = Self::check_and_bump_epoch(&mut store, &headers_db, &burn_db); + let mut db = store.as_clarity_db(&headers_db, &burn_db); + if cur_epoch >= StacksEpochId::Epoch30 { + db.begin(); + db.set_tenure_height(self.tenure_height as u32 + if new_tenure { 1 } else { 0 }) + .expect("FAIL: unable to set tenure height in Clarity database"); + db.commit() + .expect("FAIL: unable to commit tenure height in Clarity database"); + } + let mut block_conn = 
ClarityBlockConnection::new_test_conn(store, &headers_db, &burn_db, cur_epoch); let r = f(&mut block_conn); @@ -169,43 +180,70 @@ impl ClarityTestSim { r }; - self.height += 1; + self.block_height += 1; + if new_tenure { + self.tenure_height += 1; + } r } - pub fn execute_next_block(&mut self, f: F) -> R + pub fn execute_next_block_as_conn(&mut self, f: F) -> R + where + F: FnOnce(&mut ClarityBlockConnection) -> R, + { + self.execute_next_block_as_conn_with_tenure(true, f) + } + + pub fn execute_next_block_with_tenure(&mut self, new_tenure: bool, f: F) -> R where F: FnOnce(&mut OwnedEnvironment) -> R, { let mut store = self.marf.begin( - &StacksBlockId(test_sim_height_to_hash(self.height, self.fork)), - &StacksBlockId(test_sim_height_to_hash(self.height + 1, self.fork)), + &StacksBlockId(test_sim_height_to_hash(self.block_height, self.fork)), + &StacksBlockId(test_sim_height_to_hash(self.block_height + 1, self.fork)), ); let r = { let headers_db = TestSimHeadersDB { - height: self.height + 1, + height: self.block_height + 1, }; let burn_db = TestSimBurnStateDB { epoch_bounds: self.epoch_bounds.clone(), pox_constants: PoxConstants::test_default(), - height: (self.height + 100).try_into().unwrap(), + height: (self.tenure_height + 100).try_into().unwrap(), }; let cur_epoch = Self::check_and_bump_epoch(&mut store, &headers_db, &burn_db); debug!("Execute block in epoch {}", &cur_epoch); - let db = store.as_clarity_db(&headers_db, &burn_db); + let mut db = store.as_clarity_db(&headers_db, &burn_db); + if cur_epoch >= StacksEpochId::Epoch30 { + db.begin(); + db.set_tenure_height(self.tenure_height as u32 + if new_tenure { 1 } else { 0 }) + .expect("FAIL: unable to set tenure height in Clarity database"); + db.commit() + .expect("FAIL: unable to commit tenure height in Clarity database"); + } let mut owned_env = OwnedEnvironment::new_toplevel(db); f(&mut owned_env) }; store.test_commit(); - self.height += 1; + self.block_height += 1; + if new_tenure { + self.tenure_height 
+= 1; + } r } + pub fn execute_next_block(&mut self, f: F) -> R + where + F: FnOnce(&mut OwnedEnvironment) -> R, + { + self.execute_next_block_with_tenure(true, f) + } + fn check_and_bump_epoch( store: &mut WritableMarfStore, headers_db: &TestSimHeadersDB, @@ -253,7 +291,8 @@ impl ClarityTestSim { }; store.test_commit(); - self.height = parent_height + 1; + self.block_height = parent_height + 1; + self.tenure_height = parent_height + 1; self.fork += 1; r @@ -370,7 +409,10 @@ impl BurnStateDB for TestSimBurnStateDB { 2 => StacksEpochId::Epoch21, 3 => StacksEpochId::Epoch22, 4 => StacksEpochId::Epoch23, - _ => panic!("Epoch unknown"), + 5 => StacksEpochId::Epoch24, + 6 => StacksEpochId::Epoch25, + 7 => StacksEpochId::Epoch30, + _ => panic!("Invalid epoch index"), }; Some(StacksEpoch { @@ -2703,7 +2745,7 @@ fn test_vote_fail() { ); }); - let fork_start = sim.height; + let fork_start = sim.block_height; for i in 0..25 { sim.execute_next_block(|env| { diff --git a/stackslib/src/chainstate/stacks/boot/docs.rs b/stackslib/src/chainstate/stacks/boot/docs.rs index 62580f384a..28066abc71 100644 --- a/stackslib/src/chainstate/stacks/boot/docs.rs +++ b/stackslib/src/chainstate/stacks/boot/docs.rs @@ -1,4 +1,20 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
use clarity::vm::docs::contracts::{produce_docs_refs, ContractSupportDocs}; +use clarity::vm::ClarityVersion; use hashbrown::{HashMap, HashSet}; use super::STACKS_BOOT_CODE_MAINNET; @@ -139,7 +155,11 @@ If your name is in a namespace where names do not expire, then you never need to pub fn make_json_boot_contracts_reference() -> String { let contract_supporting_docs = make_contract_support_docs(); - let api_out = produce_docs_refs(&*STACKS_BOOT_CODE_MAINNET, &contract_supporting_docs); + let api_out = produce_docs_refs( + &*STACKS_BOOT_CODE_MAINNET, + &contract_supporting_docs, + ClarityVersion::Clarity1, + ); format!( "{}", serde_json::to_string(&api_out).expect("Failed to serialize documentation") diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 539654d30a..d3e8a494de 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -119,7 +119,7 @@ lazy_static! { format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_3_BODY); pub static ref POX_3_TESTNET_CODE: String = format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_3_BODY); - pub static ref POX_4_CODE: String = format!("{}", POX_4_BODY); + pub static ref POX_4_CODE: String = POX_4_BODY.to_string(); pub static ref BOOT_CODE_COST_VOTING_TESTNET: String = make_testnet_cost_voting(); pub static ref STACKS_BOOT_CODE_MAINNET: [(&'static str, &'static str); 6] = [ ("pox", &BOOT_CODE_POX_MAINNET), @@ -1371,6 +1371,7 @@ pub mod test { use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::*; + use self::signers_tests::readonly_call; use super::*; use crate::burnchains::{Address, PublicKey}; use crate::chainstate::burn::db::sortdb::*; @@ -1728,6 +1729,58 @@ pub mod test { } } + pub fn get_stacker_info_pox_4( + peer: &mut TestPeer, + addr: &PrincipalData, + ) -> Option<(PoxAddress, u128, u128, Vec)> { + let value_opt = eval_at_tip( + peer, + "pox-4", + &format!("(get-stacker-info '{})", 
addr.to_string()), + ); + let data = if let Some(d) = value_opt.expect_optional().unwrap() { + d + } else { + return None; + }; + + let data = data.expect_tuple().unwrap(); + let pox_addr = tuple_to_pox_addr( + data.get("pox-addr") + .unwrap() + .to_owned() + .expect_tuple() + .unwrap(), + ); + let first_reward_cycle = data + .get("first-reward-cycle") + .unwrap() + .to_owned() + .expect_u128() + .unwrap(); + let lock_period = data + .get("lock-period") + .unwrap() + .to_owned() + .expect_u128() + .unwrap(); + let reward_set_indices = data + .get("reward-set-indexes") + .unwrap() + .to_owned() + .expect_list() + .unwrap() + .iter() + .map(|v| v.to_owned().expect_u128().unwrap()) + .collect(); + Some(( + pox_addr, + first_reward_cycle, + lock_period, + reward_set_indices, + )) + } + pub fn get_stacker_info( peer: &mut TestPeer, addr: &PrincipalData, @@ -1987,6 +2040,27 @@ pub mod test { make_tx(key, nonce, 1, payload) } + pub fn get_approved_aggregate_key( + peer: &mut TestPeer<'_>, + latest_block_id: StacksBlockId, + reward_cycle: u128, + ) -> Option { + let key_opt = readonly_call( + peer, + &latest_block_id, + SIGNERS_VOTING_NAME.into(), + "get-approved-aggregate-key".into(), + vec![Value::UInt(reward_cycle)], + ) + .expect_optional() + .unwrap(); + key_opt.map(|key_value| { + let data = key_value.expect_buff(33).unwrap(); + let compressed_data = Compressed::try_from(data.as_slice()).unwrap(); + Point::try_from(&compressed_data).unwrap() + }) + } + pub fn make_pox_2_increase( key: &StacksPrivateKey, nonce: u64, diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 1be2bb5ba3..be7675c700 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -26,6 +26,7 @@ use clarity::vm::errors::{ }; use clarity::vm::eval; use clarity::vm::events::{STXEventType, STXLockEventData, StacksTransactionEvent}; +use 
clarity::vm::functions::principals; use clarity::vm::representations::SymbolicExpression; use clarity::vm::tests::{execute, is_committed, is_err_code, symbols_from_values}; use clarity::vm::types::Value::Response; @@ -53,12 +54,17 @@ use crate::chainstate::burn::operations::*; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::tests::pox_addr_from; use crate::chainstate::nakamoto::test_signers::TestSigners; +use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; use crate::chainstate::stacks::boot::pox_2_tests::{ check_pox_print_event, generate_pox_clarity_value, get_partial_stacked, get_reward_cycle_total, get_reward_set_entries_at, get_stacking_state_pox, get_stx_account_at, with_clarity_db_ro, PoxPrintFields, StackingStateCheckData, }; +use crate::chainstate::stacks::boot::signers_tests::{ + get_signer_index, prepare_signers_test, readonly_call, +}; +use crate::chainstate::stacks::boot::signers_voting_tests::{make_dummy_tx, nakamoto_tenure}; use crate::chainstate::stacks::boot::{ PoxVersions, BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, MINERS_NAME, POX_2_NAME, POX_3_NAME, @@ -4492,7 +4498,7 @@ fn stack_agg_increase() { .clone() .expect_u128() .unwrap(), - Some(alice_signature_increase), + Some(alice_signature_increase.clone()), &alice.public_key, u128::MAX, 1, @@ -4590,16 +4596,52 @@ fn stack_agg_increase() { &bob_err_increase_result_expected ); + let bob_aggregate_increase_tx = &tx_block.receipts.get(4).unwrap(); + // Fetch the aggregate increase result & check that value is true - let bob_aggregate_increase_result = &tx_block - .receipts - .get(4) - .unwrap() + let bob_aggregate_increase_result = bob_aggregate_increase_tx .result .clone() .expect_result_ok() .unwrap(); - assert_eq!(bob_aggregate_increase_result, &Value::Bool(true)); + assert_eq!(bob_aggregate_increase_result, 
Value::Bool(true)); + + let aggregation_increase_event = &bob_aggregate_increase_tx.events[0]; + + let expected_result = Value::okay(Value::Tuple( + TupleData::from_data(vec![ + ( + "stacker".into(), + Value::Principal(PrincipalData::from(bob.address.clone())), + ), + ("total-locked".into(), Value::UInt(min_ustx * 2)), + ]) + .unwrap(), + )) + .unwrap(); + + let increase_op_data = HashMap::from([ + ( + "signer-sig", + Value::some(Value::buff_from(alice_signature_increase).unwrap()).unwrap(), + ), + ( + "signer-key", + Value::buff_from(alice.public_key.to_bytes_compressed()).unwrap(), + ), + ("max-amount", Value::UInt(u128::MAX)), + ("auth-id", Value::UInt(1)), + ]); + + let common_data = PoxPrintFields { + op_name: "stack-aggregation-increase".to_string(), + stacker: Value::Principal(PrincipalData::from(bob.address.clone())), + balance: Value::UInt(1000000000000000000), + locked: Value::UInt(0), + burnchain_unlock_height: Value::UInt(0), + }; + + check_pox_print_event(&aggregation_increase_event, common_data, increase_op_data); // Check that Bob's second pool has an assigned reward index of 1 let bob_aggregate_commit_reward_index = &tx_block @@ -6407,106 +6449,1713 @@ fn delegate_stack_increase() { assert_eq!(&reward_entry.signer.unwrap(), signer_pk_bytes.as_slice()); } -// In this test case, Alice delegates twice the stacking minimum to Bob. -// Bob stacks Alice's funds, and then immediately tries to stacks-aggregation-increase. -// This should return a clarity user error. 
-#[test] -fn delegate_stack_increase_err() { - let lock_period: u128 = 2; - let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); +pub fn pox_4_scenario_test_setup<'a>( + test_name: &str, + observer: &'a TestEventObserver, + initial_balances: Vec<(PrincipalData, u64)>, +) -> ( + TestPeer<'a>, + usize, + u64, + u128, + u128, + u128, + u128, + TestPeerConfig, +) { + // Setup code extracted from your original test + let test_signers = TestSigners::default(); + let aggregate_public_key = test_signers.aggregate_public_key.clone(); + let mut peer_config = TestPeerConfig::new(function_name!(), 0, 0); + let private_key = peer_config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); - let alice_nonce = 0; - let alice_key = &keys[0]; - let alice_address = PrincipalData::from(key_to_stacks_addr(alice_key)); - let mut bob_nonce = 0; - let bob_delegate_key = &keys[1]; - let bob_delegate_address = PrincipalData::from(key_to_stacks_addr(bob_delegate_key)); - let min_ustx = get_stacking_minimum(&mut peer, &latest_block); - let signer_sk = StacksPrivateKey::from_seed(&[1, 3, 3, 7]); - let signer_pk = StacksPublicKey::from_private(&signer_sk); - let signer_pk_bytes = signer_pk.to_bytes_compressed(); - let signer_key_val = Value::buff_from(signer_pk_bytes.clone()).unwrap(); + peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); + peer_config + .stacker_dbs + .push(boot_code_id(MINERS_NAME, false)); + peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(1000)); + peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config + .initial_balances + .append(&mut initial_balances.clone()); + 
peer_config.burnchain.pox_constants.v2_unlock_height = 81; + peer_config.burnchain.pox_constants.pox_3_activation_height = 101; + peer_config.burnchain.pox_constants.v3_unlock_height = 102; + peer_config.burnchain.pox_constants.pox_4_activation_height = 105; + peer_config.test_signers = Some(test_signers.clone()); + peer_config.burnchain.pox_constants.reward_cycle_length = 20; + peer_config.burnchain.pox_constants.prepare_length = 5; - let pox_addr = PoxAddress::from_legacy( - AddressHashMode::SerializeP2PKH, - key_to_stacks_addr(bob_delegate_key).bytes, - ); + let mut peer = TestPeer::new_with_observer(peer_config.clone(), Some(&observer)); - let next_reward_cycle = 1 + burnchain - .block_height_to_reward_cycle(block_height) - .unwrap(); + let mut peer_nonce = 0; - let delegate_stx = make_pox_4_delegate_stx( - alice_key, - alice_nonce, - 2 * min_ustx, - bob_delegate_address.clone(), - None, - Some(pox_addr.clone()), - ); + let reward_cycle_len = peer.config.burnchain.pox_constants.reward_cycle_length; + let prepare_phase_len = peer.config.burnchain.pox_constants.prepare_length; - let alice_principal = PrincipalData::from(key_to_stacks_addr(alice_key)); + let target_height = peer.config.burnchain.pox_constants.pox_4_activation_height; + let mut latest_block = None; - let delegate_stack_stx = make_pox_4_delegate_stack_stx( - bob_delegate_key, - bob_nonce, - alice_principal, - min_ustx * 2, - pox_addr.clone(), - block_height as u128, - lock_period, - ); + while peer.get_burn_block_height() < u64::from(target_height) { + latest_block = Some(peer.tenure_with_txs(&[], &mut peer_nonce)); + observer.get_blocks(); + } + let latest_block = latest_block.expect("Failed to get tip"); - let txs = vec![delegate_stx, delegate_stack_stx]; + let reward_cycle = get_current_reward_cycle(&peer, &peer.config.burnchain); + let next_reward_cycle = reward_cycle.wrapping_add(1); + let burn_block_height = peer.get_burn_block_height(); + let current_block_height = peer.config.current_block; 
+ let min_ustx = get_stacking_minimum(&mut peer, &latest_block); - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + ( + peer, + peer_nonce, + burn_block_height, + target_height as u128, + reward_cycle as u128, + next_reward_cycle as u128, + min_ustx as u128, + peer_config.clone(), + ) +} - bob_nonce += 1; +// In this test two solo stacker-signers Alice & Bob sign & stack +// for two reward cycles. Alice provides a signature, Bob uses +// 'set-signer-key-authorizations' to authorize. Two cycles later, +// when no longer stacked, they both try replaying their auths. +#[test] +fn test_scenario_one() { + // Alice solo stacker-signer setup + let mut alice = StackerSignerInfo::new(); + // Bob solo stacker-signer setup + let mut bob = StackerSignerInfo::new(); + let default_initial_balances: u64 = 1_000_000_000_000_000_000; + let initial_balances = vec![ + (alice.principal.clone(), default_initial_balances), + (bob.principal.clone(), default_initial_balances), + ]; - let signature = make_signer_key_signature( - &pox_addr, - &signer_sk, - next_reward_cycle.into(), - &Pox4SignatureTopic::AggregationIncrease, - 1_u128, + let observer = TestEventObserver::new(); + let ( + mut peer, + mut peer_nonce, + burn_block_height, + target_height, + reward_cycle, + next_reward_cycle, + min_ustx, + peer_config, + ) = pox_4_scenario_test_setup("test_scenario_one", &observer, initial_balances); + + // Alice Signatures + let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; + let lock_period = 1; + let alice_signature = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, u128::MAX, 1, ); + let alice_signature_err = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + reward_cycle - 1, + &Pox4SignatureTopic::StackStx, + lock_period, + 100, + 2, + ); - // Bob's Aggregate Increase - let bobs_aggregate_increase = make_pox_4_aggregation_increase( - 
&bob_delegate_key, - bob_nonce, - &pox_addr, - next_reward_cycle.into(), - 0, - Some(signature), - &signer_pk, + // Bob Authorizations + let bob_authorization_low = make_pox_4_set_signer_key_auth( + &bob.pox_address, + &bob.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + true, + bob.nonce, + Some(&bob.private_key), + 100, + 2, + ); + bob.nonce += 1; + let bob_authorization = make_pox_4_set_signer_key_auth( + &bob.pox_address, + &bob.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + true, + bob.nonce, + Some(&bob.private_key), u128::MAX, + 3, + ); + bob.nonce += 1; + + // Alice stacks + let alice_err_nonce = alice.nonce; + let alice_stack_err = make_pox_4_lockup( + &alice.private_key, + alice_err_nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + burn_block_height, + Some(alice_signature_err), + 100, 1, ); - let txs = vec![bobs_aggregate_increase]; + let alice_stack_nonce = alice_err_nonce + 1; + let alice_stack = make_pox_4_lockup( + &alice.private_key, + alice_stack_nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + burn_block_height, + Some(alice_signature.clone()), + u128::MAX, + 1, + ); + alice.nonce = alice_stack_nonce + 1; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + // Bob stacks + let bob_nonce_stack_err = bob.nonce; + let bob_stack_err = make_pox_4_lockup( + &bob.private_key, + bob_nonce_stack_err, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + None, + 100, + 2, + ); + let bob_nonce_stack = bob_nonce_stack_err + 1; + let bob_stack = make_pox_4_lockup( + &bob.private_key, + bob_nonce_stack, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + None, + u128::MAX, + 3, + ); + bob.nonce = bob_nonce_stack + 1; - let delegate_transactions = - get_last_block_sender_transactions(&observer, key_to_stacks_addr(bob_delegate_key)); + let txs = vec![ + 
bob_authorization_low, + bob_authorization, + alice_stack_err, + alice_stack, + bob_stack_err, + bob_stack, + ]; - let actual_result = delegate_transactions.first().cloned().unwrap().result; + // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer.config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Verify Alice stacked + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &alice.principal) + .expect("Failed to find alice initial stack-stx"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, alice.pox_address); + + // Verify Bob stacked + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &bob.principal) + .expect("Failed to find bob initial stack-stx"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, bob.pox_address); + + // 1. Check bob's low authorization transaction + let bob_tx_result_low = tx_block + .receipts + .get(1) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(bob_tx_result_low, Value::Bool(true)); - // Should be a DELEGATION NO REWARD SLOT error - let expected_result = Value::error(Value::Int(28)).unwrap(); + // 2. Check bob's expected authorization transaction + let bob_tx_result_ok = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(bob_tx_result_ok, Value::Bool(true)); - assert_eq!(actual_result, expected_result); + // 3. 
Check alice's low stack transaction + let alice_tx_result_err = tx_block + .receipts + .get(3) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_tx_result_err, Value::Int(38)); - // test that the reward set is empty - let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert!(reward_set.is_empty()); -} + // Get alice's expected stack transaction + let alice_tx_result_ok = tx_block + .receipts + .get(4) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // 4.1 Check amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = alice_tx_result_ok + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // 4.2 Check signer key + let signer_key_expected = Value::buff_from(alice.public_key.to_bytes_compressed()).unwrap(); + let signer_key_actual = alice_tx_result_ok + .data_map + .get("signer-key") + .unwrap() + .clone(); + assert_eq!(signer_key_expected, signer_key_actual); + + // 4.3 Check unlock height + let unlock_height_expected = Value::UInt( + peer.config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64 + lock_period as u64) + .wrapping_sub(1) as u128, + ); + let unlock_height_actual = alice_tx_result_ok + .data_map + .get("unlock-burn-height") + .unwrap() + .clone(); + assert_eq!(unlock_height_expected, unlock_height_actual); + + // 5. 
Check bob's error stack transaction + let bob_tx_result_err = tx_block + .receipts + .get(5) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(bob_tx_result_err, Value::Int(38)); + + // Get bob's expected stack transaction + let bob_tx_result_ok = tx_block + .receipts + .get(6) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // 6.1 Check amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = bob_tx_result_ok + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // 6.2 Check signer key + let signer_key_expected = Value::buff_from(bob.public_key.to_bytes_compressed()).unwrap(); + let signer_key_actual = bob_tx_result_ok.data_map.get("signer-key").unwrap().clone(); + assert_eq!(signer_key_expected, signer_key_actual); + + // 6.3 Check unlock height (end of cycle 7 - block 140) + let unlock_height_expected = Value::UInt( + peer.config + .burnchain + .reward_cycle_to_block_height((next_reward_cycle + lock_period) as u64) + .wrapping_sub(1) as u128, + ); + let unlock_height_actual = bob_tx_result_ok + .data_map + .get("unlock-burn-height") + .unwrap() + .clone(); + assert_eq!(unlock_height_expected, unlock_height_actual); + + // Now starting create vote txs + // Fetch signer indices in reward cycle 6 + let alice_index = get_signer_index( + &mut peer, + latest_block, + alice.address.clone(), + next_reward_cycle, + ); + let bob_index = get_signer_index( + &mut peer, + latest_block, + bob.address.clone(), + next_reward_cycle, + ); + // Alice vote + let alice_vote = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + alice.nonce += 1; + // Bob vote + let bob_vote = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + 
&peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + bob.nonce += 1; + let txs = vec![alice_vote, bob_vote]; + + let target_reward_cycle = 8; + // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) + let mut target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(target_reward_cycle as u64); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) + .expect("No approved key found"); + + // Start replay transactions + // Alice stacks with a replayed signature + let alice_replay_nonce = alice.nonce; + let alice_stack_replay = make_pox_4_lockup( + &alice.private_key, + alice_replay_nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + 161, + Some(alice_signature.clone()), + u128::MAX, + 1, + ); + // Bob stacks with a replayed authorization + let bob_nonce_stack_replay = bob.nonce; + let bob_stack_replay = make_pox_4_lockup( + &bob.private_key, + bob_nonce_stack_replay, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + 161, + None, + u128::MAX, + 3, + ); + let txs = vec![alice_stack_replay, bob_stack_replay]; + + // Commit replay txs & advance to the second burn block of reward cycle 8 (block 162) + target_height += 1; + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Check Alice replay, expect (err 35) - ERR_INVALID_SIGNATURE_PUBKEY + let alice_replay_result = tx_block + .receipts + .get(1) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_replay_result, Value::Int(35)); + + // Check Bob replay, expect (err 19) - ERR_SIGNER_AUTH_USED + let bob_tx_result = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(bob_tx_result, 
Value::Int(19)); +} + +// In this test two solo service signers, Alice & Bob, provide auth +// for Carl & Dave, solo stackers. Alice provides a signature for Carl, +// Bob uses 'set-signer-key...' for Dave. +#[test] +fn test_scenario_two() { + // Alice service signer setup + let mut alice = StackerSignerInfo::new(); + // Bob service signer setup + let mut bob = StackerSignerInfo::new(); + // Carl solo stacker setup + let mut carl = StackerSignerInfo::new(); + // Dave solo stacker setup + let mut dave = StackerSignerInfo::new(); + + let default_initial_balances = 1_000_000_000_000_000_000; + let initial_balances = vec![ + (alice.principal.clone(), default_initial_balances), + (bob.principal.clone(), default_initial_balances), + (carl.principal.clone(), default_initial_balances), + (dave.principal.clone(), default_initial_balances), + ]; + let observer = TestEventObserver::new(); + let ( + mut peer, + mut peer_nonce, + burn_block_height, + target_height, + reward_cycle, + next_reward_cycle, + min_ustx, + peer_config, + ) = pox_4_scenario_test_setup("test_scenario_two", &observer, initial_balances); + + // Alice Signature For Carl + let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; + let lock_period = 1; + let alice_signature_for_carl = make_signer_key_signature( + &carl.pox_address, + &alice.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + // Bob Authorization For Dave + let bob_authorization_for_dave = make_pox_4_set_signer_key_auth( + &dave.pox_address, + &bob.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + true, + bob.nonce, + Some(&bob.private_key), + u128::MAX, + 1, + ); + bob.nonce += 1; + + // Carl Stacks w/ Alices Signature - Malformed (lock period) + let carl_stack_err = make_pox_4_lockup( + &carl.private_key, + carl.nonce, + amount, + &carl.pox_address, + lock_period + 1, + &alice.public_key, + burn_block_height, + 
Some(alice_signature_for_carl.clone()), + u128::MAX, + 1, + ); + carl.nonce += 1; + + // Carl Stacks w/ Alices Signature + let carl_stack = make_pox_4_lockup( + &carl.private_key, + carl.nonce, + amount, + &carl.pox_address, + lock_period, + &alice.public_key, + burn_block_height, + Some(alice_signature_for_carl.clone()), + u128::MAX, + 1, + ); + carl.nonce += 1; + + // Dave Stacks w/ Bobs Authorization - Malformed (pox) + let dave_stack_err = make_pox_4_lockup( + &dave.private_key, + dave.nonce, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + None, + u128::MAX, + 1, + ); + dave.nonce += 1; + + // Dave Stacks w/ Bobs Authorization + let dave_stack = make_pox_4_lockup( + &dave.private_key, + dave.nonce, + amount, + &dave.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + None, + u128::MAX, + 1, + ); + dave.nonce += 1; + + let txs = vec![ + bob_authorization_for_dave, + carl_stack_err, + carl_stack, + dave_stack_err, + dave_stack, + ]; + + // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase for reward cycle 6) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Verify Carl Stacked + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &carl.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, carl.pox_address); + + // Verify Dave Stacked + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &dave.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, 
dave.pox_address); + + // Check Carl's malformed signature stack transaction (err 35 - INVALID_SIGNATURE_PUBKEY) + let carl_tx_result_err = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(carl_tx_result_err, Value::Int(35)); + + // Check Carl's expected stack transaction + let carl_tx_result_ok = tx_block + .receipts + .get(3) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Carl amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = carl_tx_result_ok + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Carl signer key + let signer_key_expected = Value::buff_from(alice.public_key.to_bytes_compressed()).unwrap(); + let signer_key_actual = carl_tx_result_ok + .data_map + .get("signer-key") + .unwrap() + .clone(); + assert_eq!(signer_key_expected, signer_key_actual); + + // Check Dave's malformed pox stack transaction (err 19 - INVALID_SIGNER_AUTH) + let dave_tx_result_err = tx_block + .receipts + .get(4) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(dave_tx_result_err, Value::Int(19)); + + // Check Dave's expected stack transaction + let dave_tx_result_ok = tx_block + .receipts + .get(5) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Dave amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = dave_tx_result_ok + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Dave signer key + let signer_key_expected = Value::buff_from(bob.public_key.to_bytes_compressed()).unwrap(); + let signer_key_actual = dave_tx_result_ok + .data_map + .get("signer-key") + .unwrap() + .clone(); + assert_eq!(signer_key_expected, 
signer_key_actual); + + // Now starting create vote txs + // Fetch signer indices in reward cycle 6 + let alice_index = get_signer_index( + &mut peer, + latest_block, + alice.address.clone(), + next_reward_cycle, + ); + let bob_index = get_signer_index( + &mut peer, + latest_block, + bob.address.clone(), + next_reward_cycle, + ); + // Alice expected vote + let alice_vote_expected = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + alice.nonce += 1; + // Alice duplicate vote + let alice_vote_duplicate = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + alice.nonce += 1; + // Bob vote err (err 17 - INVALID_ROUND) + let bob_vote_err = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 3, + next_reward_cycle, + ); + bob.nonce += 1; + // Bob expected vote + let bob_vote_expected = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + bob.nonce += 1; + let txs = vec![ + alice_vote_expected, + alice_vote_duplicate, + bob_vote_err, + bob_vote_expected, + ]; + + let target_reward_cycle = 8; + // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(target_reward_cycle as u64); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Check Alice's expected vote + let alice_expected_vote = tx_block + .receipts + .get(1) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(alice_expected_vote, Value::Bool(true)); + + // Check 
Alice's duplicate vote (err 15 - DUPLICATE_ROUND) + let alice_duplicate_vote = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_duplicate_vote, Value::UInt(15)); + + // Check Bob's round err vote (err 17 - INVALID_ROUND) + let bob_round_err_vote = tx_block + .receipts + .get(3) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(bob_round_err_vote, Value::UInt(17)); + + // Check Bob's expected vote + let bob_expected_vote = tx_block + .receipts + .get(4) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(bob_expected_vote, Value::Bool(true)); +} + +// In this scenario, two solo stacker-signers (Alice, Bob), one service signer (Carl), +// one stacking pool operator (Dave), & three pool stackers (Eve, Frank, Grace). +#[test] +fn test_scenario_three() { + // Alice stacker signer setup + let mut alice = StackerSignerInfo::new(); + // Bob stacker signer setup + let mut bob = StackerSignerInfo::new(); + // Carl service signer setup + let carl = StackerSignerInfo::new(); + // David stacking pool operator setup + let mut david = StackerSignerInfo::new(); + // Eve pool stacker setup + let mut eve = StackerSignerInfo::new(); + // Frank pool stacker setup + let mut frank = StackerSignerInfo::new(); + // Grace pool stacker setup + let mut grace = StackerSignerInfo::new(); + + let default_initial_balances = 1_000_000_000_000_000_000; + let initial_balances = vec![ + (alice.principal.clone(), default_initial_balances), + (bob.principal.clone(), default_initial_balances), + (carl.principal.clone(), default_initial_balances), + (david.principal.clone(), default_initial_balances), + (eve.principal.clone(), default_initial_balances), + (frank.principal.clone(), default_initial_balances), + (grace.principal.clone(), default_initial_balances), + ]; + let observer = TestEventObserver::new(); + let ( + mut peer, + mut peer_nonce, + burn_block_height, + 
target_height, + reward_cycle, + next_reward_cycle, + min_ustx, + peer_config, + ) = pox_4_scenario_test_setup("test_scenario_three", &observer, initial_balances); + + let lock_period = 2; + let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; + let alice_signature_for_alice_err = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + 13, + u128::MAX, + 1, + ); + let alice_signature_for_alice_expected = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + let bob_signature_for_bob_err = make_signer_key_signature( + &bob.pox_address, + &bob.private_key, + reward_cycle - 1, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + let bob_signature_for_bob_expected = make_signer_key_signature( + &bob.pox_address, + &bob.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + let carl_signature_for_david_err = make_signer_key_signature( + &david.pox_address, + &carl.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + 1, + u128::MAX, + 1, + ); + let carl_signature_for_david = make_signer_key_signature( + &david.pox_address, + &carl.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 1, + ); + // Alice solo stack, error + let alice_stack_tx_err = make_pox_4_lockup( + &alice.private_key, + alice.nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + burn_block_height, + Some(alice_signature_for_alice_err.clone()), + u128::MAX, + 1, + ); + alice.nonce += 1; + // Alice solo stack + let alice_stack_tx_expected = make_pox_4_lockup( + &alice.private_key, + alice.nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + burn_block_height, + Some(alice_signature_for_alice_expected), + u128::MAX, + 1, + ); + alice.nonce += 1; + // Bob 
solo stack, error + let bob_stack_tx_err = make_pox_4_lockup( + &bob.private_key, + bob.nonce, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + Some(bob_signature_for_bob_err.clone()), + u128::MAX, + 1, + ); + bob.nonce += 1; + // Bob solo stack + let bob_stack_tx_expected = make_pox_4_lockup( + &bob.private_key, + bob.nonce, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + Some(bob_signature_for_bob_expected), + u128::MAX, + 1, + ); + bob.nonce += 1; + // Eve pool stacker delegating STX to David + let eve_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &eve.private_key, + eve.nonce, + amount, + david.principal.clone(), + Some( + peer.config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .into(), + ), + Some(david.pox_address.clone()), + ); + eve.nonce += 1; + // Frank pool stacker delegating STX to David + let frank_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &frank.private_key, + frank.nonce, + amount, + david.principal.clone(), + None, + Some(david.pox_address.clone()), + ); + frank.nonce += 1; + // Grace pool stacker delegating STX to David + let grace_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &grace.private_key, + grace.nonce, + amount, + david.principal.clone(), + None, + Some(david.pox_address.clone()), + ); + grace.nonce += 1; + // Alice error delegating while stacked + let alice_delegate_stx_to_david_err = make_pox_4_delegate_stx( + &alice.private_key, + alice.nonce, + amount, + david.principal.clone(), + None, + Some(david.pox_address.clone()), + ); + // Collecting all the pool stackers + let davids_stackers = &[ + (eve.clone(), lock_period), + (frank.clone(), lock_period), + (grace.clone(), lock_period), + (alice.clone(), lock_period), + ]; + let davids_delegate_stack_stx_txs: Vec<_> = davids_stackers + .iter() + .map(|(stacker, lock_period)| { + let tx = make_pox_4_delegate_stack_stx( + &david.private_key, + david.nonce, + 
stacker.principal.clone(), + amount, + david.pox_address.clone(), + burn_block_height as u128, + *lock_period, + ); + david.nonce += 1; + tx + }) + .collect(); + // Aggregate commit david's pool stackers, error by committing for two cycles + let davids_aggregate_commit_index_tx_err_cycles = make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle.wrapping_add(1), + Some(carl_signature_for_david.clone()), + &carl.public_key, + u128::MAX, + 1, + ); + david.nonce += 1; + // Aggregate commit david's pool stackers, error by committing for two cycles + let davids_aggregate_commit_index_tx_err_signature = make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle, + Some(carl_signature_for_david_err.clone()), + &carl.public_key, + u128::MAX, + 1, + ); + david.nonce += 1; + // Aggregate commit david's pool stackers correctly + let davids_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle, + Some(carl_signature_for_david.clone()), + &carl.public_key, + u128::MAX, + 1, + ); + david.nonce += 1; + + let mut txs = vec![ + alice_stack_tx_err, + alice_stack_tx_expected, + bob_stack_tx_err, + bob_stack_tx_expected, + eve_delegate_stx_to_david_tx, + frank_delegate_stx_to_david_tx, + grace_delegate_stx_to_david_tx, + alice_delegate_stx_to_david_err, + ]; + txs.extend(davids_delegate_stack_stx_txs); + txs.extend(vec![ + davids_aggregate_commit_index_tx_err_cycles, + davids_aggregate_commit_index_tx_err_signature, + davids_aggregate_commit_index_tx, + ]); + + // Commit txs in next block & advance to reward set calculation of the next reward cycle + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, 
tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Start of test checks + // 1. Check that Alice can't stack with an lock_period different than signature + let alice_stack_tx_err = tx_block + .receipts + .get(1) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_stack_tx_err, Value::Int(35)); + + // 2. Check that Alice can solo stack-sign + let alice_stack_tx_ok = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Alice amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = alice_stack_tx_ok + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Alice signer key + let signer_key_expected = Value::buff_from(alice.public_key.to_bytes_compressed()).unwrap(); + let signer_key_actual = alice_stack_tx_ok + .data_map + .get("signer-key") + .unwrap() + .clone(); + assert_eq!(signer_key_expected, signer_key_actual); + + // 3. Check that Bob can't stack with a signature that points to a reward cycle in the past + let bob_stack_tx_err = tx_block + .receipts + .get(3) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(bob_stack_tx_err, Value::Int(35)); + + // 4. 
Check that Bob can solo stack-sign + let bob_stack_tx_ok = tx_block + .receipts + .get(4) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Bob amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = bob_stack_tx_ok.data_map.get("lock-amount").unwrap().clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Bob signer key + let signer_key_expected = Value::buff_from(bob.public_key.to_bytes_compressed()); + let signer_key_actual = bob_stack_tx_ok.data_map.get("signer-key").unwrap().clone(); + assert_eq!(signer_key_actual, signer_key_actual); + + // 5. Check that David can't delegate-stack-stx Eve if delegation expires during lock period + let eve_delegate_stx_to_david_err = tx_block + .receipts + .get(9) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(eve_delegate_stx_to_david_err, Value::Int(21)); + + // 6. Check that Frank is correctly delegated to David + let frank_delegate_stx_to_david_tx = tx_block + .receipts + .get(10) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Frank amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = frank_delegate_stx_to_david_tx + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Frank stacker address + let stacker_expected = Value::Principal(frank.address.clone().into()); + let stacker_actual = frank_delegate_stx_to_david_tx + .data_map + .get("stacker") + .unwrap() + .clone(); + assert_eq!(stacker_expected, stacker_actual); + + // 7. 
Check that Grace is correctly delegated to David + let grace_delegate_stx_to_david_tx = tx_block + .receipts + .get(11) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Grace amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = grace_delegate_stx_to_david_tx + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Grace stacker address + let stacker_expected = Value::Principal(grace.address.clone().into()); + let stacker_actual = grace_delegate_stx_to_david_tx + .data_map + .get("stacker") + .unwrap() + .clone(); + assert_eq!(stacker_expected, stacker_actual); + + // 8. Check that Alice can't delegate-stack if already stacking + let alice_delegate_stx_to_david_err = tx_block + .receipts + .get(12) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_delegate_stx_to_david_err, Value::Int(3)); + + // 9. Check that David can't aggregate-commit-indexed if pointing to a reward cycle in the future + let david_aggregate_commit_indexed_err = tx_block + .receipts + .get(13) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(david_aggregate_commit_indexed_err, Value::Int(35)); + + // 10. Check that David can't aggregate-commit-indexed if using the incorrect signature topic + let david_aggregate_commit_indexed_err = tx_block + .receipts + .get(14) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(david_aggregate_commit_indexed_err, Value::Int(35)); + + // 11. 
Check that David can aggregate-commit-indexed successfully, checking stacking index = 2 + let david_aggregate_commit_indexed_ok = tx_block + .receipts + .get(15) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(david_aggregate_commit_indexed_ok, Value::UInt(2)); +} + +// In this test scenario two solo stacker-signers (Alice & Bob), +// test out the updated stack-extend & stack-increase functions +// across multiple cycles. +#[test] +fn test_scenario_four() { + // Alice service signer setup + let mut alice = StackerSignerInfo::new(); + // Bob service signer setup + let mut bob = StackerSignerInfo::new(); + + let default_initial_balances = 1_000_000_000_000_000_000; + let initial_balances = vec![ + (alice.principal.clone(), default_initial_balances), + (bob.principal.clone(), default_initial_balances), + ]; + let observer = TestEventObserver::new(); + let ( + mut peer, + mut peer_nonce, + burn_block_height, + target_height, + reward_cycle, + next_reward_cycle, + min_ustx, + peer_config, + ) = pox_4_scenario_test_setup("test_scenario_four", &observer, initial_balances); + + // Initial Alice Signature + let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; + let lock_period = 2; + let alice_signature_initial = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + // Extend Alice Signature Err (meant for Bob) + let alice_signature_extend_err = make_signer_key_signature( + &bob.pox_address, + &bob.private_key, + next_reward_cycle.wrapping_add(1), + &Pox4SignatureTopic::StackExtend, + lock_period, + u128::MAX, + 1, + ); + // Extend Alice Signature Expected + let alice_signature_extend = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + next_reward_cycle.wrapping_add(1), + &Pox4SignatureTopic::StackExtend, + lock_period, + u128::MAX, + 1, + ); + // Initial Bob Signature + let bob_signature_initial 
= make_signer_key_signature( + &bob.pox_address, + &bob.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + // Alice initial stack + let alice_stack = make_pox_4_lockup( + &alice.private_key, + alice.nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + burn_block_height, + Some(alice_signature_initial.clone()), + u128::MAX, + 1, + ); + alice.nonce += 1; + // Bob initial stack + let bob_stack = make_pox_4_lockup( + &bob.private_key, + bob.nonce, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + Some(bob_signature_initial.clone()), + u128::MAX, + 1, + ); + bob.nonce += 1; + + let txs = vec![alice_stack.clone(), bob_stack.clone()]; + + // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase for reward cycle 6) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Verify Alice Stacked + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &alice.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, alice.pox_address); + + // Verify Bob Stacked + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &bob.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, bob.pox_address); + + // Now starting create vote txs + // Fetch signer indices in reward cycle 6 + let alice_index = get_signer_index( + &mut peer, + latest_block, + alice.address.clone(), + next_reward_cycle, + ); + let bob_index = get_signer_index( + &mut peer, + 
latest_block, + bob.address.clone(), + next_reward_cycle, + ); + // Alice err vote + let alice_vote_err = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + alice.nonce += 1; + // Alice expected vote + let alice_vote_expected = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + alice.nonce += 1; + // Bob expected vote + let bob_vote_expected = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + bob.nonce += 1; + let txs = vec![ + alice_vote_err.clone(), + alice_vote_expected.clone(), + bob_vote_expected.clone(), + ]; + + // Commit vote txs & move to the prepare phase of reward cycle 7 (block 155) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(7 as u64) + .wrapping_add(15); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Check Alice's err vote (err 10 - INVALID_SIGNER_INDEX) + let alice_err_vote = tx_block + .receipts + .get(1) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_err_vote, Value::UInt(10)); + + // Check Alice's expected vote + let alice_expected_vote = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(alice_expected_vote, Value::Bool(true)); + + // Check Bob's expected vote + let bob_expected_vote = tx_block + .receipts + .get(3) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(bob_expected_vote, Value::Bool(true)); + + let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) + .expect("No approved key found"); + 
assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); + + // Alice stack-extend err tx + let alice_extend_err = make_pox_4_extend( + &alice.private_key, + alice.nonce, + alice.pox_address.clone(), + lock_period, + bob.public_key.clone(), + Some(alice_signature_extend_err.clone()), + u128::MAX, + 1, + ); + alice.nonce += 1; + // Alice stack-extend tx + let alice_extend = make_pox_4_extend( + &alice.private_key, + alice.nonce, + alice.pox_address.clone(), + lock_period, + alice.public_key.clone(), + Some(alice_signature_extend.clone()), + u128::MAX, + 1, + ); + alice.nonce += 1; + // Now starting second round of vote txs + // Fetch signer indices in reward cycle 7 + let alice_index = get_signer_index(&mut peer, latest_block, alice.address.clone(), 7); + // Alice err vote + let alice_vote_expected_err = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + 7, + ); + alice.nonce += 1; + + let txs = vec![ + alice_extend_err.clone(), + alice_extend.clone(), + alice_vote_expected_err.clone(), + ]; + let target_height = target_height.wrapping_add(1); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Check Alice's err stack-extend tx (err 35 - INVALID_SIGNATURE_PUBKEY) + let alice_err_extend = tx_block + .receipts + .get(1) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_err_extend, Value::Int(35)); + + // Check Alice's stack-extend tx + let alice_extend_receipt = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + + // Check Alice's expected err vote (err 14 - DUPLICATE_AGGREGATE_PUBLIC_KEY) + let alice_expected_vote_err = tx_block + .receipts + .get(3) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_expected_vote_err, Value::UInt(14)); + + // Get approved key & 
assert that it wasn't sent (None) + let approved_key = get_approved_aggregate_key(&mut peer, latest_block, 7); + assert_eq!(approved_key, None); +} + +// In this test case, Alice delegates twice the stacking minimum to Bob. +// Bob stacks Alice's funds, and then immediately tries to stacks-aggregation-increase. +// This should return a clarity user error. +#[test] +fn delegate_stack_increase_err() { + let lock_period: u128 = 2; + let observer = TestEventObserver::new(); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = + prepare_pox4_test(function_name!(), Some(&observer)); + + let alice_nonce = 0; + let alice_key = &keys[0]; + let alice_address = PrincipalData::from(key_to_stacks_addr(alice_key)); + let mut bob_nonce = 0; + let bob_delegate_key = &keys[1]; + let bob_delegate_address = PrincipalData::from(key_to_stacks_addr(bob_delegate_key)); + let min_ustx = get_stacking_minimum(&mut peer, &latest_block); + let signer_sk = StacksPrivateKey::from_seed(&[1, 3, 3, 7]); + let signer_pk = StacksPublicKey::from_private(&signer_sk); + let signer_pk_bytes = signer_pk.to_bytes_compressed(); + let signer_key_val = Value::buff_from(signer_pk_bytes.clone()).unwrap(); + + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(bob_delegate_key).bytes, + ); + + let next_reward_cycle = 1 + burnchain + .block_height_to_reward_cycle(block_height) + .unwrap(); + + let delegate_stx = make_pox_4_delegate_stx( + alice_key, + alice_nonce, + 2 * min_ustx, + bob_delegate_address.clone(), + None, + Some(pox_addr.clone()), + ); + + let alice_principal = PrincipalData::from(key_to_stacks_addr(alice_key)); + + let delegate_stack_stx = make_pox_4_delegate_stack_stx( + bob_delegate_key, + bob_nonce, + alice_principal, + min_ustx * 2, + pox_addr.clone(), + block_height as u128, + lock_period, + ); + + let txs = vec![delegate_stx, delegate_stack_stx]; + + let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); 
+ + bob_nonce += 1; + + let signature = make_signer_key_signature( + &pox_addr, + &signer_sk, + next_reward_cycle.into(), + &Pox4SignatureTopic::AggregationIncrease, + 1_u128, + u128::MAX, + 1, + ); + + // Bob's Aggregate Increase + let bobs_aggregate_increase = make_pox_4_aggregation_increase( + &bob_delegate_key, + bob_nonce, + &pox_addr, + next_reward_cycle.into(), + 0, + Some(signature), + &signer_pk, + u128::MAX, + 1, + ); + + let txs = vec![bobs_aggregate_increase]; + + let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + + let delegate_transactions = + get_last_block_sender_transactions(&observer, key_to_stacks_addr(bob_delegate_key)); + + let actual_result = delegate_transactions.first().cloned().unwrap().result; + + // Should be a DELEGATION NO REWARD SLOT error + let expected_result = Value::error(Value::Int(28)).unwrap(); + + assert_eq!(actual_result, expected_result); + + // test that the reward set is empty + let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert!(reward_set.is_empty()); +} pub fn get_stacking_state_pox_4( peer: &mut TestPeer, @@ -6827,32 +8476,274 @@ fn missed_slots_no_unlock() { let bob_lockup = make_simple_pox_4_lock(&bob, &mut peer, 1 * POX_THRESHOLD_STEPS_USTX, 6); - let txs = [alice_lockup, bob_lockup]; + let txs = [alice_lockup, bob_lockup]; + let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + + // check that the "raw" reward set will contain entries for alice and bob + // for the pox-4 cycles + for cycle_number in first_v4_cycle..first_v4_cycle + 6 { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!( + reward_set_entries.len(), + 2, + "Reward set should contain two entries in cycle {cycle_number}" + ); + assert_eq!( + 
reward_set_entries[0].reward_address.bytes(), + bob_address.bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + alice_address.bytes.0.to_vec() + ); + } + + // we'll produce blocks until the next reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(first_v4_cycle) + 1; + let auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; + + // but first, check that bob has locked tokens at (height_target + 1) + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &bob_address.to_account_principal(), + ); + assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // check that the "raw" reward sets for all cycles contain entries for alice and bob still! + for cycle_number in first_v4_cycle..(first_v4_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 2); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + bob_address.bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + alice_address.bytes.0.to_vec() + ); + } + + let expected_unlock_height = burnchain.reward_cycle_to_block_height(first_v4_cycle + 6) - 1; + // now check that bob has an unlock height of `height_target` + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &bob_address.to_account_principal(), + ); + assert_eq!(bob_bal.unlock_height(), expected_unlock_height); + assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + + let alice_bal = get_stx_account_at( + &mut peer, + &latest_block, + &alice_address.to_account_principal(), + ); + assert_eq!(alice_bal.unlock_height(), 
expected_unlock_height); + assert_eq!(alice_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX * 1024); + + // check that the total reward cycle amounts have not decremented + for cycle_number in first_v4_cycle..(first_v4_cycle + 6) { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + 1025 * POX_THRESHOLD_STEPS_USTX + ); + } + + // check that bob's stacking-state is gone and alice's stacking-state is correct + let bob_state = get_stacking_state_pox( + &mut peer, + &latest_block, + &bob_address.to_account_principal(), + PoxVersions::Pox4.get_name_str(), + ) + .expect("Bob should have stacking-state entry") + .expect_tuple() + .unwrap(); + let reward_indexes_str = bob_state.get("reward-set-indexes").unwrap().to_string(); + assert_eq!(reward_indexes_str, "(u1 u1 u1 u1 u1 u1)"); + + let alice_state = get_stacking_state_pox( + &mut peer, + &latest_block, + &alice_address.to_account_principal(), + PoxVersions::Pox4.get_name_str(), + ) + .expect("Alice should have stacking-state entry") + .expect_tuple() + .unwrap(); + let reward_indexes_str = alice_state.get("reward-set-indexes").unwrap().to_string(); + assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); + + // check that bob is still locked at next block + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &bob_address.to_account_principal(), + ); + assert_eq!(bob_bal.unlock_height(), expected_unlock_height); + assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + + // now let's check some tx receipts + + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + let mut coinbase_txs = vec![]; + let mut reward_cycles_in_2_5 = 0u64; + + for b in blocks.into_iter() { + if let Some(ref reward_set_data) = b.reward_set_data { + let signers_set = reward_set_data.reward_set.signers.as_ref().unwrap(); + assert_eq!(signers_set.len(), 1); + assert_eq!( + 
StacksPublicKey::from_private(&alice).to_bytes_compressed(), + signers_set[0].signing_key.to_vec() + ); + let rewarded_addrs = HashSet::<_>::from_iter( + reward_set_data + .reward_set + .rewarded_addresses + .iter() + .map(|a| a.to_burnchain_repr()), + ); + assert_eq!(rewarded_addrs.len(), 1); + assert_eq!( + reward_set_data.reward_set.rewarded_addresses[0].bytes(), + alice_address.bytes.0.to_vec(), + ); + reward_cycles_in_2_5 += 1; + eprintln!("{:?}", b.reward_set_data) + } + + for (i, r) in b.receipts.into_iter().enumerate() { + if i == 0 { + coinbase_txs.push(r); + continue; + } + match r.transaction { + TransactionOrigin::Stacks(ref t) => { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } + } + _ => {} + } + } + } + + assert_eq!(alice_txs.len(), 1); + assert_eq!(bob_txs.len(), 1); + // only mined one 2.5 reward cycle, but make sure it was picked up in the events loop above + assert_eq!(reward_cycles_in_2_5, 1); + + // all should have committed okay + assert!( + match bob_txs.get(&0).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Bob tx0 should have committed okay" + ); + + // Check that the event produced by "handle-unlock" has a well-formed print event + // and that this event is included as part of the coinbase tx + for unlock_coinbase_index in [auto_unlock_coinbase] { + // expect the unlock to occur 1 block after the handle-unlock method was invoked. 
+ let expected_unlock_height = unlock_coinbase_index + EMPTY_SORTITIONS + 1; + let expected_cycle = pox_constants + .block_height_to_reward_cycle(0, expected_unlock_height) + .unwrap(); + assert!( + coinbase_txs[unlock_coinbase_index as usize].events.is_empty(), + "handle-unlock events are coinbase events and there should be no handle-unlock invocation in this test" + ); + } +} + +/// In this test case, we lockup enough to get participation to be non-zero, but not enough to qualify for a reward slot. +#[test] +fn no_lockups_2_5() { + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let (epochs, mut pox_constants) = make_test_epochs_pox(); + pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + &function_name!(), + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = None; + + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + + let mut coinbase_nonce = 0; + + let first_v4_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_4_activation_height as u64) + .unwrap() + + 1; + + // produce blocks until epoch 2.5 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[7].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + + let bob_lockup = make_simple_pox_4_lock(&bob, &mut peer, 1 * POX_THRESHOLD_STEPS_USTX, 6); + + let txs = [bob_lockup]; let mut latest_block = 
peer.tenure_with_txs(&txs, &mut coinbase_nonce); - // check that the "raw" reward set will contain entries for alice and bob - // for the pox-4 cycles + // check that the "raw" reward set will contain an entry for bob for cycle_number in first_v4_cycle..first_v4_cycle + 6 { let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); assert_eq!( reward_set_entries.len(), - 2, - "Reward set should contain two entries in cycle {cycle_number}" + 1, + "Reward set should contain one entry in cycle {cycle_number}" ); assert_eq!( reward_set_entries[0].reward_address.bytes(), bob_address.bytes.0.to_vec() ); - assert_eq!( - reward_set_entries[1].reward_address.bytes(), - alice_address.bytes.0.to_vec() - ); } // we'll produce blocks until the next reward cycle gets through the "handled start" code // this is one block after the reward cycle starts - let height_target = burnchain.reward_cycle_to_block_height(first_v4_cycle) + 1; + let height_target = burnchain.reward_cycle_to_block_height(first_v4_cycle + 1) + 1; let auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; // but first, check that bob has locked tokens at (height_target + 1) @@ -6867,254 +8758,748 @@ fn missed_slots_no_unlock() { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - // check that the "raw" reward sets for all cycles contain entries for alice and bob still! 
- for cycle_number in first_v4_cycle..(first_v4_cycle + 6) { - let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); - let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); - assert_eq!(reward_set_entries.len(), 2); - assert_eq!( - reward_set_entries[0].reward_address.bytes(), - bob_address.bytes.0.to_vec() - ); - assert_eq!( - reward_set_entries[1].reward_address.bytes(), - alice_address.bytes.0.to_vec() - ); + let blocks = observer.get_blocks(); + for b in blocks.into_iter() { + if let Some(ref reward_set_data) = b.reward_set_data { + assert_eq!(reward_set_data.reward_set.signers, Some(vec![])); + assert!(reward_set_data.reward_set.rewarded_addresses.is_empty()); + eprintln!("{:?}", b.reward_set_data) + } } +} - let expected_unlock_height = burnchain.reward_cycle_to_block_height(first_v4_cycle + 6) - 1; - // now check that bob has an unlock height of `height_target` - let bob_bal = get_stx_account_at( - &mut peer, - &latest_block, - &bob_address.to_account_principal(), +// In this scenario, two service signers (Alice, Bob), one stacker-signer (Carl), two stacking pool operators (Dave, Eve), & six pool stackers (Frank, Grace, Heidi, Ivan, Judy, Mallory). + +// First Nakamoto Reward Cycle +// First Nakamoto Tenure + +// 1. Franks stacks for 1 reward cycle, Grace stacks for 2 reward cycles & so on…Mallory stacks for 6 reward cycles: (so 6 wallets stacking n, n+1, n+2… cycles) +// 2. Dave asks Alice for 3 signatures +// 3. Eve asks Bob for 3 set-authorizations +// 4. Ivan - Mallory ask Bob to set-approval-authorization +// 5. Carl stx-stacks & self-signs for 3 reward cycle +// 6. In Carl's second reward cycle, he calls stx-extend for 3 more reward cycles +// 7. 
In Carl's third reward cycle, he calls stx-increase and should fail as he is straddling 2 keys +#[test] +fn test_scenario_five() { + // Alice service signer setup + let mut alice = StackerSignerInfo::new(); + // Bob service signer setup + let mut bob = StackerSignerInfo::new(); + // Carl solo stacker and signer setup + let mut carl = StackerSignerInfo::new(); + // David stacking pool operator (delegating signing to Alice) Setup + let mut david = StackerSignerInfo::new(); + // Eve stacking pool operator (delegating signing to Bob) Setup + let mut eve = StackerSignerInfo::new(); + // Frank pool stacker delegating STX to David + let mut frank = StackerSignerInfo::new(); + // Grace pool stacker delegating STX to David + let mut grace = StackerSignerInfo::new(); + // Heidi pool stacker delegating STX to David + let mut heidi = StackerSignerInfo::new(); + // Ivan pool stacker delegating STX to Eve + let mut ivan = StackerSignerInfo::new(); + // Jude pool stacker delegating STX to Eve + let mut jude = StackerSignerInfo::new(); + // Mallory pool stacker delegating STX to Eve + let mut mallory = StackerSignerInfo::new(); + + let default_initial_balances = 1_000_000_000_000_000_000; + let initial_balances = vec![ + (alice.principal.clone(), default_initial_balances), + (bob.principal.clone(), default_initial_balances), + (carl.principal.clone(), default_initial_balances), + (david.principal.clone(), default_initial_balances), + (eve.principal.clone(), default_initial_balances), + (frank.principal.clone(), default_initial_balances), + (grace.principal.clone(), default_initial_balances), + (heidi.principal.clone(), default_initial_balances), + (ivan.principal.clone(), default_initial_balances), + (jude.principal.clone(), default_initial_balances), + (mallory.principal.clone(), default_initial_balances), + ]; + let observer = TestEventObserver::new(); + let ( + mut peer, + mut peer_nonce, + burn_block_height, + target_height, + reward_cycle, + next_reward_cycle, + min_ustx, + 
mut peer_config, + ) = pox_4_scenario_test_setup("test_scenario_five", &observer, initial_balances); + + // Lock periods for each stacker + let carl_lock_period = 3; + let frank_lock_period = 1; + let grace_lock_period = 2; + let heidi_lock_period = 3; + let ivan_lock_period = 4; + let jude_lock_period = 5; + let mallory_lock_period = 6; + + let carl_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(carl_lock_period) as u64) + as u128; + let frank_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(frank_lock_period) as u64) + as u128; + let grace_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(grace_lock_period) as u64) + as u128; + let heidi_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(heidi_lock_period) as u64) + as u128; + let ivan_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(ivan_lock_period) as u64) + as u128; + let jude_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(jude_lock_period) as u64) + as u128; + let mallory_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(mallory_lock_period) as u64) + as u128; + + // The pool operators should delegate their signing power for as long as their longest stacker + let david_lock_period = heidi_lock_period; + let eve_lock_period = mallory_lock_period; + + let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; + let carl_signature_for_carl = make_signer_key_signature( + &carl.pox_address, + &carl.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + carl_lock_period, + u128::MAX, + 1, ); - assert_eq!(bob_bal.unlock_height(), expected_unlock_height); - 
assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + let carl_stack_tx = make_pox_4_lockup( + &carl.private_key, + carl.nonce, + amount, + &carl.pox_address, + carl_lock_period, + &carl.public_key, + burn_block_height, + Some(carl_signature_for_carl), + u128::MAX, + 1, + ); + carl.nonce += 1; - let alice_bal = get_stx_account_at( - &mut peer, - &latest_block, - &alice_address.to_account_principal(), + // Frank pool stacker delegating STX to David + let frank_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &frank.private_key, + frank.nonce, + amount, + david.principal.clone(), + Some(frank_end_burn_height), + Some(david.pox_address.clone()), ); - assert_eq!(alice_bal.unlock_height(), expected_unlock_height); - assert_eq!(alice_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX * 1024); + frank.nonce += 1; - // check that the total reward cycle amounts have not decremented - for cycle_number in first_v4_cycle..(first_v4_cycle + 6) { - assert_eq!( - get_reward_cycle_total(&mut peer, &latest_block, cycle_number), - 1025 * POX_THRESHOLD_STEPS_USTX - ); + // Grace pool stacker delegating STX to David + let grace_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &grace.private_key, + grace.nonce, + amount, + david.principal.clone(), + Some(grace_end_burn_height), + Some(david.pox_address.clone()), + ); + grace.nonce += 1; + + // Heidi pool stacker delegating STX to David + let heidi_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &heidi.private_key, + heidi.nonce, + amount, + david.principal.clone(), + Some(heidi_end_burn_height), + Some(david.pox_address.clone()), + ); + heidi.nonce += 1; + + // Ivan pool stacker delegating STX to Eve + let ivan_delegate_stx_to_eve_tx = make_pox_4_delegate_stx( + &ivan.private_key, + ivan.nonce, + amount, + eve.principal.clone(), + Some(ivan_end_burn_height), + Some(eve.pox_address.clone()), + ); + ivan.nonce += 1; + + // Jude pool stacker delegating STX to Eve + let jude_delegate_stx_to_eve_tx = 
make_pox_4_delegate_stx( + &jude.private_key, + jude.nonce, + amount, + eve.principal.clone(), + Some(jude_end_burn_height), + Some(eve.pox_address.clone()), + ); + jude.nonce += 1; + + // Mallory pool stacker delegating STX to Eve + let mallory_delegate_stx_to_eve_tx = make_pox_4_delegate_stx( + &mallory.private_key, + mallory.nonce, + amount, + eve.principal.clone(), + Some(mallory_end_burn_height), + Some(eve.pox_address.clone()), + ); + mallory.nonce += 1; + + let davids_stackers = &[ + (frank.clone(), frank_lock_period), + (grace.clone(), grace_lock_period), + (heidi.clone(), heidi_lock_period), + ]; + let eves_stackers = &[ + (ivan.clone(), ivan_lock_period), + (jude.clone(), jude_lock_period), + (mallory.clone(), mallory_lock_period), + ]; + + // David calls 'delegate-stack-stx' for each of his stackers + let davids_delegate_stack_stx_txs: Vec<_> = davids_stackers + .iter() + .map(|(stacker, lock_period)| { + let tx = make_pox_4_delegate_stack_stx( + &david.private_key, + david.nonce, + stacker.principal.clone(), + amount, + david.pox_address.clone(), + burn_block_height as u128, + *lock_period, + ); + david.nonce += 1; + tx + }) + .collect(); + + // Eve calls 'delegate-stack-stx' for each of her stackers + let eves_delegate_stack_stx_txs: Vec<_> = eves_stackers + .iter() + .map(|(stacker, lock_period)| { + let tx = make_pox_4_delegate_stack_stx( + &eve.private_key, + eve.nonce, + stacker.principal.clone(), + amount, + eve.pox_address.clone(), + burn_block_height as u128, + *lock_period, // Must be called every reward cycle, therefore only ever lasts for 1 lock period + ); + eve.nonce += 1; + tx + }) + .collect(); + + // Alice's authorization for David to aggregate commit + let alice_authorization_for_david = make_signer_key_signature( + &david.pox_address, + &alice.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 1, + ); + + // David aggregate commits + let davids_aggregate_commit_index_tx = 
make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle, + Some(alice_authorization_for_david), + &alice.public_key, + u128::MAX, + 1, + ); + david.nonce += 1; + + // Bob's authorization for Eve to aggregate commit + let bob_authorization_for_eve = make_signer_key_signature( + &eve.pox_address, + &bob.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 1, + ); + + // Eve aggregate commits + let eves_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &eve.private_key, + eve.nonce, + &eve.pox_address, + next_reward_cycle, + Some(bob_authorization_for_eve), + &bob.public_key, + u128::MAX, + 1, + ); + eve.nonce += 1; + + let mut txs = vec![ + frank_delegate_stx_to_david_tx, + grace_delegate_stx_to_david_tx, + heidi_delegate_stx_to_david_tx, + ivan_delegate_stx_to_eve_tx, + jude_delegate_stx_to_eve_tx, + mallory_delegate_stx_to_eve_tx, + carl_stack_tx, + ]; + txs.extend(davids_delegate_stack_stx_txs); + txs.extend(eves_delegate_stack_stx_txs); + txs.extend(vec![ + davids_aggregate_commit_index_tx, + eves_aggregate_commit_index_tx, + ]); + + // Advance to reward set calculation of the next reward cycle + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Check that all of David's stackers have been added to the reward set + for (stacker, stacker_lock_period) in davids_stackers { + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, david.pox_address); + assert_eq!(lock_period, *stacker_lock_period); 
} - // check that bob's stacking-state is gone and alice's stacking-state is correct - let bob_state = get_stacking_state_pox( - &mut peer, - &latest_block, - &bob_address.to_account_principal(), - PoxVersions::Pox4.get_name_str(), - ) - .expect("Bob should have stacking-state entry") - .expect_tuple() - .unwrap(); - let reward_indexes_str = bob_state.get("reward-set-indexes").unwrap().to_string(); - assert_eq!(reward_indexes_str, "(u1 u1 u1 u1 u1 u1)"); + // Check that all of Eve's stackers have been added to the reward set + for (stacker, stacker_lock_period) in eves_stackers { + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, eve.pox_address); + assert_eq!(lock_period, *stacker_lock_period); + } + // Check that Carl's stacker has been added to the reward set + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &carl.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, carl.pox_address); + assert_eq!(lock_period, carl_lock_period); + + // Verify stacker transactions + let mut observed_txs = HashSet::new(); + for tx_receipt in tx_block.receipts { + if let TransactionOrigin::Stacks(ref tx) = tx_receipt.transaction { + observed_txs.insert(tx.txid()); + } + } - let alice_state = get_stacking_state_pox( - &mut peer, - &latest_block, - &alice_address.to_account_principal(), - PoxVersions::Pox4.get_name_str(), - ) - .expect("Alice should have stacking-state entry") - .expect_tuple() - .unwrap(); - let reward_indexes_str = alice_state.get("reward-set-indexes").unwrap().to_string(); - assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); + for tx in &txs { + let txid = tx.txid(); + if !observed_txs.contains(&txid) { + panic!("Failed to find stacking transaction ({txid}) in 
observed transactions") + } + } - // check that bob is still locked at next block - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + let cycle_id = next_reward_cycle; + // Create vote txs for each signer + let alice_index = get_signer_index(&mut peer, latest_block, alice.address.clone(), cycle_id); + let bob_index = get_signer_index(&mut peer, latest_block, bob.address.clone(), cycle_id); + let carl_index = get_signer_index(&mut peer, latest_block, carl.address.clone(), cycle_id); + let alice_vote = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let bob_vote = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let carl_vote = make_signers_vote_for_aggregate_public_key( + &carl.private_key, + carl.nonce, + carl_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let vote_txs = vec![alice_vote, bob_vote, carl_vote]; + alice.nonce += 1; + bob.nonce += 1; + carl.nonce += 1; - let bob_bal = get_stx_account_at( + // Mine vote txs & advance to the reward set calculation of the next reward cycle + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64); + let (latest_block, tx_block) = advance_to_block_height( &mut peer, - &latest_block, - &bob_address.to_account_principal(), + &observer, + &vote_txs, + &mut peer_nonce, + target_height, + ); + + let mut observed_txs = HashSet::new(); + for tx_receipt in tx_block.receipts { + if let TransactionOrigin::Stacks(ref tx) = tx_receipt.transaction { + observed_txs.insert(tx.txid()); + } + } + + for tx in &vote_txs { + let txid = tx.txid(); + if !observed_txs.contains(&txid) { + panic!("Failed to find vote transaction ({txid}) in observed transactions") + } + } + let approved_key = 
get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) + .expect("No approved key found"); + assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); + + // Stack for following reward cycle again and then advance to epoch 3.0 activation boundary + let reward_cycle = peer.get_reward_cycle() as u128; + let next_reward_cycle = reward_cycle.wrapping_add(1); + let carl_lock_period = carl_lock_period.wrapping_add(3); // Carl's total lock period is now 5 + let carl_signature_for_carl = make_signer_key_signature( + &carl.pox_address, + &carl.private_key, + reward_cycle, + &Pox4SignatureTopic::StackExtend, + 3, + u128::MAX, + 2, + ); + // Carl extends his lock period by 3 cycles + let carl_extend_tx = make_pox_4_extend( + &carl.private_key, + carl.nonce, + carl.pox_address.clone(), + 3, + carl.public_key, + Some(carl_signature_for_carl), + u128::MAX, + 2, ); - assert_eq!(bob_bal.unlock_height(), expected_unlock_height); - assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - - // now let's check some tx receipts + carl.nonce += 1; + let alice_authorization_for_david = make_signer_key_signature( + &david.pox_address, + &alice.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 2, + ); + // David commits his aggregate for the next reward cycle + let davids_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle, + Some(alice_authorization_for_david), + &alice.public_key, + u128::MAX, + 2, + ); + david.nonce += 1; - let blocks = observer.get_blocks(); + let bob_authorization_for_eve = make_signer_key_signature( + &eve.pox_address, + &bob.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 2, + ); + // Eve commits her aggregate for the next reward cycle + let eves_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &eve.private_key, + eve.nonce, + 
&eve.pox_address, + next_reward_cycle, + Some(bob_authorization_for_eve), + &bob.public_key, + u128::MAX, + 2, + ); + eve.nonce += 1; - let mut alice_txs = HashMap::new(); - let mut bob_txs = HashMap::new(); - let mut coinbase_txs = vec![]; - let mut reward_cycles_in_2_5 = 0u64; + let txs = vec![ + carl_extend_tx, + davids_aggregate_commit_index_tx, + eves_aggregate_commit_index_tx, + ]; - for b in blocks.into_iter() { - if let Some(ref reward_set_data) = b.reward_set_data { - let signers_set = reward_set_data.reward_set.signers.as_ref().unwrap(); - assert_eq!(signers_set.len(), 1); - assert_eq!( - StacksPublicKey::from_private(&alice).to_bytes_compressed(), - signers_set[0].signing_key.to_vec() - ); - let rewarded_addrs = HashSet::<_>::from_iter( - reward_set_data - .reward_set - .rewarded_addresses - .iter() - .map(|a| a.to_burnchain_repr()), - ); - assert_eq!(rewarded_addrs.len(), 1); - assert_eq!( - reward_set_data.reward_set.rewarded_addresses[0].bytes(), - alice_address.bytes.0.to_vec(), - ); - reward_cycles_in_2_5 += 1; - eprintln!("{:?}", b.reward_set_data) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Check that all of David's stackers are stacked + for (stacker, stacker_lock_period) in davids_stackers { + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, reward_cycle); + assert_eq!(pox_address, david.pox_address); + assert_eq!(lock_period, *stacker_lock_period); + } + // Check that all of Eve's stackers are stacked + for (stacker, stacker_lock_period) in eves_stackers { + let (pox_address, first_reward_cycle, lock_period, _indices) = + 
get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, reward_cycle); + assert_eq!(pox_address, eve.pox_address); + assert_eq!(lock_period, *stacker_lock_period); + } + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &carl.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, reward_cycle); + assert_eq!(pox_address, carl.pox_address); + assert_eq!(lock_period, carl_lock_period); + + // Verify stacker transactions + let mut observed_txs = HashSet::new(); + for tx_receipt in tx_block.receipts { + if let TransactionOrigin::Stacks(ref tx) = tx_receipt.transaction { + observed_txs.insert(tx.txid()); } + } - for (i, r) in b.receipts.into_iter().enumerate() { - if i == 0 { - coinbase_txs.push(r); - continue; - } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } - } - _ => {} - } + for tx in &txs { + let txid = tx.txid(); + if !observed_txs.contains(&txid) { + panic!("Failed to find stacking transaction ({txid}) in observed transactions") } } - assert_eq!(alice_txs.len(), 1); - assert_eq!(bob_txs.len(), 1); - // only mined one 2.5 reward cycle, but make sure it was picked up in the events loop above - assert_eq!(reward_cycles_in_2_5, 1); + let cycle_id = next_reward_cycle; + // Generate next cycle aggregate public key + peer_config.aggregate_public_key = Some( + peer_config + .test_signers + .unwrap() + .generate_aggregate_key(cycle_id as u64), + ); + // create vote txs + let alice_index = get_signer_index(&mut peer, latest_block, alice.address.clone(), cycle_id); + let bob_index = get_signer_index(&mut peer, latest_block, bob.address.clone(), cycle_id); + let carl_index = get_signer_index(&mut peer, 
latest_block, carl.address.clone(), cycle_id); + let alice_vote = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let bob_vote = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let carl_vote = make_signers_vote_for_aggregate_public_key( + &carl.private_key, + carl.nonce, + carl_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let vote_txs = vec![alice_vote, bob_vote, carl_vote]; + alice.nonce += 1; + bob.nonce += 1; + carl.nonce += 1; - // all should have committedd okay - assert!( - match bob_txs.get(&0).unwrap().result { - Value::Response(ref r) => r.committed, - _ => false, - }, - "Bob tx0 should have committed okay" + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64); + // Submit vote transactions + let (latest_block, tx_block) = advance_to_block_height( + &mut peer, + &observer, + &vote_txs, + &mut peer_nonce, + target_height, ); - // Check that the event produced by "handle-unlock" has a well-formed print event - // and that this event is included as part of the coinbase tx - for unlock_coinbase_index in [auto_unlock_coinbase] { - // expect the unlock to occur 1 block after the handle-unlock method was invoked. 
- let expected_unlock_height = unlock_coinbase_index + EMPTY_SORTITIONS + 1; - let expected_cycle = pox_constants - .block_height_to_reward_cycle(0, expected_unlock_height) - .unwrap(); - assert!( - coinbase_txs[unlock_coinbase_index as usize].events.is_empty(), - "handle-unlock events are coinbase events and there should be no handle-unlock invocation in this test" - ); + let mut observed_txs = HashSet::new(); + for tx_receipt in tx_block.receipts { + if let TransactionOrigin::Stacks(ref tx) = tx_receipt.transaction { + observed_txs.insert(tx.txid()); + } } -} -/// In this test case, we lockup enough to get participation to be non-zero, but not enough to qualify for a reward slot. -#[test] -fn no_lockups_2_5() { - let EXPECTED_FIRST_V2_CYCLE = 8; - // the sim environment produces 25 empty sortitions before - // tenures start being tracked. - let EMPTY_SORTITIONS = 25; + for tx in &vote_txs { + let txid = tx.txid(); + if !observed_txs.contains(&txid) { + panic!("Failed to find vote transaction ({txid}) in observed transactions") + } + } + let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) + .expect("No approved key found"); + assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); - let (epochs, mut pox_constants) = make_test_epochs_pox(); - pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; + // Let us start stacking for the following reward cycle + let current_reward_cycle = peer.get_reward_cycle() as u128; + let next_reward_cycle = current_reward_cycle.wrapping_add(1); - let mut burnchain = Burnchain::default_unittest( - 0, - &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + let alice_authorization_for_david = make_signer_key_signature( + &david.pox_address, + &alice.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 3, ); - burnchain.pox_constants = pox_constants.clone(); - - let observer = 
TestEventObserver::new(); - let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( - &burnchain, - &function_name!(), - Some(epochs.clone()), - Some(&observer), + let davids_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle, + Some(alice_authorization_for_david), + &alice.public_key, + u128::MAX, + 3, ); + david.nonce += 1; - peer.config.check_pox_invariants = None; - - let alice = keys.pop().unwrap(); - let bob = keys.pop().unwrap(); - let alice_address = key_to_stacks_addr(&alice); - let bob_address = key_to_stacks_addr(&bob); + let bob_authorization_for_eve = make_signer_key_signature( + &eve.pox_address, + &bob.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 3, + ); - let mut coinbase_nonce = 0; + let eves_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &eve.private_key, + eve.nonce, + &eve.pox_address, + next_reward_cycle, + Some(bob_authorization_for_eve), + &bob.public_key, + u128::MAX, + 3, + ); + eve.nonce += 1; - let first_v4_cycle = burnchain - .block_height_to_reward_cycle(burnchain.pox_constants.pox_4_activation_height as u64) - .unwrap() - + 1; + // Carl attempts a stx-increase using Alice's key instead of his own + // Should fail as he already has delegated his signing power to himself + let alice_signature_for_carl = make_signer_key_signature( + &carl.pox_address, + &alice.private_key, + current_reward_cycle, + &Pox4SignatureTopic::StackIncrease, + carl_lock_period, + u128::MAX, + 4, + ); - // produce blocks until epoch 2.5 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[7].start_height { - peer.tenure_with_txs(&[], &mut coinbase_nonce); - } + let carl_increase_tx = make_pox_4_stack_increase( + &carl.private_key, + carl.nonce, + amount, + &alice.public_key, + Some(alice_signature_for_carl), + u128::MAX, + 4, + ); + carl.nonce += 1; - let tip = 
get_tip(peer.sortdb.as_ref()); + let txs = vec![ + carl_increase_tx, + davids_aggregate_commit_index_tx, + eves_aggregate_commit_index_tx, + ]; - let bob_lockup = make_simple_pox_4_lock(&bob, &mut peer, 1 * POX_THRESHOLD_STEPS_USTX, 6); + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + // This assertion just makes testing logic a bit easier + let davids_stackers = &[ + (grace.clone(), grace_lock_period), + (heidi.clone(), heidi_lock_period), + ]; - let txs = [bob_lockup]; - let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); - // check that the "raw" reward set will contain an entry for bob - for cycle_number in first_v4_cycle..first_v4_cycle + 6 { - let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); - let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); - assert_eq!( - reward_set_entries.len(), - 1, - "Reward set should contain one entry in cycle {cycle_number}" - ); - assert_eq!( - reward_set_entries[0].reward_address.bytes(), - bob_address.bytes.0.to_vec() - ); + for (stacker, _) in davids_stackers { + let (pox_address, first_reward_cycle, _lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, reward_cycle); + assert_eq!(pox_address, david.pox_address); } - - // we'll produce blocks until the next reward cycle gets through the "handled start" code - // this is one block after the reward cycle starts - let height_target = burnchain.reward_cycle_to_block_height(first_v4_cycle + 1) + 1; - let auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; - - // but first, check that bob has locked tokens at (height_target + 
1) - let bob_bal = get_stx_account_at( - &mut peer, - &latest_block, - &bob_address.to_account_principal(), - ); - assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - - while get_tip(peer.sortdb.as_ref()).block_height < height_target { - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // Frank should no longer be considered a stacker as his lock period has expired + assert!(get_stacker_info_pox_4(&mut peer, &frank.principal).is_none()); + + for (stacker, _) in eves_stackers { + let (pox_address, first_reward_cycle, _lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, reward_cycle); + assert_eq!(pox_address, eve.pox_address); } - let blocks = observer.get_blocks(); - for b in blocks.into_iter() { - if let Some(ref reward_set_data) = b.reward_set_data { - assert_eq!(reward_set_data.reward_set.signers, Some(vec![])); - assert!(reward_set_data.reward_set.rewarded_addresses.is_empty()); - eprintln!("{:?}", b.reward_set_data) - } - } + let (pox_address, first_reward_cycle, _lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &carl.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, reward_cycle); + assert_eq!(pox_address, carl.pox_address); + + // Assert that carl's error is err(40) + let carl_increase_err = tx_block.receipts[1].clone().result; + assert_eq!(carl_increase_err, Value::error(Value::Int(40)).unwrap()); } diff --git a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs index aef41ef4a5..039e96f597 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs @@ -2167,7 +2167,7 @@ pub fn get_threshold_weight( threshold_weight } -fn nakamoto_tenure( +pub fn nakamoto_tenure( peer: &mut TestPeer, test_signers: &mut TestSigners, txs_of_blocks: Vec>, @@ 
-2202,7 +2202,7 @@ fn nakamoto_tenure( blocks_and_sizes } -fn make_dummy_tx( +pub fn make_dummy_tx( peer: &mut TestPeer, private_key: &StacksPrivateKey, nonce: &mut u64, diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index dd70fcfb01..0759ac0d01 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -4584,10 +4584,10 @@ impl StacksChainState { // strictly speaking this check is defensive. It will never be the case // that a `miner_reward` has a `recipient_contract` that is `Some(..)` // unless the block was mined in Epoch 2.1. But you can't be too - // careful... + // careful... if evaluated_epoch >= StacksEpochId::Epoch21 { // in 2.1 or later, the coinbase may optionally specify a contract into - // which the tokens get sent. If this is not given, then they are sent + // which the tokens get sent. If this is not given, then they are sent // to the miner address. miner_reward.recipient.clone() } @@ -6650,7 +6650,9 @@ impl StacksChainState { // 1: must parse (done) // 2: it must be validly signed. 
- StacksChainState::process_transaction_precheck(&chainstate_config, &tx) + let epoch = clarity_connection.get_epoch().clone(); + + StacksChainState::process_transaction_precheck(&chainstate_config, &tx, epoch) .map_err(|e| MemPoolRejection::FailedToValidate(e))?; // 3: it must pay a tx fee @@ -6663,7 +6665,14 @@ impl StacksChainState { )); } - // 4: the account nonces must be correct + // 4: check if transaction is valid in the current epoch + if !StacksBlock::validate_transaction_static_epoch(tx, epoch) { + return Err(MemPoolRejection::Other( + "Transaction is not supported in this epoch".to_string(), + )); + } + + // 5: the account nonces must be correct let (origin, payer) = match StacksChainState::check_transaction_nonces(clarity_connection, &tx, true) { Ok(x) => x, @@ -6725,7 +6734,7 @@ impl StacksChainState { }, )?; - // 5: the paying account must have enough funds + // 6: the paying account must have enough funds if !payer.stx_balance.can_transfer_at_burn_block( u128::from(fee), block_height, @@ -6751,7 +6760,7 @@ impl StacksChainState { } } - // 6: payload-specific checks + // 7: payload-specific checks match &tx.payload { TransactionPayload::TokenTransfer(addr, amount, _memo) => { // version byte matches? 
@@ -6854,7 +6863,7 @@ impl StacksChainState { } if let Some(_version) = version_opt.as_ref() { - if clarity_connection.get_epoch() < StacksEpochId::Epoch21 { + if epoch < StacksEpochId::Epoch21 { return Err(MemPoolRejection::Other( "Versioned smart contract transactions are not supported in this epoch" .to_string(), diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 9297252d15..e65c9b9a3b 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -521,8 +521,18 @@ impl StacksChainState { pub fn process_transaction_precheck( config: &DBConfig, tx: &StacksTransaction, + epoch_id: StacksEpochId, ) -> Result<(), Error> { // valid auth? + if !tx.auth.is_supported_in_epoch(epoch_id) { + let msg = format!( + "Invalid tx {}: authentication mode not supported in Epoch {epoch_id}", + tx.txid() + ); + warn!("{msg}"); + + return Err(Error::InvalidStacksTransaction(msg, false)); + } tx.verify().map_err(Error::NetError)?; // destined for us? @@ -970,14 +980,14 @@ impl StacksChainState { // Their presence in this variant makes the transaction invalid. if tx.post_conditions.len() > 0 { let msg = format!("Invalid Stacks transaction: TokenTransfer transactions do not support post-conditions"); - warn!("{}", &msg); + info!("{}", &msg); return Err(Error::InvalidStacksTransaction(msg, false)); } if *addr == origin_account.principal { let msg = format!("Invalid TokenTransfer: address tried to send to itself"); - warn!("{}", &msg); + info!("{}", &msg); return Err(Error::InvalidStacksTransaction(msg, false)); } @@ -1088,7 +1098,7 @@ impl StacksChainState { if epoch_id >= StacksEpochId::Epoch21 { // in 2.1 and later, this is a permitted runtime error. take the // fee from the payer and keep the tx. 
- warn!("Contract-call encountered an analysis error at runtime"; + info!("Contract-call encountered an analysis error at runtime"; "txid" => %tx.txid(), "origin" => %origin_account.principal, "origin_nonce" => %origin_account.nonce, @@ -1163,7 +1173,7 @@ impl StacksChainState { // (because this can be checked statically by the miner before mining the block). if StacksChainState::get_contract(clarity_tx, &contract_id)?.is_some() { let msg = format!("Duplicate contract '{}'", &contract_id); - warn!("{}", &msg); + info!("{}", &msg); return Err(Error::InvalidStacksTransaction(msg, false)); } @@ -1225,7 +1235,7 @@ impl StacksChainState { .sub(&cost_before) .expect("BUG: total block cost decreased"); - warn!( + info!( "Runtime error in contract analysis for {}: {:?}", &contract_id, &other_error; "txid" => %tx.txid(), @@ -1329,7 +1339,7 @@ impl StacksChainState { if epoch_id >= StacksEpochId::Epoch21 { // in 2.1 and later, this is a permitted runtime error. take the // fee from the payer and keep the tx. - warn!("Smart-contract encountered an analysis error at runtime"; + info!("Smart-contract encountered an analysis error at runtime"; "txid" => %tx.txid(), "contract" => %contract_id, "code" => %contract_code_str, @@ -1380,7 +1390,7 @@ impl StacksChainState { // Their presence in this variant makes the transaction invalid. if tx.post_conditions.len() > 0 { let msg = format!("Invalid Stacks transaction: PoisonMicroblock transactions do not support post-conditions"); - warn!("{}", &msg); + info!("{}", &msg); return Err(Error::InvalidStacksTransaction(msg, false)); } @@ -1412,7 +1422,7 @@ impl StacksChainState { // Their presence in this variant makes the transaction invalid. 
if tx.post_conditions.len() > 0 { let msg = format!("Invalid Stacks transaction: TenureChange transactions do not support post-conditions"); - warn!("{msg}"); + info!("{msg}"); return Err(Error::InvalidStacksTransaction(msg, false)); } @@ -1467,7 +1477,7 @@ impl StacksChainState { debug!("Process transaction {} ({})", tx.txid(), tx.payload.name()); let epoch = clarity_block.get_epoch(); - StacksChainState::process_transaction_precheck(&clarity_block.config, tx)?; + StacksChainState::process_transaction_precheck(&clarity_block.config, tx, epoch)?; // what version of Clarity did the transaction caller want? And, is it valid now? let clarity_version = StacksChainState::get_tx_clarity_version(clarity_block, tx)?; @@ -1475,7 +1485,7 @@ impl StacksChainState { // requires 2.1 and higher if clarity_block.get_epoch() < StacksEpochId::Epoch21 { let msg = format!("Invalid transaction {}: asks for Clarity2, but not in Stacks epoch 2.1 or later", tx.txid()); - warn!("{}", &msg); + info!("{}", &msg); return Err(Error::InvalidStacksTransaction(msg, false)); } } @@ -9038,7 +9048,7 @@ pub mod test { (as-contract (stx-transfer? amount tx-sender recipient)) ) - + (stx-transfer? u500000000 tx-sender (as-contract tx-sender)) "#; @@ -9203,7 +9213,7 @@ pub mod test { (as-contract (stx-transfer? amount tx-sender recipient)) ) - + (stx-transfer? 
u500000000 tx-sender (as-contract tx-sender)) "#; @@ -9372,6 +9382,27 @@ pub mod test { }; } + /// Call `process_transaction()` with prechecks + pub fn validate_transactions_static_epoch_and_process_transaction( + clarity_block: &mut ClarityTx, + tx: &StacksTransaction, + quiet: bool, + ast_rules: ASTRules, + ) -> Result<(u64, StacksTransactionReceipt), Error> { + let epoch = clarity_block.get_epoch(); + + if !StacksBlock::validate_transactions_static_epoch(&vec![tx.clone()], epoch) { + let msg = format!( + "Invalid transaction {}: target epoch is not activated", + tx.txid() + ); + warn!("{}", &msg); + return Err(Error::InvalidStacksTransaction(msg, false)); + } + + StacksChainState::process_transaction(clarity_block, tx, quiet, ast_rules) + } + #[test] fn test_checkerrors_at_runtime() { let privk = StacksPrivateKey::from_hex( @@ -9439,6 +9470,28 @@ pub mod test { let mut chainstate = instantiate_chainstate_with_balances(false, 0x80000000, function_name!(), balances); + let mut tx_runtime_checkerror_trait_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"foo".to_string(), + &runtime_checkerror_trait.to_string(), + None, + ) + .unwrap(), + ); + + tx_runtime_checkerror_trait_no_version.post_condition_mode = + TransactionPostConditionMode::Allow; + tx_runtime_checkerror_trait_no_version.chain_id = 0x80000000; + tx_runtime_checkerror_trait_no_version.set_tx_fee(1); + tx_runtime_checkerror_trait_no_version.set_origin_nonce(0); + + let mut signer = StacksTransactionSigner::new(&tx_runtime_checkerror_trait_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_runtime_checkerror_trait_tx_no_version = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_trait = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -9481,6 +9534,28 @@ pub mod test { let signed_runtime_checkerror_impl_tx = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_impl_no_version = 
StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"foo-impl".to_string(), + &runtime_checkerror_impl.to_string(), + None, + ) + .unwrap(), + ); + + tx_runtime_checkerror_impl_no_version.post_condition_mode = + TransactionPostConditionMode::Allow; + tx_runtime_checkerror_impl_no_version.chain_id = 0x80000000; + tx_runtime_checkerror_impl_no_version.set_tx_fee(1); + tx_runtime_checkerror_impl_no_version.set_origin_nonce(1); + + let mut signer = StacksTransactionSigner::new(&tx_runtime_checkerror_impl_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_runtime_checkerror_impl_tx_no_version = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_clar1 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -9502,6 +9577,28 @@ pub mod test { let signed_runtime_checkerror_tx_clar1 = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_clar1_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"trait-checkerror".to_string(), + &runtime_checkerror.to_string(), + None, + ) + .unwrap(), + ); + + tx_runtime_checkerror_clar1_no_version.post_condition_mode = + TransactionPostConditionMode::Allow; + tx_runtime_checkerror_clar1_no_version.chain_id = 0x80000000; + tx_runtime_checkerror_clar1_no_version.set_tx_fee(1); + tx_runtime_checkerror_clar1_no_version.set_origin_nonce(2); + + let mut signer = StacksTransactionSigner::new(&tx_runtime_checkerror_clar1_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_runtime_checkerror_tx_clar1_no_version = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_clar2 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -9569,6 +9666,29 @@ pub mod test { let signed_runtime_checkerror_cc_contract_tx_clar1 = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_cc_contract_clar1_no_version = StacksTransaction::new( + 
TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"trait-checkerror-cc".to_string(), + &runtime_checkerror_contract.to_string(), + None, + ) + .unwrap(), + ); + + tx_runtime_checkerror_cc_contract_clar1_no_version.post_condition_mode = + TransactionPostConditionMode::Allow; + tx_runtime_checkerror_cc_contract_clar1_no_version.chain_id = 0x80000000; + tx_runtime_checkerror_cc_contract_clar1_no_version.set_tx_fee(1); + tx_runtime_checkerror_cc_contract_clar1_no_version.set_origin_nonce(3); + + let mut signer = + StacksTransactionSigner::new(&tx_runtime_checkerror_cc_contract_clar1_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_runtime_checkerror_cc_contract_tx_clar1_no_version = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_cc_contract_clar2 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -9605,34 +9725,34 @@ pub mod test { &BlockHeaderHash([1u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_trait_tx, + &signed_runtime_checkerror_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_impl_tx, + &signed_runtime_checkerror_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_tx_clar1, + &signed_runtime_checkerror_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, 
&signed_test_trait_checkerror_tx, false, @@ -9646,12 +9766,52 @@ pub mod test { } else { panic!("Did not get unchecked interpreter error"); } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_tx_clar1, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_trait_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + let acct = StacksChainState::get_account(&mut conn, &addr.into()); assert_eq!(acct.nonce, 3); - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_cc_contract_tx_clar1, + &signed_runtime_checkerror_cc_contract_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) @@ -9670,41 +9830,41 @@ pub mod test { // in 2.05, this invalidates the block let mut conn = chainstate.block_begin( - &TestBurnStateDB_20, + &TestBurnStateDB_2_05, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([2u8; 20]), &BlockHeaderHash([2u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let 
(fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_trait_tx, + &signed_runtime_checkerror_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_impl_tx, + &signed_runtime_checkerror_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_tx_clar1, + &signed_runtime_checkerror_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_trait_checkerror_tx, false, @@ -9718,12 +9878,51 @@ pub mod test { } else { panic!("Did not get unchecked interpreter error"); } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_tx_clar1, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_trait_tx, + false, + 
ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } let acct = StacksChainState::get_account(&mut conn, &addr.into()); assert_eq!(acct.nonce, 3); - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_cc_contract_tx_clar1, + &signed_runtime_checkerror_cc_contract_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) @@ -9756,7 +9955,7 @@ pub mod test { let signed_runtime_checkerror_cc_contract_tx_clar1 = signer.get_tx().unwrap(); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_trait_tx, false, @@ -9765,7 +9964,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_impl_tx, false, @@ -9783,7 +9982,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_trait_checkerror_tx, false, @@ -9807,7 +10006,7 @@ pub mod test { .find("TypeValueError(OptionalType(CallableType(Trait(TraitIdentifier ") .is_some()); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_cc_contract_tx_clar1, false, @@ -9842,7 +10041,7 @@ pub mod test { &BlockHeaderHash([4u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut 
conn, &signed_runtime_checkerror_trait_tx, false, @@ -9851,7 +10050,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_impl_tx, false, @@ -9860,7 +10059,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_tx_clar2, false, @@ -9869,7 +10068,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_trait_checkerror_tx, false, @@ -9889,7 +10088,7 @@ pub mod test { assert!(tx_receipt.vm_error.is_none()); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_cc_contract_tx_clar2, false, @@ -9976,6 +10175,27 @@ pub mod test { let signed_foo_trait_tx = signer.get_tx().unwrap(); + let mut tx_foo_trait_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"foo".to_string(), + &foo_trait.to_string(), + None, + ) + .unwrap(), + ); + + tx_foo_trait_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_foo_trait_no_version.chain_id = 0x80000000; + tx_foo_trait_no_version.set_tx_fee(1); + tx_foo_trait_no_version.set_origin_nonce(0); + + let mut signer = StacksTransactionSigner::new(&tx_foo_trait_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_foo_trait_tx_no_version = signer.get_tx().unwrap(); + let mut tx_foo_impl = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -9997,6 +10217,27 @@ pub mod test { let 
signed_foo_impl_tx = signer.get_tx().unwrap(); + let mut tx_foo_impl_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"foo-impl".to_string(), + &foo_impl.to_string(), + None, + ) + .unwrap(), + ); + + tx_foo_impl_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_foo_impl_no_version.chain_id = 0x80000000; + tx_foo_impl_no_version.set_tx_fee(1); + tx_foo_impl_no_version.set_origin_nonce(1); + + let mut signer = StacksTransactionSigner::new(&tx_foo_impl_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_foo_impl_tx_no_version = signer.get_tx().unwrap(); + let mut tx_call_foo_clar1 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10018,6 +10259,27 @@ pub mod test { let signed_call_foo_tx_clar1 = signer.get_tx().unwrap(); + let mut tx_call_foo_clar1_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"call-foo".to_string(), + &call_foo.to_string(), + None, + ) + .unwrap(), + ); + + tx_call_foo_clar1_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_call_foo_clar1_no_version.chain_id = 0x80000000; + tx_call_foo_clar1_no_version.set_tx_fee(1); + tx_call_foo_clar1_no_version.set_origin_nonce(2); + + let mut signer = StacksTransactionSigner::new(&tx_call_foo_clar1_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_call_foo_tx_clar1_no_version = signer.get_tx().unwrap(); + let mut tx_call_foo_clar2 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10078,27 +10340,27 @@ pub mod test { &BlockHeaderHash([1u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_trait_tx, + &signed_foo_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = 
StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_impl_tx, + &signed_foo_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_call_foo_tx_clar1, + &signed_call_foo_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) @@ -10112,38 +10374,77 @@ pub mod test { _ => panic!("expected the contract publish to fail"), } + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_trait_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_call_foo_tx_clar1, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + conn.commit_block(); // in 2.05: analysis error should cause contract publish to fail let mut conn = chainstate.block_begin( - &TestBurnStateDB_20, + &TestBurnStateDB_2_05, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([2u8; 20]), &BlockHeaderHash([2u8; 32]), ); - 
let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_trait_tx, + &signed_foo_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_impl_tx, + &signed_foo_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_call_foo_tx_clar1, + &signed_call_foo_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) @@ -10157,7 +10458,7 @@ pub mod test { _ => panic!("expected the contract publish to fail"), } - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10172,6 +10473,45 @@ pub mod test { panic!("Did not get unchecked interpreter error"); } + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_trait_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_call_foo_tx_clar1, + false, 
+ ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + conn.commit_block(); // in 2.1, using clarity 1: analysis error should cause contract publish to fail @@ -10183,7 +10523,7 @@ pub mod test { &BlockHeaderHash([3u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10192,7 +10532,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10201,7 +10541,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar1, false, @@ -10228,7 +10568,7 @@ pub mod test { &BlockHeaderHash([4u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10237,7 +10577,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10246,7 +10586,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar2, false, @@ -10255,7 +10595,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = 
validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10348,6 +10688,27 @@ pub mod test { let signed_foo_trait_tx = signer.get_tx().unwrap(); + let mut tx_foo_trait_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"foo".to_string(), + &foo_trait.to_string(), + None, + ) + .unwrap(), + ); + + tx_foo_trait_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_foo_trait_no_version.chain_id = 0x80000000; + tx_foo_trait_no_version.set_tx_fee(1); + tx_foo_trait_no_version.set_origin_nonce(0); + + let mut signer = StacksTransactionSigner::new(&tx_foo_trait_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_foo_trait_tx_no_version = signer.get_tx().unwrap(); + let mut tx_transitive_trait_clar1 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10369,6 +10730,28 @@ pub mod test { let signed_transitive_trait_clar1_tx = signer.get_tx().unwrap(); + let mut tx_transitive_trait_clar1_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"transitive".to_string(), + &transitive_trait.to_string(), + None, + ) + .unwrap(), + ); + + tx_transitive_trait_clar1_no_version.post_condition_mode = + TransactionPostConditionMode::Allow; + tx_transitive_trait_clar1_no_version.chain_id = 0x80000000; + tx_transitive_trait_clar1_no_version.set_tx_fee(1); + tx_transitive_trait_clar1_no_version.set_origin_nonce(1); + + let mut signer = StacksTransactionSigner::new(&tx_transitive_trait_clar1_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_transitive_trait_clar1_tx_no_version = signer.get_tx().unwrap(); + let mut tx_transitive_trait_clar2 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10411,6 +10794,27 @@ pub mod test { let signed_foo_impl_tx = signer.get_tx().unwrap(); + let mut 
tx_foo_impl_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"foo-impl".to_string(), + &foo_impl.to_string(), + None, + ) + .unwrap(), + ); + + tx_foo_impl_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_foo_impl_no_version.chain_id = 0x80000000; + tx_foo_impl_no_version.set_tx_fee(1); + tx_foo_impl_no_version.set_origin_nonce(2); + + let mut signer = StacksTransactionSigner::new(&tx_foo_impl_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_foo_impl_tx_no_version = signer.get_tx().unwrap(); + let mut tx_call_foo_clar1 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10432,6 +10836,27 @@ pub mod test { let signed_call_foo_tx_clar1 = signer.get_tx().unwrap(); + let mut tx_call_foo_clar1_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"call-foo".to_string(), + &call_foo.to_string(), + None, + ) + .unwrap(), + ); + + tx_call_foo_clar1_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_call_foo_clar1_no_version.chain_id = 0x80000000; + tx_call_foo_clar1_no_version.set_tx_fee(1); + tx_call_foo_clar1_no_version.set_origin_nonce(3); + + let mut signer = StacksTransactionSigner::new(&tx_call_foo_clar1_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_call_foo_tx_clar1_no_version = signer.get_tx().unwrap(); + let mut tx_call_foo_clar2 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10491,43 +10916,43 @@ pub mod test { &BlockHeaderHash([1u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_trait_tx, + &signed_foo_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = 
validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_transitive_trait_clar1_tx, + &signed_transitive_trait_clar1_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_impl_tx, + &signed_foo_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_call_foo_tx_clar1, + &signed_call_foo_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10543,54 +10968,106 @@ pub mod test { } assert_eq!(fee, 1); + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_trait_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_transitive_trait_clar1_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + 
assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_call_foo_tx_clar1, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + conn.commit_block(); // in 2.05: calling call-foo invalidates the block let mut conn = chainstate.block_begin( - &TestBurnStateDB_20, + &TestBurnStateDB_2_05, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([2u8; 20]), &BlockHeaderHash([2u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_trait_tx, + &signed_foo_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_transitive_trait_clar1_tx, + &signed_transitive_trait_clar1_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_impl_tx, + &signed_foo_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_call_foo_tx_clar1, + &signed_call_foo_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let err = StacksChainState::process_transaction( + let err = 
validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10605,6 +11082,58 @@ pub mod test { panic!("Did not get unchecked interpreter error"); } + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_trait_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_transitive_trait_clar1_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_call_foo_tx_clar1, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + conn.commit_block(); // in 2.1, using clarity 1 for both `transitive` and `call-foo`: calling call-foo causes an analysis error @@ -10616,7 +11145,7 @@ pub mod test { &BlockHeaderHash([3u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = 
validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10625,7 +11154,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_transitive_trait_clar1_tx, false, @@ -10634,7 +11163,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10643,7 +11172,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar1, false, @@ -10652,7 +11181,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10683,7 +11212,7 @@ pub mod test { &BlockHeaderHash([4u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10692,7 +11221,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_transitive_trait_clar1_tx, false, @@ -10701,7 +11230,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10710,7 +11239,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = 
StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar2, false, @@ -10719,7 +11248,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10750,7 +11279,7 @@ pub mod test { &BlockHeaderHash([5u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10759,7 +11288,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_transitive_trait_clar2_tx, false, @@ -10768,7 +11297,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10777,7 +11306,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar2, false, @@ -10804,7 +11333,7 @@ pub mod test { &BlockHeaderHash([6u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10813,7 +11342,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_transitive_trait_clar2_tx, false, @@ -10822,7 +11351,7 @@ pub mod test { .unwrap(); 
assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10831,7 +11360,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar1, false, diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 630454eabb..d5dd77c51f 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1488,7 +1488,7 @@ impl MARF { self.open_chain_tip.as_ref().map(|x| &x.block_hash) } - /// Get open chain tip + /// Get open chain tip block height pub fn get_open_chain_tip_height(&self) -> Option { self.open_chain_tip.as_ref().map(|x| x.height) } diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index ec8ac4a36c..35cb97243f 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1279,14 +1279,14 @@ impl<'a> StacksMicroblockBuilder<'a> { // Make the block from the transactions we did manage to get debug!("Block budget exceeded on tx {}", &mempool_tx.tx.txid()); if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { - debug!("Block budget exceeded while mining microblock"; + debug!("Block budget exceeded while mining microblock"; "tx" => %mempool_tx.tx.txid(), "next_behavior" => "Switch to mining stx-transfers only"); block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT { - debug!("Block budget exceeded while mining microblock"; + debug!("Block budget exceeded while mining microblock"; "tx" => %mempool_tx.tx.txid(), "next_behavior" => "Stop mining microblock"); block_limit_hit = 
BlockLimitFunction::LIMIT_REACHED; return Ok(None); @@ -2332,7 +2332,7 @@ impl StacksBlockBuilder { // if we have an invalid transaction that was quietly ignored, don't warn here either } e => { - warn!("Failed to apply tx {}: {:?}", &txinfo.tx.txid(), &e); + info!("Failed to apply tx {}: {:?}", &txinfo.tx.txid(), &e); return Ok(Some(result_event)); } } diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index f9ad4fff3f..4cb958a248 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -506,6 +506,13 @@ pub enum MultisigHashMode { P2WSH = 0x03, } +#[repr(u8)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum OrderIndependentMultisigHashMode { + P2SH = 0x05, + P2WSH = 0x07, +} + impl SinglesigHashMode { pub fn to_address_hash_mode(&self) -> AddressHashMode { match *self { @@ -556,6 +563,35 @@ impl MultisigHashMode { } } +impl OrderIndependentMultisigHashMode { + pub fn to_address_hash_mode(&self) -> AddressHashMode { + match *self { + OrderIndependentMultisigHashMode::P2SH => AddressHashMode::SerializeP2SH, + OrderIndependentMultisigHashMode::P2WSH => AddressHashMode::SerializeP2WSH, + } + } + + pub fn from_address_hash_mode(hm: AddressHashMode) -> Option { + match hm { + AddressHashMode::SerializeP2SH => Some(OrderIndependentMultisigHashMode::P2SH), + AddressHashMode::SerializeP2WSH => Some(OrderIndependentMultisigHashMode::P2WSH), + _ => None, + } + } + + pub fn from_u8(n: u8) -> Option { + match n { + x if x == OrderIndependentMultisigHashMode::P2SH as u8 => { + Some(OrderIndependentMultisigHashMode::P2SH) + } + x if x == OrderIndependentMultisigHashMode::P2WSH as u8 => { + Some(OrderIndependentMultisigHashMode::P2WSH) + } + _ => None, + } + } +} + /// A structure that encodes enough state to authenticate /// a transaction's execution against a Stacks address. /// public_keys + signatures_required determines the Principal. 
@@ -580,10 +616,21 @@ pub struct SinglesigSpendingCondition { pub signature: MessageSignature, } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct OrderIndependentMultisigSpendingCondition { + pub hash_mode: OrderIndependentMultisigHashMode, + pub signer: Hash160, + pub nonce: u64, // nth authorization from this account + pub tx_fee: u64, // microSTX/compute rate offered by this account + pub fields: Vec, + pub signatures_required: u16, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum TransactionSpendingCondition { Singlesig(SinglesigSpendingCondition), Multisig(MultisigSpendingCondition), + OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition), } /// Types of transaction authorizations @@ -1097,6 +1144,7 @@ pub mod test { chain_id: u32, anchor_mode: &TransactionAnchorMode, post_condition_mode: &TransactionPostConditionMode, + epoch_id: StacksEpochId, ) -> Vec { let addr = StacksAddress { version: 1, @@ -1130,7 +1178,7 @@ pub mod test { signature: MessageSignature([3u8; 65]), }; - let spending_conditions = vec![ + let mut spending_conditions = vec![ TransactionSpendingCondition::Singlesig(SinglesigSpendingCondition { signer: Hash160([0x11; 20]), hash_mode: SinglesigHashMode::P2PKH, @@ -1190,9 +1238,50 @@ pub mod test { TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) ], signatures_required: 2 - }) + }), ]; + if epoch_id >= StacksEpochId::Epoch30 { + spending_conditions.append(&mut vec![ + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2WSH, + nonce: 678, + tx_fee: 901, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, 
MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) + ], + signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 345, + tx_fee: 678, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("04ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c771f112f919b00a6c6c5f51f7c63e1762fe9fac9b66ec75a053db7f51f4a52712b").unwrap()), + ], + signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 456, + tx_fee: 789, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) + ], + signatures_required: 2 + }), + ]) + } + let mut tx_auths = vec![]; for i in 0..spending_conditions.len() { let spending_condition = &spending_conditions[i]; @@ -1340,7 +1429,7 @@ pub mod test { }; let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = 
VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); - let tx_payloads = vec![ + let mut tx_payloads = vec![ TransactionPayload::TokenTransfer( stx_address.into(), 123, @@ -1384,48 +1473,60 @@ pub mod test { }, Some(ClarityVersion::Clarity2), ), - TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None), - TransactionPayload::Coinbase( - CoinbasePayload([0x12; 32]), - Some(PrincipalData::Contract( - QualifiedContractIdentifier::transient(), - )), - None, - ), - TransactionPayload::Coinbase( - CoinbasePayload([0x12; 32]), - Some(PrincipalData::Standard(StandardPrincipalData( - 0x01, [0x02; 20], - ))), - None, - ), - TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof.clone())), - TransactionPayload::Coinbase( - CoinbasePayload([0x12; 32]), - Some(PrincipalData::Contract( - QualifiedContractIdentifier::transient(), - )), - Some(proof.clone()), - ), - TransactionPayload::Coinbase( - CoinbasePayload([0x12; 32]), - Some(PrincipalData::Standard(StandardPrincipalData( - 0x01, [0x02; 20], - ))), - Some(proof.clone()), - ), TransactionPayload::PoisonMicroblock(mblock_header_1, mblock_header_2), - TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: ConsensusHash([0x01; 20]), - prev_tenure_consensus_hash: ConsensusHash([0x02; 20]), - burn_view_consensus_hash: ConsensusHash([0x03; 20]), - previous_tenure_end: StacksBlockId([0x00; 32]), - previous_tenure_blocks: 0, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0x00; 20]), - }), ]; + if epoch_id >= StacksEpochId::Epoch30 { + tx_payloads.append(&mut vec![ + TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: ConsensusHash([0x01; 20]), + prev_tenure_consensus_hash: ConsensusHash([0x02; 20]), + burn_view_consensus_hash: ConsensusHash([0x03; 20]), + previous_tenure_end: StacksBlockId([0x00; 32]), + previous_tenure_blocks: 0, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0x00; 20]), + }), + 
TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + None, + Some(proof.clone()), + ), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(PrincipalData::Contract( + QualifiedContractIdentifier::transient(), + )), + Some(proof.clone()), + ), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(PrincipalData::Standard(StandardPrincipalData( + 0x01, [0x02; 20], + ))), + Some(proof.clone()), + ), + ]) + } else { + tx_payloads.append(&mut vec![ + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(PrincipalData::Contract( + QualifiedContractIdentifier::transient(), + )), + None, + ), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(PrincipalData::Standard(StandardPrincipalData( + 0x01, [0x02; 20], + ))), + None, + ), + ]) + } + // create all kinds of transactions let mut all_txs = vec![]; for tx_auth in tx_auths.iter() { @@ -1464,7 +1565,7 @@ pub mod test { all_txs } - pub fn make_codec_test_block(num_txs: usize) -> StacksBlock { + pub fn make_codec_test_block(num_txs: usize, epoch_id: StacksEpochId) -> StacksBlock { let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); @@ -1483,6 +1584,11 @@ pub mod test { origin_auth.clone(), TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); + let tx_coinbase_proof = StacksTransaction::new( + TransactionVersion::Mainnet, + origin_auth.clone(), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, Some(proof.clone())), + ); tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -1491,11 +1597,17 @@ pub mod test { 0x80000000, &TransactionAnchorMode::OnChainOnly, &TransactionPostConditionMode::Allow, + epoch_id, ); // remove 
all coinbases, except for an initial coinbase let mut txs_anchored = vec![]; - txs_anchored.push(tx_coinbase); + + if epoch_id >= StacksEpochId::Epoch30 { + txs_anchored.push(tx_coinbase_proof); + } else { + txs_anchored.push(tx_coinbase); + } for tx in all_txs.drain(..) { match tx.payload { @@ -1561,6 +1673,7 @@ pub mod test { 0x80000000, &TransactionAnchorMode::OffChainOnly, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let txs_mblock: Vec<_> = all_txs.into_iter().take(num_txs).collect(); diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index ae428af15f..e4242fd270 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -3429,7 +3429,7 @@ fn test_contract_call_across_clarity_versions() { let contract = format!(" (impl-trait .chain-id-trait-v1.trait-v1) (impl-trait .chain-id-trait-v2.trait-v2) - + (use-trait chain-info-v1 .chain-id-trait-v1.trait-v1) (use-trait chain-info-v2 .chain-id-trait-v2.trait-v2) @@ -3468,7 +3468,7 @@ fn test_contract_call_across_clarity_versions() { ) ) (define-read-only (test-at-block-recursive) - (at-block 0x{} + (at-block 0x{} (begin ;; this only works in clarity2 (print {{ tenure: u{}, version: u2, chain: chain-id, func: \"test-at-block-func-recursive-v2\" }}) @@ -3547,7 +3547,7 @@ fn test_contract_call_across_clarity_versions() { let contract = format!(" (impl-trait .chain-id-trait-v1.trait-v1) (impl-trait .chain-id-trait-v2.trait-v2) - + (use-trait chain-info-v1 .chain-id-trait-v1.trait-v1) (use-trait chain-info-v2 .chain-id-trait-v2.trait-v2) @@ -3583,14 +3583,14 @@ fn test_contract_call_across_clarity_versions() { ) ) (define-read-only (test-at-block-recursive) - (at-block 0x{} + (at-block 0x{} (begin (print {{ tenure: u{}, version: u1, func: \"test-at-block-func-recursive-v1\" }}) (contract-call? 
.test-{} test-at-block-recursive) ) ) ) - + (define-read-only (get-call-count) (var-get call-count) ) @@ -4746,6 +4746,7 @@ fn paramaterized_mempool_walk_test( 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let mut transaction_counter = 0; diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index cc2fe940b1..09b75047cc 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -3117,7 +3117,7 @@ pub fn mine_smart_contract_block_contract_call_microblock_exception( microblocks.push(microblock); } - test_debug!("Produce anchored stacks block {} with smart contract and {} microblocks with contract call at burnchain height {} stacks height {}", + test_debug!("Produce anchored stacks block {} with smart contract and {} microblocks with contract call at burnchain height {} stacks height {}", stacks_block.block_hash(), microblocks.len(), burnchain_height, stacks_block.header.total_work.work); (stacks_block, microblocks) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 22a331b193..aa312bbdb8 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -1227,6 +1227,108 @@ pub fn make_versioned_user_contract_publish( sign_standard_singlesig_tx(payload, sender, nonce, tx_fee) } +pub fn sign_tx_order_independent_p2sh( + payload: TransactionPayload, + privks: &[StacksPrivateKey], + num_sigs: usize, + sender_nonce: u64, + tx_fee: u64, +) -> StacksTransaction { + let mut pubks = vec![]; + for privk in privks.iter() { + pubks.push(StacksPublicKey::from_private(privk)); + } + let mut sender_spending_condition = + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + num_sigs as u16, + pubks.clone(), + ) + .expect("Failed to create p2sh spending 
condition."); + sender_spending_condition.set_nonce(sender_nonce); + sender_spending_condition.set_tx_fee(tx_fee); + let auth = TransactionAuth::Standard(sender_spending_condition); + let mut unsigned_tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); + unsigned_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; + unsigned_tx.chain_id = 0x80000000; + + let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); + + for signer in 0..num_sigs { + tx_signer.sign_origin(&privks[signer]).unwrap(); + } + + for signer in num_sigs..pubks.len() { + tx_signer.append_origin(&pubks[signer]).unwrap(); + } + + tx_signer.get_tx().unwrap() +} + +pub fn sign_tx_order_independent_p2wsh( + payload: TransactionPayload, + privks: &[StacksPrivateKey], + num_sigs: usize, + sender_nonce: u64, + tx_fee: u64, +) -> StacksTransaction { + let mut pubks = vec![]; + for privk in privks.iter() { + pubks.push(StacksPublicKey::from_private(privk)); + } + let mut sender_spending_condition = + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + num_sigs as u16, + pubks.clone(), + ) + .expect("Failed to create p2wsh spending condition."); + sender_spending_condition.set_nonce(sender_nonce); + sender_spending_condition.set_tx_fee(tx_fee); + let auth = TransactionAuth::Standard(sender_spending_condition); + let mut unsigned_tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); + unsigned_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; + unsigned_tx.chain_id = 0x80000000; + + let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); + + for signer in 0..num_sigs { + tx_signer.sign_origin(&privks[signer]).unwrap(); + } + + for signer in num_sigs..pubks.len() { + tx_signer.append_origin(&pubks[signer]).unwrap(); + } + + tx_signer.get_tx().unwrap() +} + +pub fn 
make_stacks_transfer_order_independent_p2sh( + privks: &[StacksPrivateKey], + num_sigs: usize, + nonce: u64, + tx_fee: u64, + recipient: &PrincipalData, + amount: u64, +) -> StacksTransaction { + let payload = + TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); + sign_tx_order_independent_p2sh(payload, privks, num_sigs, nonce, tx_fee) +} + +pub fn make_stacks_transfer_order_independent_p2wsh( + privks: &[StacksPrivateKey], + num_sigs: usize, + nonce: u64, + tx_fee: u64, + recipient: &PrincipalData, + amount: u64, +) -> StacksTransaction { + let payload = + TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); + sign_tx_order_independent_p2wsh(payload, privks, num_sigs, nonce, tx_fee) +} + pub fn make_user_contract_call( sender: &StacksPrivateKey, nonce: u64, diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index 4ede285e41..2204f57a25 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -120,6 +120,7 @@ fn ClarityVersion_consensus_serialize( match *version { ClarityVersion::Clarity1 => write_next(fd, &1u8)?, ClarityVersion::Clarity2 => write_next(fd, &2u8)?, + ClarityVersion::Clarity3 => write_next(fd, &3u8)?, } Ok(()) } @@ -131,6 +132,7 @@ fn ClarityVersion_consensus_deserialize( match version_byte { 1u8 => Ok(ClarityVersion::Clarity1), 2u8 => Ok(ClarityVersion::Clarity2), + 3u8 => Ok(ClarityVersion::Clarity3), _ => Err(codec_error::DeserializeError(format!( "Unrecognized ClarityVersion byte {}", &version_byte @@ -686,19 +688,17 @@ impl StacksTransaction { ))); } }; + let tx = StacksTransaction { + version, + chain_id, + auth, + anchor_mode, + post_condition_mode, + post_conditions, + payload, + }; - Ok(( - StacksTransaction { - version, - chain_id, - auth, - anchor_mode, - post_condition_mode, - post_conditions, - payload, - }, - fd.num_read(), - )) + Ok((tx, 
fd.num_read())) } /// Try to convert to a coinbase payload @@ -873,6 +873,10 @@ impl StacksTransaction { privk, )?; match condition { + TransactionSpendingCondition::Singlesig(ref mut cond) => { + cond.set_signature(next_sig); + Ok(next_sighash) + } TransactionSpendingCondition::Multisig(ref mut cond) => { cond.push_signature( if privk.compress_public() { @@ -884,9 +888,16 @@ impl StacksTransaction { ); Ok(next_sighash) } - TransactionSpendingCondition::Singlesig(ref mut cond) => { - cond.set_signature(next_sig); - Ok(next_sighash) + TransactionSpendingCondition::OrderIndependentMultisig(ref mut cond) => { + cond.push_signature( + if privk.compress_public() { + TransactionPublicKeyEncoding::Compressed + } else { + TransactionPublicKeyEncoding::Uncompressed + }, + next_sig, + ); + Ok(*cur_sighash) } } } @@ -897,6 +908,9 @@ impl StacksTransaction { ) -> Option { match condition { TransactionSpendingCondition::Multisig(ref mut cond) => cond.pop_auth_field(), + TransactionSpendingCondition::OrderIndependentMultisig(ref mut cond) => { + cond.pop_auth_field() + } TransactionSpendingCondition::Singlesig(ref mut cond) => cond.pop_signature(), } } @@ -911,6 +925,10 @@ impl StacksTransaction { cond.push_public_key(pubkey.clone()); Ok(()) } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut cond) => { + cond.push_public_key(pubkey.clone()); + Ok(()) + } _ => Err(net_error::SigningError( "Not a multisig condition".to_string(), )), @@ -1234,6 +1252,111 @@ mod test { use crate::net::codec::*; use crate::net::*; + impl StacksTransaction { + /// Sign a sighash without appending the signature and public key + /// to the given spending condition. 
+ /// Returns the resulting signature + fn sign_no_append_origin( + &self, + cur_sighash: &Txid, + privk: &StacksPrivateKey, + ) -> Result { + let next_sig = match self.auth { + TransactionAuth::Standard(ref origin_condition) + | TransactionAuth::Sponsored(ref origin_condition, _) => { + let (next_sig, _next_sighash) = TransactionSpendingCondition::next_signature( + cur_sighash, + &TransactionAuthFlags::AuthStandard, + origin_condition.tx_fee(), + origin_condition.nonce(), + privk, + )?; + next_sig + } + }; + Ok(next_sig) + } + + /// Appends a signature and public key to the spending condition. + fn append_origin_signature( + &mut self, + signature: MessageSignature, + key_encoding: TransactionPublicKeyEncoding, + ) -> Result<(), net_error> { + match self.auth { + TransactionAuth::Standard(ref mut origin_condition) + | TransactionAuth::Sponsored(ref mut origin_condition, _) => match origin_condition + { + TransactionSpendingCondition::Singlesig(ref mut cond) => { + cond.set_signature(signature); + } + TransactionSpendingCondition::Multisig(ref mut cond) => { + cond.push_signature(key_encoding, signature); + } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut cond) => { + cond.push_signature(key_encoding, signature); + } + }, + }; + Ok(()) + } + + /// Sign a sighash as a sponsor without appending the signature and public key + /// to the given spending condition. 
+ /// Returns the resulting signature + fn sign_no_append_sponsor( + &mut self, + cur_sighash: &Txid, + privk: &StacksPrivateKey, + ) -> Result { + let next_sig = match self.auth { + TransactionAuth::Standard(_) => { + return Err(net_error::SigningError( + "Cannot sign standard authorization with a sponsoring private key" + .to_string(), + )); + } + TransactionAuth::Sponsored(_, ref mut sponsor_condition) => { + let (next_sig, _next_sighash) = TransactionSpendingCondition::next_signature( + cur_sighash, + &TransactionAuthFlags::AuthSponsored, + sponsor_condition.tx_fee(), + sponsor_condition.nonce(), + privk, + )?; + next_sig + } + }; + Ok(next_sig) + } + + /// Appends a sponsor signature and public key to the spending condition. + pub fn append_sponsor_signature( + &mut self, + signature: MessageSignature, + key_encoding: TransactionPublicKeyEncoding, + ) -> Result<(), net_error> { + match self.auth { + TransactionAuth::Standard(_) => Err(net_error::SigningError( + "Cannot appned a public key to the sponsor of a standard auth condition" + .to_string(), + )), + TransactionAuth::Sponsored(_, ref mut sponsor_condition) => match sponsor_condition + { + TransactionSpendingCondition::Singlesig(ref mut cond) => { + Ok(cond.set_signature(signature)) + } + TransactionSpendingCondition::Multisig(ref mut cond) => { + Ok(cond.push_signature(key_encoding, signature)) + } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut cond) => { + Ok(cond.push_signature(key_encoding, signature)) + } + }, + } + } + } + fn corrupt_auth_field( corrupt_auth_fields: &TransactionAuth, i: usize, @@ -1264,6 +1387,20 @@ mod test { }; data.fields[i] = corrupt_field } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + let corrupt_field = match data.fields[i] { + TransactionAuthField::PublicKey(ref pubkey) => { + TransactionAuthField::PublicKey(StacksPublicKey::from_hex("0270790e675116a63a75008832d82ad93e4332882ab0797b0f156de9d739160a0b").unwrap()) + } + 
TransactionAuthField::Signature(ref key_encoding, ref sig) => { + let mut sig_bytes = sig.as_bytes().to_vec(); + sig_bytes[1] ^= 1u8; // this breaks the `r` paramter + let corrupt_sig = MessageSignature::from_raw(&sig_bytes); + TransactionAuthField::Signature(*key_encoding, corrupt_sig) + } + }; + data.fields[i] = corrupt_field + } } } } @@ -1289,6 +1426,20 @@ mod test { }; data.fields[i] = corrupt_field } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + let corrupt_field = match data.fields[i] { + TransactionAuthField::PublicKey(_) => { + TransactionAuthField::PublicKey(StacksPublicKey::from_hex("0270790e675116a63a75008832d82ad93e4332882ab0797b0f156de9d739160a0b").unwrap()) + } + TransactionAuthField::Signature(ref key_encoding, ref sig) => { + let mut sig_bytes = sig.as_bytes().to_vec(); + sig_bytes[1] ^= 1u8; // this breaks the `r` paramter + let corrupt_sig = MessageSignature::from_raw(&sig_bytes); + TransactionAuthField::Signature(*key_encoding, corrupt_sig) + } + }; + data.fields[i] = corrupt_field + } } } if corrupt_sponsor { @@ -1312,6 +1463,20 @@ mod test { }; data.fields[i] = corrupt_field } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + let corrupt_field = match data.fields[i] { + TransactionAuthField::PublicKey(ref pubkey) => { + TransactionAuthField::PublicKey(StacksPublicKey::from_hex("0270790e675116a63a75008832d82ad93e4332882ab0797b0f156de9d739160a0b").unwrap()) + } + TransactionAuthField::Signature(ref key_encoding, ref sig) => { + let mut sig_bytes = sig.as_bytes().to_vec(); + sig_bytes[1] ^= 1u8; // this breaks the `r` paramter + let corrupt_sig = MessageSignature::from_raw(&sig_bytes); + TransactionAuthField::Signature(*key_encoding, corrupt_sig) + } + }; + data.fields[i] = corrupt_field + } } } } @@ -1325,15 +1490,20 @@ mod test { TransactionSpendingCondition::Multisig(ref data) => { let mut j = 0; for f in 0..data.fields.len() { - match data.fields[f] { - 
TransactionAuthField::Signature(_, _) => { - j = f; - break; - } - _ => { - continue; - } - } + if matches!(data.fields[f], TransactionAuthField::Signature(..)) { + j = f; + break; + }; + } + j + } + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + let mut j = 0; + for f in 0..data.fields.len() { + if matches!(data.fields[f], TransactionAuthField::Signature(..)) { + j = f; + break; + }; } j } @@ -1346,15 +1516,20 @@ mod test { TransactionSpendingCondition::Multisig(ref data) => { let mut j = 0; for f in 0..data.fields.len() { - match data.fields[f] { - TransactionAuthField::PublicKey(_) => { - j = f; - break; - } - _ => { - continue; - } - } + if matches!(data.fields[f], TransactionAuthField::PublicKey(_)) { + j = f; + break; + }; + } + j + } + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + let mut j = 0; + for f in 0..data.fields.len() { + if matches!(data.fields[f], TransactionAuthField::PublicKey(_)) { + j = f; + break; + }; } j } @@ -1446,6 +1621,14 @@ mod test { MultisigHashMode::P2SH }; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.hash_mode = + if data.hash_mode == OrderIndependentMultisigHashMode::P2SH { + OrderIndependentMultisigHashMode::P2WSH + } else { + OrderIndependentMultisigHashMode::P2SH + }; + } } } } @@ -1466,6 +1649,14 @@ mod test { MultisigHashMode::P2SH }; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.hash_mode = + if data.hash_mode == OrderIndependentMultisigHashMode::P2SH { + OrderIndependentMultisigHashMode::P2WSH + } else { + OrderIndependentMultisigHashMode::P2SH + }; + } } } if corrupt_sponsor { @@ -1484,6 +1675,14 @@ mod test { MultisigHashMode::P2SH }; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.hash_mode = + if data.hash_mode == OrderIndependentMultisigHashMode::P2SH { + OrderIndependentMultisigHashMode::P2WSH + } else { + OrderIndependentMultisigHashMode::P2SH + }; + } 
} } } @@ -1504,6 +1703,9 @@ mod test { TransactionSpendingCondition::Multisig(ref mut data) => { data.nonce += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.nonce += 1; + } }; } } @@ -1516,6 +1718,9 @@ mod test { TransactionSpendingCondition::Multisig(ref mut data) => { data.nonce += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.nonce += 1; + } } } if corrupt_sponsor { @@ -1526,6 +1731,9 @@ mod test { TransactionSpendingCondition::Multisig(ref mut data) => { data.nonce += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.nonce += 1; + } } } } @@ -1566,6 +1774,10 @@ mod test { is_multisig_origin = true; data.signatures_required += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + is_multisig_origin = true; + data.signatures_required += 1; + } }; } } @@ -1577,6 +1789,10 @@ mod test { is_multisig_origin = true; data.signatures_required += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + is_multisig_origin = true; + data.signatures_required += 1; + } } } if corrupt_sponsor { @@ -1586,6 +1802,10 @@ mod test { is_multisig_sponsor = true; data.signatures_required += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + is_multisig_sponsor = true; + data.signatures_required += 1; + } } } } @@ -3649,6 +3869,7 @@ mod test { 0, &TransactionAnchorMode::OnChainOnly, &TransactionPostConditionMode::Deny, + StacksEpochId::latest(), ); for tx in all_txs.iter() { let mut tx_bytes = vec![ @@ -3849,6 +4070,17 @@ mod test { assert_eq!(txid_before, signed_tx.txid()); } + fn is_order_independent_multisig(tx: &StacksTransaction) -> bool { + let spending_condition = match &tx.auth { + TransactionAuth::Standard(origin) => origin, + TransactionAuth::Sponsored(_, sponsor) => sponsor, + }; + match spending_condition { + TransactionSpendingCondition::OrderIndependentMultisig(..) 
=> true, + _ => false, + } + } + fn check_oversign_origin_multisig(signed_tx: &StacksTransaction) -> () { let tx = signed_tx.clone(); let privk = StacksPrivateKey::from_hex( @@ -3865,7 +4097,14 @@ mod test { Ok(_) => assert!(false), Err(e) => match e { net_error::VerifyingError(msg) => { - assert_eq!(&msg, "Incorrect number of signatures") + if is_order_independent_multisig(&oversigned_tx) { + assert!( + msg.contains("Signer hash does not equal hash of public key(s)"), + "{msg}" + ) + } else { + assert_eq!(&msg, "Incorrect number of signatures") + } } _ => assert!(false), }, @@ -3922,7 +4161,14 @@ mod test { Ok(_) => assert!(false), Err(e) => match e { net_error::VerifyingError(msg) => { - assert_eq!(&msg, "Incorrect number of signatures") + if is_order_independent_multisig(&oversigned_tx) { + assert!( + msg.contains("Signer hash does not equal hash of public key(s)"), + "{msg}" + ) + } else { + assert_eq!(&msg, "Incorrect number of signatures") + } } _ => assert!(false), }, @@ -4066,7 +4312,7 @@ mod test { sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); @@ -4176,7 +4422,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("693cd53eb47d4749762d7cfaf46902bda5be5f97").unwrap() + bytes: Hash160::from_hex("693cd53eb47d4749762d7cfaf46902bda5be5f97").unwrap(), } ); @@ -4261,14 +4507,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: 
Hash160::from_hex("693cd53eb47d4749762d7cfaf46902bda5be5f97").unwrap() + bytes: Hash160::from_hex("693cd53eb47d4749762d7cfaf46902bda5be5f97").unwrap(), } ); @@ -4376,7 +4622,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap() + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), } ); @@ -4486,14 +4732,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap() + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), } ); @@ -4614,7 +4860,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap() + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), } ); @@ -4727,14 +4973,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap() + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), } ); @@ -4853,7 +5099,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("2136367c9c740e7dbed8795afdf8a6d273096718").unwrap() + bytes: 
Hash160::from_hex("2136367c9c740e7dbed8795afdf8a6d273096718").unwrap(), } ); @@ -4963,14 +5209,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("2136367c9c740e7dbed8795afdf8a6d273096718").unwrap() + bytes: Hash160::from_hex("2136367c9c740e7dbed8795afdf8a6d273096718").unwrap(), } ); @@ -5076,7 +5322,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("f15fa5c59d14ffcb615fa6153851cd802bb312d2").unwrap() + bytes: Hash160::from_hex("f15fa5c59d14ffcb615fa6153851cd802bb312d2").unwrap(), } ); @@ -5157,14 +5403,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("f15fa5c59d14ffcb615fa6153851cd802bb312d2").unwrap() + bytes: Hash160::from_hex("f15fa5c59d14ffcb615fa6153851cd802bb312d2").unwrap(), } ); @@ -5268,7 +5514,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap() + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), } ); @@ -5379,14 +5625,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); 
assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap() + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), } ); @@ -5475,6 +5721,2972 @@ mod test { } } - // TODO(test): test with different tx versions - // TODO(test): test error values for signing and verifying + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2sh() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = tx.append_next_origin(&pubk_1); + let _ = tx.append_origin_signature(sig2, TransactionPublicKeyEncoding::Compressed); + 
let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_public_key()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[0].as_public_key().unwrap(), pubk_1); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2sh_extra_signers() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = 
origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_2) + .unwrap(); + let sig1 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_origin_signature(sig2, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + //check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 3); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2sh() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + 
+ let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Compressed); + let _ = origin_tx.append_next_sponsor(&pubk_3); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + 
assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2sh_uncompressed() { + let privk_1 = StacksPrivateKey::from_hex( + 
"6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e0", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d2", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = tx.append_next_origin(&pubk_1); + let _ = tx.append_origin_signature(sig2, TransactionPublicKeyEncoding::Uncompressed); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Uncompressed); + + check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_public_key()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[0].as_public_key().unwrap(), pubk_1); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2sh_uncompressed() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e0", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d2", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = 
TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = origin_tx + .append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx + .append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx.append_next_sponsor(&pubk_3); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + 
assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2sh_mixed() { + let privk_1 = StacksPrivateKey::from_hex( + 
"6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig1 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_2); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2sh_mixed_3_out_of_9() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + let privk_4 = StacksPrivateKey::from_hex( + "3beb8916404874f5d5de162c95470951de5b4a7f6ec8d7a20511551821f16db501", + ) + .unwrap(); + let privk_5 = StacksPrivateKey::from_hex( + "601aa0939e98efec29a4dc645377c9d4acaa0b7318444ec8fd7d090d0b36d85b01", + ) + .unwrap(); + let privk_6 = StacksPrivateKey::from_hex( + "5a4ca3db5a3b36bc32d9f2f0894435cbc4b2b1207e95ee283616d9a0797210da01", + ) + .unwrap(); + let privk_7 = StacksPrivateKey::from_hex( + "068856c242bfebdc57700fa598fae4e8ebb6b5f6bf932177018071489737d3ff01", + ) + .unwrap(); + let privk_8 = StacksPrivateKey::from_hex( + "a07a397f6b31c803f5d7f0c4620576cb03c66c12cdbdb6cd91d001d6f0052de201", + ) + .unwrap(); + let privk_9 = 
StacksPrivateKey::from_hex( + "f395129abc42c57e394dcceebeca9f51f0cb0a3f1c3a899d62e40b9340c7cc1101", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + let pubk_4 = StacksPublicKey::from_private(&privk_4); + let pubk_5 = StacksPublicKey::from_private(&privk_5); + let pubk_6 = StacksPublicKey::from_private(&privk_6); + let pubk_7 = StacksPublicKey::from_private(&privk_7); + let pubk_8 = StacksPublicKey::from_private(&privk_8); + let pubk_9 = StacksPublicKey::from_private(&privk_9); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 3, + vec![ + pubk_1.clone(), + pubk_2.clone(), + pubk_3.clone(), + pubk_4.clone(), + pubk_5.clone(), + pubk_6.clone(), + pubk_7.clone(), + pubk_8.clone(), + pubk_9.clone(), + ], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("315d672961ef2583faf4107ab4ec5566014c867c").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig1 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + let sig9 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_9) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_2); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_4); + let _ = tx.append_next_origin(&pubk_5); + let _ = 
tx.append_next_origin(&pubk_6); + let _ = tx.append_next_origin(&pubk_7); + let _ = tx.append_next_origin(&pubk_8); + let _ = tx.append_origin_signature(sig9, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 3); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 9); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + assert!(data.fields[3].is_public_key()); + assert!(data.fields[4].is_public_key()); + assert!(data.fields[5].is_public_key()); + assert!(data.fields[6].is_public_key()); + assert!(data.fields[7].is_public_key()); + assert!(data.fields[8].is_signature()); + + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!(data.fields[3].as_public_key().unwrap(), pubk_4); + assert_eq!(data.fields[4].as_public_key().unwrap(), pubk_5); + assert_eq!(data.fields[5].as_public_key().unwrap(), pubk_6); + assert_eq!(data.fields[6].as_public_key().unwrap(), pubk_7); + assert_eq!(data.fields[7].as_public_key().unwrap(), pubk_8); + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[8].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2sh_mixed() { + let 
origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut 
tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig3 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_3) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = origin_tx.append_next_sponsor(&pubk_2); + let _ = + origin_tx.append_sponsor_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2sh_mixed_5_out_of_5() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + let privk_4 = StacksPrivateKey::from_hex( + "3beb8916404874f5d5de162c95470951de5b4a7f6ec8d7a20511551821f16db501", + ) + .unwrap(); + let privk_5 = StacksPrivateKey::from_hex( + 
"601aa0939e98efec29a4dc645377c9d4acaa0b7318444ec8fd7d090d0b36d85b01", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + let pubk_4 = StacksPublicKey::from_private(&privk_4); + let pubk_5 = StacksPublicKey::from_private(&privk_5); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 5, + vec![ + pubk_1.clone(), + pubk_2.clone(), + pubk_3.clone(), + pubk_4.clone(), + pubk_5.clone(), + ], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("fc29d14be615b0f72a66b920040c2b5b8124990b").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); 
+ origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig3 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + let sig4 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_4) + .unwrap(); + let sig5 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_5) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig3, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig4, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig5, TransactionPublicKeyEncoding::Compressed); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 5); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 5); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + assert!(data.fields[3].is_signature()); + assert!(data.fields[4].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[3].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[4].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2wsh() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = 
StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig1 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_2); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut tx); + check_oversign_origin_multisig_uncompressed(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2wsh_4_out_of_6() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + let privk_4 = StacksPrivateKey::from_hex( + "3beb8916404874f5d5de162c95470951de5b4a7f6ec8d7a20511551821f16db501", + ) + .unwrap(); + let privk_5 = StacksPrivateKey::from_hex( + "601aa0939e98efec29a4dc645377c9d4acaa0b7318444ec8fd7d090d0b36d85b01", + ) + .unwrap(); + let privk_6 = StacksPrivateKey::from_hex( + "5a4ca3db5a3b36bc32d9f2f0894435cbc4b2b1207e95ee283616d9a0797210da01", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + let pubk_4 = StacksPublicKey::from_private(&privk_4); + let pubk_5 = StacksPublicKey::from_private(&privk_5); + let pubk_6 = 
StacksPublicKey::from_private(&privk_6); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 4, + vec![ + pubk_1.clone(), + pubk_2.clone(), + pubk_3.clone(), + pubk_4.clone(), + pubk_5.clone(), + pubk_6.clone(), + ], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("e2a4ae14ffb0a4a0982a06d07b97d57268d2bf94").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig1 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + let sig6 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_6) + .unwrap(); + let sig5 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_5) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_2); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_4); + let _ = tx.append_origin_signature(sig5, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_origin_signature(sig6, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut tx); + check_oversign_origin_multisig_uncompressed(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 4); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 6); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + assert!(data.fields[3].is_public_key()); + assert!(data.fields[4].is_signature()); + assert!(data.fields[5].is_signature()); + + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!(data.fields[3].as_public_key().unwrap(), pubk_4); + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[4].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[5].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2wsh() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); 
+ + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig3 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_3) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = 
origin_tx.append_next_sponsor(&pubk_2); + let _ = + origin_tx.append_sponsor_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + check_oversign_sponsor_multisig_uncompressed(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2wsh_2_out_of_7() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + let privk_4 = StacksPrivateKey::from_hex( + "3beb8916404874f5d5de162c95470951de5b4a7f6ec8d7a20511551821f16db501", + ) + .unwrap(); + let privk_5 = StacksPrivateKey::from_hex( + 
"601aa0939e98efec29a4dc645377c9d4acaa0b7318444ec8fd7d090d0b36d85b01", + ) + .unwrap(); + let privk_6 = StacksPrivateKey::from_hex( + "5a4ca3db5a3b36bc32d9f2f0894435cbc4b2b1207e95ee283616d9a0797210da01", + ) + .unwrap(); + let privk_7 = StacksPrivateKey::from_hex( + "068856c242bfebdc57700fa598fae4e8ebb6b5f6bf932177018071489737d3ff01", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + let pubk_4 = StacksPublicKey::from_private(&privk_4); + let pubk_5 = StacksPublicKey::from_private(&privk_5); + let pubk_6 = StacksPublicKey::from_private(&privk_6); + let pubk_7 = StacksPublicKey::from_private(&privk_7); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![ + pubk_1.clone(), + pubk_2.clone(), + pubk_3.clone(), + pubk_4.clone(), + pubk_5.clone(), + pubk_6.clone(), + pubk_7.clone(), + ], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("e3001c2b12f24ba279116d7001e3bd82b2b5eab4").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + 
assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig7 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_7) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = origin_tx.append_next_sponsor(&pubk_2); + let _ = origin_tx.append_next_sponsor(&pubk_3); + let _ = origin_tx.append_next_sponsor(&pubk_4); + let _ = origin_tx.append_next_sponsor(&pubk_5); + let _ = origin_tx.append_next_sponsor(&pubk_6); + let _ = + origin_tx.append_sponsor_signature(sig7, TransactionPublicKeyEncoding::Compressed); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + check_oversign_sponsor_multisig_uncompressed(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, 
origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 7); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_public_key()); + assert!(data.fields[3].is_public_key()); + assert!(data.fields[4].is_public_key()); + assert!(data.fields[5].is_public_key()); + assert!(data.fields[6].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[6].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + assert_eq!(data.fields[3].as_public_key().unwrap(), pubk_4); + assert_eq!(data.fields[4].as_public_key().unwrap(), pubk_5); + assert_eq!(data.fields[5].as_public_key().unwrap(), pubk_6); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_both_multisig_p2sh() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + 
"2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let order_independent_origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + let order_independent_origin_address = + order_independent_origin_auth.origin().address_mainnet(); + + assert_eq!(origin_address, order_independent_origin_address); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&order_independent_origin_auth); + + assert_eq!(txs.len(), order_independent_txs.len()); + + for tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&privk_1).unwrap(); + tx_signer.sign_origin(&privk_2).unwrap(); + tx_signer.append_origin(&pubk_3).unwrap(); + let mut signed_tx = tx_signer.get_tx().unwrap(); + assert_eq!(signed_tx.auth().origin().num_signatures(), 2); + + check_oversign_origin_multisig(&mut signed_tx); + check_sign_no_sponsor(&mut signed_tx); + + // tx and signed_tx are otherwise equal + assert_eq!(tx.version, signed_tx.version); + 
assert_eq!(tx.get_tx_fee(), signed_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), signed_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), signed_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, signed_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, signed_tx.post_condition_mode); + assert_eq!(tx.post_conditions, signed_tx.post_conditions); + assert_eq!(tx.payload, signed_tx.payload); + + match signed_tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::Multisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&signed_tx, true, false); + } + + for mut order_independent_tx in order_independent_txs { + assert_eq!(order_independent_tx.auth().origin().num_signatures(), 0); + + let order_independent_initial_sig_hash = order_independent_tx.sign_begin(); + let sig3 = order_independent_tx + .sign_no_append_origin(&order_independent_initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = order_independent_tx + .sign_no_append_origin(&order_independent_initial_sig_hash, &privk_2) + .unwrap(); + + let _ = order_independent_tx.append_next_origin(&pubk_1); + let _ = order_independent_tx + .append_origin_signature(sig2, TransactionPublicKeyEncoding::Compressed); + let _ = order_independent_tx + .append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut order_independent_tx); + check_sign_no_sponsor(&mut 
order_independent_tx); + + assert_eq!(order_independent_tx.auth().origin().num_signatures(), 2); + + match order_independent_tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_public_key()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[0].as_public_key().unwrap(), pubk_1); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&order_independent_tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_both_multisig_p2sh_uncompressed() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e0", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d2", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let order_independent_origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = 
origin_auth.origin().address_mainnet(); + let order_independent_origin_address = + order_independent_origin_auth.origin().address_mainnet(); + assert_eq!(origin_address, order_independent_origin_address); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&order_independent_origin_auth); + + assert_eq!(txs.len(), order_independent_txs.len()); + + for tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&privk_1).unwrap(); + tx_signer.sign_origin(&privk_2).unwrap(); + tx_signer.append_origin(&pubk_3).unwrap(); + + let mut signed_tx = tx_signer.get_tx().unwrap(); + + check_oversign_origin_multisig(&mut signed_tx); + check_sign_no_sponsor(&mut signed_tx); + + assert_eq!(signed_tx.auth().origin().num_signatures(), 2); + + // tx and signed_tx are otherwise equal + assert_eq!(tx.version, signed_tx.version); + assert_eq!(tx.chain_id, signed_tx.chain_id); + assert_eq!(tx.get_tx_fee(), signed_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), signed_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), signed_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, signed_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, signed_tx.post_condition_mode); + assert_eq!(tx.post_conditions, signed_tx.post_conditions); + assert_eq!(tx.payload, signed_tx.payload); + + // auth is standard and first two auth fields are signatures for uncompressed keys. 
+ // third field is the third public key + match signed_tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::Multisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&signed_tx, true, false); + } + + for mut tx in order_independent_txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = tx.append_next_origin(&pubk_1); + let _ = tx.append_origin_signature(sig2, TransactionPublicKeyEncoding::Uncompressed); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Uncompressed); + + check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_public_key()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[0].as_public_key().unwrap(), pubk_1); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_both_multisig_p2wsh() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let order_independent_origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + let order_independent_origin_address = + 
order_independent_origin_auth.origin().address_mainnet(); + assert_eq!(origin_address, order_independent_origin_address); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&order_independent_origin_auth); + + for tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&privk_1).unwrap(); + tx_signer.sign_origin(&privk_2).unwrap(); + tx_signer.append_origin(&pubk_3).unwrap(); + let mut signed_tx = tx_signer.get_tx().unwrap(); + + check_oversign_origin_multisig(&mut signed_tx); + check_oversign_origin_multisig_uncompressed(&mut signed_tx); + check_sign_no_sponsor(&mut signed_tx); + + assert_eq!(signed_tx.auth().origin().num_signatures(), 2); + + // tx and signed_tx are otherwise equal + assert_eq!(tx.version, signed_tx.version); + assert_eq!(tx.get_tx_fee(), signed_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), signed_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), signed_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, signed_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, signed_tx.post_condition_mode); + assert_eq!(tx.post_conditions, signed_tx.post_conditions); + assert_eq!(tx.payload, signed_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match signed_tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::Multisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&signed_tx, true, false); + } + + for mut tx in order_independent_txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig1 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_2); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut tx); + check_oversign_origin_multisig_uncompressed(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_both_multisig_p2sh() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = 
TransactionSpendingCondition::new_multisig_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let real_order_independent_sponsor = + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + let order_independent_sponsor_address = real_order_independent_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!(sponsor_address, order_independent_sponsor_address); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&auth); // no difference + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + tx_signer.resume(&origin_tx); + + tx_signer.sign_sponsor(&privk_1).unwrap(); + tx_signer.sign_sponsor(&privk_2).unwrap(); + tx_signer.append_sponsor(&pubk_3).unwrap(); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + let mut signed_tx = tx_signer.get_tx().unwrap(); + + check_oversign_origin_singlesig(&mut signed_tx); + 
check_oversign_sponsor_multisig(&mut signed_tx); + + assert_eq!(signed_tx.auth().origin().num_signatures(), 1); + assert_eq!(signed_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and signed_tx are otherwise equal + assert_eq!(tx.version, signed_tx.version); + assert_eq!(tx.chain_id, signed_tx.chain_id); + assert_eq!(tx.get_tx_fee(), signed_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), signed_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), signed_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, signed_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, signed_tx.post_condition_mode); + assert_eq!(tx.post_conditions, signed_tx.post_conditions); + assert_eq!(tx.payload, signed_tx.payload); + + // auth is sponsored; the sponsor's first two auth fields are signatures for compressed keys. + // third field is the third public key + match signed_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::Multisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&signed_tx, true, false); + test_signature_and_corruption(&signed_tx, false, true); + } + + for mut tx in order_independent_txs { + 
assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx + .auth + .set_sponsor(real_order_independent_sponsor.clone()) + .unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Compressed); + let _ = origin_tx.append_next_sponsor(&pubk_3); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_both_multisig_p2sh_uncompressed() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e0", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d2", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + 
+ let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let real_order_independent_sponsor = + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + let order_independent_sponsor_address = real_order_independent_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!(sponsor_address, order_independent_sponsor_address); + + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&auth); // no difference + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let 
initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = origin_tx + .append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx + .append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx.append_next_sponsor(&pubk_3); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is sponsored; the sponsor's first two auth fields are signatures for uncompressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + + for mut tx in order_independent_txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx + .auth + .set_sponsor(real_order_independent_sponsor.clone()) + .unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = origin_tx + 
.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx + .append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx.append_next_sponsor(&pubk_3); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_both_multisig_p2wsh() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + 
let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let real_order_independent_sponsor = + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + let order_independent_sponsor_address = real_order_independent_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!(sponsor_address, order_independent_sponsor_address); + + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&auth); // no difference + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + 
tx_signer.resume(&origin_tx); + + tx_signer.sign_sponsor(&privk_1).unwrap(); + tx_signer.sign_sponsor(&privk_2).unwrap(); + tx_signer.append_sponsor(&pubk_3).unwrap(); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + let mut signed_tx = tx_signer.get_tx().unwrap(); + + check_oversign_origin_singlesig(&mut signed_tx); + check_oversign_sponsor_multisig(&mut signed_tx); + check_oversign_sponsor_multisig_uncompressed(&mut signed_tx); + + assert_eq!(signed_tx.auth().origin().num_signatures(), 1); + assert_eq!(signed_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and signed_tx are otherwise equal + assert_eq!(tx.version, signed_tx.version); + assert_eq!(tx.chain_id, signed_tx.chain_id); + assert_eq!(tx.get_tx_fee(), signed_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), signed_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), signed_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, signed_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, signed_tx.post_condition_mode); + assert_eq!(tx.post_conditions, signed_tx.post_conditions); + assert_eq!(tx.payload, signed_tx.payload); + + // auth is sponsored; the sponsor's first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match signed_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::Multisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&signed_tx, true, false); + test_signature_and_corruption(&signed_tx, false, true); + } + + for mut tx in order_independent_txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx + .auth + .set_sponsor(real_order_independent_sponsor.clone()) + .unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig3 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_3) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, 
TransactionPublicKeyEncoding::Compressed); + let _ = origin_tx.append_next_sponsor(&pubk_2); + let _ = + origin_tx.append_sponsor_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + check_oversign_sponsor_multisig_uncompressed(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } } diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index 3e4088b6eb..fed0e70e95 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -1,7 +1,12 @@ use std::path::PathBuf; use std::str::FromStr; +use clarity::util::hash::Sha512Trunc256Sum; use clarity::vm::analysis::AnalysisDatabase; +use clarity::vm::database::sqlite::{ + sqlite_get_contract_hash, sqlite_get_metadata, sqlite_get_metadata_manual, + sqlite_insert_metadata, +}; use clarity::vm::database::{ BurnStateDB, ClarityBackingStore, ClarityDatabase, HeadersDB, SpecialCaseHandler, SqliteConnection, @@ -451,6 +456,39 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { error!("Attempted to commit changes to read-only MARF"); panic!("BUG: attempted commit to read-only MARF"); } + + fn get_contract_hash( + 
&mut self, + contract: &QualifiedContractIdentifier, + ) -> InterpreterResult<(StacksBlockId, Sha512Trunc256Sum)> { + sqlite_get_contract_hash(self, contract) + } + + fn insert_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + value: &str, + ) -> InterpreterResult<()> { + sqlite_insert_metadata(self, contract, key, value) + } + + fn get_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata(self, contract, key) + } + + fn get_metadata_manual( + &mut self, + at_height: u32, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata_manual(self, at_height, contract, key) + } } impl<'a> WritableMarfStore<'a> { @@ -692,4 +730,37 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { .insert_batch(&keys, values) .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure".into()).into()) } + + fn get_contract_hash( + &mut self, + contract: &QualifiedContractIdentifier, + ) -> InterpreterResult<(StacksBlockId, Sha512Trunc256Sum)> { + sqlite_get_contract_hash(self, contract) + } + + fn insert_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + value: &str, + ) -> InterpreterResult<()> { + sqlite_insert_metadata(self, contract, key, value) + } + + fn get_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata(self, contract, key) + } + + fn get_metadata_manual( + &mut self, + at_height: u32, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata_manual(self, at_height, contract, key) + } } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index c9c21957f3..be25078521 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -1,12 +1,17 @@ use std::ops::{Deref, DerefMut}; 
+use clarity::util::hash::Sha512Trunc256Sum; use clarity::vm::analysis::AnalysisDatabase; +use clarity::vm::database::sqlite::{ + sqlite_get_contract_hash, sqlite_get_metadata, sqlite_get_metadata_manual, + sqlite_insert_metadata, +}; use clarity::vm::database::{ BurnStateDB, ClarityBackingStore, ClarityDatabase, HeadersDB, SpecialCaseHandler, SqliteConnection, NULL_BURN_STATE_DB, NULL_HEADER_DB, }; use clarity::vm::errors::{InterpreterResult, RuntimeErrorType}; -use clarity::vm::types::{PrincipalData, TupleData}; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, TupleData}; use rusqlite::{Connection, OptionalExtension, Row, ToSql}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, @@ -770,4 +775,37 @@ impl ClarityBackingStore for MemoryBackingStore { } Ok(()) } + + fn get_contract_hash( + &mut self, + contract: &QualifiedContractIdentifier, + ) -> InterpreterResult<(StacksBlockId, Sha512Trunc256Sum)> { + sqlite_get_contract_hash(self, contract) + } + + fn insert_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + value: &str, + ) -> InterpreterResult<()> { + sqlite_insert_metadata(self, contract, key, value) + } + + fn get_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata(self, contract, key) + } + + fn get_metadata_manual( + &mut self, + at_height: u32, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata_manual(self, at_height, contract, key) + } } diff --git a/stackslib/src/clarity_vm/tests/analysis_costs.rs b/stackslib/src/clarity_vm/tests/analysis_costs.rs index 321d4939a0..4fe887f2c3 100644 --- a/stackslib/src/clarity_vm/tests/analysis_costs.rs +++ b/stackslib/src/clarity_vm/tests/analysis_costs.rs @@ -258,7 +258,7 @@ fn epoch_21_test_all(use_mainnet: bool, version: ClarityVersion) { let baseline = 
test_tracked_costs("1", StacksEpochId::Epoch21, version, 0, &mut instance); for (ix, f) in NativeFunctions::ALL.iter().enumerate() { - if version < f.get_version() { + if version < f.get_min_version() || f.get_max_version().map_or(false, |max| version > max) { continue; } @@ -295,7 +295,7 @@ fn epoch_205_test_all(use_mainnet: bool) { ); for (ix, f) in NativeFunctions::ALL.iter().enumerate() { - if f.get_version() == ClarityVersion::Clarity1 { + if f.get_min_version() == ClarityVersion::Clarity1 { let test = get_simple_test(f); let cost = test_tracked_costs( test, diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index b9916dac11..0cdc1ad8bf 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use clarity::types::StacksEpochId; use clarity::vm::ast::errors::ParseErrors; use clarity::vm::ast::ASTRules; use clarity::vm::clarity::Error as ClarityError; @@ -30,7 +31,7 @@ use clarity::vm::tests::{ use clarity::vm::types::SequenceData::Buffer; use clarity::vm::types::{ BuffData, OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, - StandardPrincipalData, TypeSignature, Value, + StandardPrincipalData, TupleData, TypeSignature, Value, }; use clarity::vm::Value::Sequence; use clarity::vm::{ast, execute as vm_execute, ClarityVersion}; @@ -416,7 +417,7 @@ fn trait_invocation_cross_epoch() { let sender = StacksAddress::burn_address(false).into(); - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); let clarity_version = ClarityVersion::default_for_epoch(epoch); @@ -425,7 +426,7 @@ fn trait_invocation_cross_epoch() { publish_contract(conn, &use_contract_id, use_contract, clarity_version).unwrap(); }); // Advance another block 
so we get to Stacks 2.1. This is the last block in 2.05 - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block(|_| {}); // now in Stacks 2.1 sim.execute_next_block_as_conn(|conn| { @@ -435,7 +436,7 @@ fn trait_invocation_cross_epoch() { publish_contract(conn, &invoke_contract_id, invoke_contract, clarity_version).unwrap(); }); - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { @@ -452,7 +453,7 @@ fn trait_invocation_cross_epoch() { }); }); - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); // now in Stacks 2.2 sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); @@ -476,7 +477,7 @@ fn trait_invocation_cross_epoch() { }); }); - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { @@ -500,7 +501,7 @@ fn trait_invocation_cross_epoch() { }); // should now be in Stacks 2.3, so the invocation should work again! 
- info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { @@ -517,7 +518,7 @@ fn trait_invocation_cross_epoch() { }); }); - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { @@ -882,3 +883,717 @@ fn trait_with_trait_invocation_cross_epoch() { }); }); } + +#[test] +fn test_block_heights() { + let mut sim = ClarityTestSim::new(); + sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5, 6, 7]; + + let contract_identifier1 = QualifiedContractIdentifier::local("test-contract-1").unwrap(); + let contract_identifier2 = QualifiedContractIdentifier::local("test-contract-2").unwrap(); + + // Advance to epoch 3.0 + while sim.block_height <= 7 { + sim.execute_next_block(|_env| {}); + } + + let block_height = sim.block_height as u128; + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + assert_eq!(epoch, StacksEpochId::Epoch30); + + // This version uses the Clarity 1 / 2 keywords + let contract_clarity1 = + "(define-private (test-func) { burn-block-height: burn-block-height, block-height: block-height })"; + // This version uses the Clarity 3 keywords + let contract_clarity3 = + "(define-private (test-func) { burn-block-height: burn-block-height, stacks-block-height: stacks-block-height, tenure-height: tenure-height })"; + + // Check both contracts in Clarity 1, publish the Clarity 1 contract + conn.as_transaction(|clarity_db| { + // analyze the contracts as Clarity 1 + let (ast, analysis) = clarity_db.analyze_smart_contract( + &contract_identifier1, + ClarityVersion::Clarity1, + &contract_clarity1, + ASTRules::PrecheckSize, + ).unwrap(); + + let res = clarity_db.analyze_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity1, + &contract_clarity3, + ASTRules::PrecheckSize, + ); + if let 
Err(ClarityError::Analysis(check_error)) = res { + if let CheckErrors::UndefinedVariable(var_name) = check_error.err { + assert_eq!(var_name, "stacks-block-height"); + } else { + panic!("Bad analysis error: {:?}", &check_error); + } + } else { + panic!("Bad analysis result: {:?}", &res); + } + + // Publish the Clarity 1 contract + clarity_db + .initialize_smart_contract( + &contract_identifier1, + ClarityVersion::Clarity1, + &ast, + contract_clarity1, + None, + |_, _| false, + ).unwrap(); + + // analyze the contracts as Clarity 2 + let (ast, analysis) = clarity_db.analyze_smart_contract( + &contract_identifier1, + ClarityVersion::Clarity2, + &contract_clarity1, + ASTRules::PrecheckSize, + ).unwrap(); + + let res = clarity_db.analyze_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity2, + &contract_clarity3, + ASTRules::PrecheckSize, + ); + if let Err(ClarityError::Analysis(check_error)) = res { + if let CheckErrors::UndefinedVariable(var_name) = check_error.err { + assert_eq!(var_name, "stacks-block-height"); + } else { + panic!("Bad analysis error: {:?}", &check_error); + } + } else { + panic!("Bad analysis result: {:?}", &res); + } + + // analyze the contracts as Clarity 3 + let res = clarity_db.analyze_smart_contract( + &contract_identifier1, + ClarityVersion::Clarity3, + &contract_clarity1, + ASTRules::PrecheckSize, + ); + if let Err(ClarityError::Analysis(check_error)) = res { + if let CheckErrors::UndefinedVariable(var_name) = check_error.err { + assert_eq!(var_name, "block-height"); + } else { + panic!("Bad analysis error: {:?}", &check_error); + } + } else { + panic!("Bad analysis result: {:?}", &res); + } + + let (ast, analysis) = clarity_db.analyze_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity3, + &contract_clarity3, + ASTRules::PrecheckSize, + ).unwrap(); + + // Publish the Clarity 3 contract + clarity_db + .initialize_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity3, + &ast, + contract_clarity3, + 
None, + |_, _| false, + ).unwrap(); + }); + + // Call the contracts and validate the results + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::Tuple(TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(block_height)), + ("block-height".into(), Value::UInt(block_height + 1)) + ]).unwrap()), + tx.eval_read_only(&contract_identifier1, "(test-func)") + .unwrap() + ); + assert_eq!( + Value::Tuple(TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(block_height)), + ("stacks-block-height".into(), Value::UInt(block_height + 1)), + ("tenure-height".into(), Value::UInt(block_height + 1)) + ]).unwrap()), + tx.eval_read_only(&contract_identifier2, "(test-func)") + .unwrap() + ); + }); + + // Call the contracts in the next block and validate the results + let block_height = sim.block_height as u128; + sim.execute_next_block_as_conn(|conn| { + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(block_height)), + ("block-height".into(), Value::UInt(block_height + 1)), + ]) + .unwrap() + ), + tx.eval_read_only(&contract_identifier1, "(test-func)") + .unwrap() + ); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(block_height)), + ("stacks-block-height".into(), Value::UInt(block_height + 1)), + ("tenure-height".into(), Value::UInt(block_height + 1)) + ]) + .unwrap() + ), + tx.eval_read_only(&contract_identifier2, "(test-func)") + .unwrap() + ); + }); + + // Call the contracts in the next block with no new tenure and validate the results + let block_height = sim.block_height as u128; + sim.execute_next_block_as_conn_with_tenure(false, |conn| { + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(block_height)), + ("block-height".into(), Value::UInt(block_height)) + ]) + 
.unwrap() + ), + tx.eval_read_only(&contract_identifier1, "(test-func)") + .unwrap() + ); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(block_height)), + ("stacks-block-height".into(), Value::UInt(block_height + 1)), + ("tenure-height".into(), Value::UInt(block_height)) + ]) + .unwrap() + ), + tx.eval_read_only(&contract_identifier2, "(test-func)") + .unwrap() + ); + }); + + // Call the contracts in the next block with no new tenure and validate the results + let block_height = sim.block_height as u128; + sim.execute_next_block_as_conn(|conn| { + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(block_height)), + ("block-height".into(), Value::UInt(block_height)) + ]) + .unwrap() + ), + tx.eval_read_only(&contract_identifier1, "(test-func)") + .unwrap() + ); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(block_height)), + ("stacks-block-height".into(), Value::UInt(block_height + 1)), + ("tenure-height".into(), Value::UInt(block_height)) + ]) + .unwrap() + ), + tx.eval_read_only(&contract_identifier2, "(test-func)") + .unwrap() + ); + }); +} + +/// Test calling into a Clarity 1 or Clarity 2 contract which have bound +/// variable names `stacks-block-height` and `tenure-height` from a Clarity 3 +/// contract. 
+#[test] +fn test_block_heights_across_versions() { + let mut sim = ClarityTestSim::new(); + sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5, 6, 7]; + + let contract_id_e2c1 = QualifiedContractIdentifier::local("epoch-2-clarity-1").unwrap(); + let contract_id_e2c2 = QualifiedContractIdentifier::local("epoch-2-clarity-2").unwrap(); + let contract_id_e3c3 = QualifiedContractIdentifier::local("epoch-3-clarity-3").unwrap(); + + let contract_e2c1_2 = r#" + (define-read-only (get-height (stacks-block-height int) (tenure-height bool)) + (if tenure-height + stacks-block-height + (+ stacks-block-height 1) + ) + ) + "#; + let contract_e3c3 = format!( + r#" + (define-read-only (call-e2 (version int)) + (if (is-eq version 1) + (contract-call? '{contract_id_e2c1} get-height 123 false) + (contract-call? '{contract_id_e2c2} get-height 456 true) + ) + ) + "# + ); + + sim.execute_next_block(|_env| {}); + + // Deploy the Clarity 1 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 1 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e2c1, + ClarityVersion::Clarity1, + &contract_e2c1_2, + ASTRules::PrecheckSize, + ) + .unwrap(); + clarity_db + .save_analysis(&contract_id_e2c1, &analysis) + .unwrap(); + + // Publish the Clarity 1 contract + clarity_db + .initialize_smart_contract( + &contract_id_e2c1, + ClarityVersion::Clarity1, + &ast, + contract_e2c1_2, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Deploy the Clarity 2 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 2 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e2c2, + ClarityVersion::Clarity2, + &contract_e2c1_2, + ASTRules::PrecheckSize, + ) + .unwrap(); + clarity_db + .save_analysis(&contract_id_e2c2, &analysis) + .unwrap(); + + // Publish the Clarity 2 contract + clarity_db + 
.initialize_smart_contract( + &contract_id_e2c2, + ClarityVersion::Clarity2, + &ast, + contract_e2c1_2, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Advance to epoch 3 + while sim.block_height <= 7 { + sim.execute_next_block(|_env| {}); + } + + // Deploy the Clarity 3 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 3 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e3c3, + ClarityVersion::Clarity3, + &contract_e3c3, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the Clarity 3 contract + clarity_db + .initialize_smart_contract( + &contract_id_e3c3, + ClarityVersion::Clarity3, + &ast, + &contract_e3c3, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Call the Clarity 3 contract and validate the results + sim.execute_next_block_as_conn(|conn| { + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::Int(124), + tx.eval_read_only(&contract_id_e3c3, "(call-e2 1)").unwrap() + ); + assert_eq!( + Value::Int(456), + tx.eval_read_only(&contract_id_e3c3, "(call-e2 2)").unwrap() + ); + }); +} + +/// Test passing a Clarity 3 contract using `stacks-block-height` and +/// `tenure-height` as a trait into a Clarity 1 and Clarity 2 contract. +#[test] +fn test_block_heights_across_versions_traits_3_from_2() { + let mut sim = ClarityTestSim::new(); + sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5, 6, 7]; + + let contract_id_e2c1 = QualifiedContractIdentifier::local("epoch-2-clarity-1").unwrap(); + let contract_id_e2c2 = QualifiedContractIdentifier::local("epoch-2-clarity-2").unwrap(); + let contract_id_e3c3 = QualifiedContractIdentifier::local("epoch-3-clarity-3").unwrap(); + + let contract_e2c1_2 = r#" + (define-trait getter ((get-int () (response uint uint)))) + (define-public (get-it (get-trait )) + (contract-call? 
get-trait get-int) + ) + "#; + let contract_e3c3 = format!( + r#" + (define-public (get-int) + (ok (+ stacks-block-height tenure-height)) + ) + "# + ); + + sim.execute_next_block(|_env| {}); + + // Deploy the Clarity 1 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 1 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e2c1, + ClarityVersion::Clarity1, + &contract_e2c1_2, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the Clarity 1 contract + clarity_db + .initialize_smart_contract( + &contract_id_e2c1, + ClarityVersion::Clarity1, + &ast, + contract_e2c1_2, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Deploy the Clarity 2 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 2 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e2c2, + ClarityVersion::Clarity2, + &contract_e2c1_2, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the Clarity 2 contract + clarity_db + .initialize_smart_contract( + &contract_id_e2c2, + ClarityVersion::Clarity2, + &ast, + contract_e2c1_2, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Advance to epoch 3 + while sim.block_height <= 7 { + sim.execute_next_block(|_env| {}); + } + + // Deploy the Clarity 3 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 3 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e3c3, + ClarityVersion::Clarity3, + &contract_e3c3, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the Clarity 3 contract + clarity_db + .initialize_smart_contract( + &contract_id_e3c3, + ClarityVersion::Clarity3, + &ast, + &contract_e3c3, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Call the Clarity 1 and 2 contracts, passing 
the Clarity 3 contract + sim.execute_next_block_as_conn(|conn| { + let mut tx = conn.start_transaction_processing(); + let res1 = tx + .run_contract_call( + &PrincipalData::parse("STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6").unwrap(), + None, + &contract_id_e2c1, + "get-it", + &[Value::Principal(contract_id_e3c3.clone().into())], + |_, _| false, + ) + .unwrap(); + assert_eq!(Value::okay(Value::UInt(20)).unwrap(), res1.0); + + let res2 = tx + .run_contract_call( + &PrincipalData::parse("STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6").unwrap(), + None, + &contract_id_e2c2, + "get-it", + &[Value::Principal(contract_id_e3c3.clone().into())], + |_, _| false, + ) + .unwrap(); + assert_eq!(Value::okay(Value::UInt(20)).unwrap(), res2.0); + }); +} + +/// Test passing a Clarity 2 contract using `stacks-block-height` and +/// `tenure-height` as a trait into a Clarity 3 contract. +#[test] +fn test_block_heights_across_versions_traits_2_from_3() { + let mut sim = ClarityTestSim::new(); + sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5, 6, 7]; + + let contract_id_e2c1 = QualifiedContractIdentifier::local("epoch-2-clarity-1").unwrap(); + let contract_id_e2c2 = QualifiedContractIdentifier::local("epoch-2-clarity-2").unwrap(); + let contract_id_e3c3 = QualifiedContractIdentifier::local("epoch-3-clarity-3").unwrap(); + + let contract_e2c1_2 = r#" + (define-constant stacks-block-height u555) + (define-data-var tenure-height uint u222) + (define-public (get-int) + (ok (+ stacks-block-height (var-get tenure-height))) + ) + "#; + let contract_e3c3 = format!( + r#" + (define-trait getter ((get-int () (response uint uint)))) + (define-public (get-it (get-trait )) + (contract-call? 
get-trait get-int) + ) + "# + ); + + sim.execute_next_block(|_env| {}); + + // Deploy the Clarity 1 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 1 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e2c1, + ClarityVersion::Clarity1, + &contract_e2c1_2, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the Clarity 1 contract + clarity_db + .initialize_smart_contract( + &contract_id_e2c1, + ClarityVersion::Clarity1, + &ast, + contract_e2c1_2, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Deploy the Clarity 2 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 2 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e2c2, + ClarityVersion::Clarity2, + &contract_e2c1_2, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the Clarity 2 contract + clarity_db + .initialize_smart_contract( + &contract_id_e2c2, + ClarityVersion::Clarity2, + &ast, + contract_e2c1_2, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Advance to epoch 3 + while sim.block_height <= 7 { + sim.execute_next_block(|_env| {}); + } + + // Deploy the Clarity 3 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 3 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e3c3, + ClarityVersion::Clarity3, + &contract_e3c3, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the Clarity 3 contract + clarity_db + .initialize_smart_contract( + &contract_id_e3c3, + ClarityVersion::Clarity3, + &ast, + &contract_e3c3, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Call the Clarity 3 contract, passing the Clarity 1 and 2 contracts + sim.execute_next_block_as_conn(|conn| { + let mut tx = conn.start_transaction_processing(); + 
let res1 = tx + .run_contract_call( + &PrincipalData::parse("STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6").unwrap(), + None, + &contract_id_e3c3, + "get-it", + &[Value::Principal(contract_id_e2c1.clone().into())], + |_, _| false, + ) + .unwrap(); + assert_eq!(Value::okay(Value::UInt(777)).unwrap(), res1.0); + + let res2 = tx + .run_contract_call( + &PrincipalData::parse("STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6").unwrap(), + None, + &contract_id_e3c3, + "get-it", + &[Value::Principal(contract_id_e2c2.clone().into())], + |_, _| false, + ) + .unwrap(); + assert_eq!(Value::okay(Value::UInt(777)).unwrap(), res2.0); + }); +} + +#[test] +fn test_block_heights_at_block() { + let mut sim = ClarityTestSim::new(); + sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5, 6, 7]; + + let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); + + // Advance to epoch 3.0 + while sim.block_height <= 7 { + sim.execute_next_block(|_env| {}); + } + + let block_height = sim.block_height as u128; + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + assert_eq!(epoch, StacksEpochId::Epoch30); + + let contract =r#" + (define-private (test-tenure) (at-block (unwrap-panic (get-block-info? id-header-hash u0)) tenure-height)) + (define-private (test-stacks) (at-block (unwrap-panic (get-block-info? 
id-header-hash u1)) stacks-block-height)) + "#; + + conn.as_transaction(|clarity_db| { + // Analyze the contract + let (ast, analysis) = clarity_db.analyze_smart_contract( + &contract_identifier, + ClarityVersion::Clarity3, + &contract, + ASTRules::PrecheckSize, + ).unwrap(); + + // Publish the contract + clarity_db + .initialize_smart_contract( + &contract_identifier, + ClarityVersion::Clarity3, + &ast, + contract, + None, + |_, _| false, + ).unwrap(); + }); + + // Call the contracts and validate the results + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::UInt(0), + tx.eval_read_only(&contract_identifier, "(test-tenure)") + .unwrap() + ); + assert_eq!( + Value::UInt(1), + tx.eval_read_only(&contract_identifier, "(test-stacks)") + .unwrap() + ); + }); +} diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index a4d9f9294a..0dfaa630e2 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -951,7 +951,7 @@ fn epoch_20_205_test_all(use_mainnet: bool, epoch: StacksEpochId) { for (ix, f) in NativeFunctions::ALL.iter().enumerate() { // Note: The 2.0 and 2.05 test assumes Clarity1. 
- if f.get_version() == ClarityVersion::Clarity1 { + if f.get_min_version() == ClarityVersion::Clarity1 { let test = get_simple_test(f); let cost = test_program_cost(test, ClarityVersion::Clarity1, &mut owned_env, ix + 1); diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 147eadc71b..8db6b3043a 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -20,11 +20,11 @@ use clarity::vm::clarity::{ClarityConnection, TransactionConnection}; use clarity::vm::contexts::{Environment, GlobalContext, OwnedEnvironment}; use clarity::vm::contracts::Contract; use clarity::vm::costs::ExecutionCost; -use clarity::vm::database::ClarityDatabase; +use clarity::vm::database::{ClarityDatabase, HeadersDB}; use clarity::vm::errors::{CheckErrors, Error as InterpreterError, Error, RuntimeErrorType}; use clarity::vm::representations::SymbolicExpression; use clarity::vm::test_util::*; -use clarity::vm::tests::test_clarity_versions; +use clarity::vm::tests::{test_clarity_versions, BurnStateDB}; use clarity::vm::types::{ OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, StandardPrincipalData, TypeSignature, Value, @@ -42,7 +42,7 @@ use stacks_common::util::hash::hex_bytes; use crate::chainstate::stacks::boot::{BOOT_CODE_COSTS, BOOT_CODE_COSTS_2, BOOT_CODE_COSTS_3}; use crate::chainstate::stacks::index::ClarityMarfTrieId; -use crate::clarity_vm::clarity::{ClarityInstance, Error as ClarityError}; +use crate::clarity_vm::clarity::{ClarityBlockConnection, ClarityInstance, Error as ClarityError}; use crate::clarity_vm::database::marf::MarfedKV; use crate::clarity_vm::database::MemoryBackingStore; use crate::util_lib::boot::boot_code_id; @@ -88,9 +88,33 @@ const SIMPLE_TOKENS: &str = "(define-map tokens { account: principal } { balance (token-credit! 'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G u200) (token-credit! 
.tokens u4))"; +/// Since setup_block is not called, we need to manually increment the tenure +/// height each time a new block is made. +fn new_block<'a, 'b>( + clarity: &'a mut ClarityInstance, + current: &StacksBlockId, + next: &StacksBlockId, + header_db: &'b dyn HeadersDB, + burn_state_db: &'b dyn BurnStateDB, +) -> ClarityBlockConnection<'a, 'b> { + let mut block = clarity.begin_block(current, next, header_db, burn_state_db); + block.as_free_transaction(|tx_conn| { + tx_conn + .with_clarity_db(|db| { + if db.get_clarity_epoch_version().unwrap() >= StacksEpochId::Epoch30 { + let tenure_height = db.get_tenure_height().unwrap_or(0); + db.set_tenure_height(tenure_height + 1).unwrap(); + } + Ok(()) + }) + .unwrap(); + }); + block +} + #[apply(test_clarity_versions)] fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { - if epoch < StacksEpochId::Epoch2_05 { + if epoch < StacksEpochId::Epoch2_05 || version > ClarityVersion::Clarity2 { return; } let mut clarity = ClarityInstance::new(false, CHAIN_ID_TESTNET, MarfedKV::temporary()); @@ -170,7 +194,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac gb.commit_block(); { - let mut block = clarity.begin_block( + let mut block = new_block( + &mut clarity, &StacksBlockId([0xfe as u8; 32]), &StacksBlockId([0 as u8; 32]), &TEST_HEADER_DB, @@ -335,7 +360,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac for i in 0..25 { { - let block = clarity.begin_block( + let block = new_block( + &mut clarity, &test_block_headers(i), &test_block_headers(i + 1), &TEST_HEADER_DB, @@ -346,7 +372,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac } { - let mut block = clarity.begin_block( + let mut block = new_block( + &mut clarity, &test_block_headers(25), &test_block_headers(26), &TEST_HEADER_DB, @@ -676,7 +703,8 @@ pub fn rollback_log_memory_test( .commit_block(); { - let mut conn = 
clarity_instance.begin_block( + let mut conn = new_block( + &mut clarity_instance, &StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32]), &TEST_HEADER_DB, @@ -746,7 +774,8 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id .commit_block(); { - let mut conn = clarity_instance.begin_block( + let mut conn = new_block( + &mut clarity_instance, &StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32]), &TEST_HEADER_DB, @@ -824,7 +853,8 @@ pub fn argument_memory_test( .commit_block(); { - let mut conn = clarity_instance.begin_block( + let mut conn = new_block( + &mut clarity_instance, &StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32]), &TEST_HEADER_DB, @@ -900,7 +930,8 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ .commit_block(); { - let mut conn = clarity_instance.begin_block( + let mut conn = new_block( + &mut clarity_instance, &StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32]), &TEST_HEADER_DB, @@ -1018,7 +1049,8 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ .commit_block(); { - let mut conn = clarity_instance.begin_block( + let mut conn = new_block( + &mut clarity_instance, &StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32]), &TEST_HEADER_DB, diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index a1135989a4..08fb9eeec9 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -2019,7 +2019,7 @@ impl MemPoolDB { nonce: u64, ) -> Result, db_error> { let sql = format!( - "SELECT + "SELECT txid, origin_address, origin_nonce, diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 5c237b90ca..a7f4616970 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -40,7 +40,9 @@ pub type StacksEpoch = GenericStacksEpoch; pub const SYSTEM_FORK_SET_VERSION: [u8; 4] = [23u8, 0u8, 0u8, 0u8]; // chain id -pub use 
stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, STACKS_EPOCH_MAX}; +pub use stacks_common::consts::{ + CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, MINING_COMMITMENT_WINDOW, STACKS_EPOCH_MAX, +}; // peer version (big-endian) // first byte == major network protocol version (currently 0x18) @@ -74,10 +76,6 @@ pub const NETWORK_ID_TESTNET: u32 = 0xff000000; // default port pub const NETWORK_P2P_PORT: u16 = 6265; -// sliding burnchain window over which a miner's past block-commit payouts will be used to weight -// its current block-commit in a sortition -pub const MINING_COMMITMENT_WINDOW: u8 = 6; - // Number of previous burnchain blocks to search to find burnchain-hosted Stacks operations pub const BURNCHAIN_TX_SEARCH_WINDOW: u8 = 6; @@ -1097,7 +1095,7 @@ impl StacksEpochExtension for StacksEpoch { StacksEpoch { epoch_id: StacksEpochId::Epoch24, start_height: first_burnchain_height + 20, - end_height: STACKS_EPOCH_MAX, + end_height: first_burnchain_height + 24, block_limit: ExecutionCost { write_length: 210210, write_count: 210210, @@ -1200,7 +1198,7 @@ impl StacksEpochExtension for StacksEpoch { StacksEpoch { epoch_id: StacksEpochId::Epoch24, start_height: first_burnchain_height + 20, - end_height: STACKS_EPOCH_MAX, + end_height: first_burnchain_height + 24, block_limit: ExecutionCost { write_length: 210210, write_count: 210210, diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 02c68a99ff..905f788dc2 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -31,6 +31,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, StacksWorkScore, TrieHash, VRFSeed, }; +use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, *}; use stacks_common::util::secp256k1::{MessageSignature, *}; use stacks_common::util::vrf::VRFProof; @@ -192,6 +193,7 @@ fn mempool_walk_over_fork() { 0x80000000, 
&TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let blocks_to_broadcast_in = [&b_1, &b_2, &b_4]; @@ -601,6 +603,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); // Load 24 transactions into the mempool, alternating whether or not they have a fee-rate. @@ -796,6 +799,7 @@ fn test_iterate_candidates_skipped_transaction() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); // Load 3 transactions into the mempool @@ -908,6 +912,7 @@ fn test_iterate_candidates_processing_error_transaction() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); // Load 3 transactions into the mempool @@ -1022,6 +1027,7 @@ fn test_iterate_candidates_problematic_transaction() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); // Load 3 transactions into the mempool @@ -1136,6 +1142,7 @@ fn test_iterate_candidates_concurrent_write_lock() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let mut expected_addr_nonces = HashMap::new(); @@ -1294,6 +1301,7 @@ fn mempool_do_not_replace_tx() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let mut tx = txs.pop().unwrap(); @@ -1390,6 +1398,7 @@ fn mempool_db_load_store_replace_tx() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let num_txs = txs.len() as u64; diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 39c50618d5..1f10492b93 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -33,7 +33,7 @@ use tikv_jemallocator::Jemalloc; #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; -use 
std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::{File, OpenOptions}; use std::io::prelude::*; use std::io::BufReader; @@ -47,8 +47,12 @@ use blockstack_lib::burnchains::db::{BurnchainBlockData, BurnchainDB}; use blockstack_lib::burnchains::{ Address, Burnchain, PoxConstants, Txid, BLOCKSTACK_MAGIC_MAINNET, }; -use blockstack_lib::chainstate::burn::db::sortdb::SortitionDB; -use blockstack_lib::chainstate::burn::ConsensusHash; +use blockstack_lib::chainstate::burn::db::sortdb::{ + get_block_commit_by_txid, SortitionDB, SortitionHandle, +}; +use blockstack_lib::chainstate::burn::operations::BlockstackOperationType; +use blockstack_lib::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use blockstack_lib::chainstate::coordinator::{get_reward_cycle_info, OnChainRewardSetProvider}; use blockstack_lib::chainstate::nakamoto::NakamotoChainState; use blockstack_lib::chainstate::stacks::db::blocks::{DummyEventDispatcher, StagingBlock}; use blockstack_lib::chainstate::stacks::db::{ @@ -398,7 +402,7 @@ Given a , obtain a 2100 header hash block inventory (with an empty "Usage: {} can-download-microblock Given a , obtain a 2100 header hash inventory (with an empty header cache), and then -check if the associated microblocks can be downloaded +check if the associated microblocks can be downloaded ", argv[0] ); @@ -1017,6 +1021,12 @@ simulating a miner. 
process::exit(0); } + if argv[1] == "analyze-sortition-mev" { + analyze_sortition_mev(argv); + // should be unreachable + process::exit(1); + } + if argv[1] == "replay-chainstate" { if argv.len() < 7 { eprintln!("Usage: {} OLD_CHAINSTATE_PATH OLD_SORTITION_DB_PATH OLD_BURNCHAIN_DB_PATH NEW_CHAINSTATE_PATH NEW_BURNCHAIN_DB_PATH", &argv[0]); @@ -1646,7 +1656,8 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { return; }; - let block = StacksChainState::extract_stacks_block(&next_staging_block).unwrap(); + let block = + StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block"); let block_size = next_staging_block.block_data.len() as u64; let parent_block_header = match &parent_header_info.anchored_header { @@ -1731,3 +1742,224 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { } }; } + +/// Perform an analysis of the anti-MEV algorithm in epoch 3.0, vis-a-vis the status quo. +/// Results are printed to stdout. +/// Exits with 0 on success, and 1 on failure. 
+fn analyze_sortition_mev(argv: Vec) { + if argv.len() < 7 || (argv.len() >= 7 && argv.len() % 2 != 1) { + eprintln!( + "Usage: {} /path/to/burnchain/db /path/to/sortition/db /path/to/chainstate/db start_height end_height [advantage_miner advantage_burn ..]", + &argv[0] + ); + process::exit(1); + } + + let burnchaindb_path = argv[2].clone(); + let sortdb_path = argv[3].clone(); + let chainstate_path = argv[4].clone(); + let start_height: u64 = argv[5].parse().unwrap(); + let end_height: u64 = argv[6].parse().unwrap(); + + let mut advantages = HashMap::new(); + if argv.len() >= 7 { + let mut i = 7; + while i + 2 < argv.len() { + let advantaged_miner = argv[i].clone(); + let advantage: u64 = argv[i + 1].parse().unwrap(); + advantages.insert(advantaged_miner, advantage); + i += 2; + } + } + + let mut sortdb = + SortitionDB::open(&sortdb_path, true, PoxConstants::mainnet_default()).unwrap(); + sortdb.dryrun = true; + let burnchain = Burnchain::new(&burnchaindb_path, "bitcoin", "mainnet").unwrap(); + let burnchaindb = BurnchainDB::connect(&burnchaindb_path, &burnchain, true).unwrap(); + let (mut chainstate, _) = + StacksChainState::open(true, 0x00000001, &chainstate_path, None).unwrap(); + + let mut wins_epoch2 = BTreeMap::new(); + let mut wins_epoch3 = BTreeMap::new(); + + for height in start_height..end_height { + debug!("Get ancestor snapshots for {}", height); + let (tip_sort_id, parent_ancestor_sn, ancestor_sn) = { + let mut sort_tx = sortdb.tx_begin_at_tip(); + let tip_sort_id = sort_tx.tip(); + let ancestor_sn = sort_tx + .get_block_snapshot_by_height(height) + .unwrap() + .unwrap(); + let parent_ancestor_sn = sort_tx + .get_block_snapshot_by_height(height - 1) + .unwrap() + .unwrap(); + (tip_sort_id, parent_ancestor_sn, ancestor_sn) + }; + + let mut burn_block = + BurnchainDB::get_burnchain_block(burnchaindb.conn(), &ancestor_sn.burn_header_hash) + .unwrap(); + + debug!( + "Get reward cycle info at {}", + burn_block.header.block_height + ); + let rc_info_opt = 
get_reward_cycle_info( + burn_block.header.block_height, + &burn_block.header.parent_block_hash, + &tip_sort_id, + &burnchain, + &burnchaindb, + &mut chainstate, + &mut sortdb, + &OnChainRewardSetProvider::new(), + false, + ) + .unwrap(); + + let mut ops = burn_block.ops.clone(); + for op in ops.iter_mut() { + if let BlockstackOperationType::LeaderBlockCommit(op) = op { + if let Some(extra_burn) = advantages.get(&op.apparent_sender.to_string()) { + debug!( + "Miner {} gets {} extra burn fee", + &op.apparent_sender.to_string(), + extra_burn + ); + op.burn_fee += *extra_burn; + } + } + } + burn_block.ops = ops; + + debug!("Re-evaluate sortition at height {}", height); + let (next_sn, state_transition) = sortdb + .evaluate_sortition( + &burn_block.header, + burn_block.ops.clone(), + &burnchain, + &tip_sort_id, + rc_info_opt, + |_| (), + ) + .unwrap(); + + assert_eq!(next_sn.block_height, ancestor_sn.block_height); + assert_eq!(next_sn.burn_header_hash, ancestor_sn.burn_header_hash); + + let mut sort_tx = sortdb.tx_begin_at_tip(); + let tip_pox_id = sort_tx.get_pox_id().unwrap(); + let next_sn_nakamoto = BlockSnapshot::make_snapshot_in_epoch( + &mut sort_tx, + &burnchain, + &ancestor_sn.sortition_id, + &tip_pox_id, + &parent_ancestor_sn, + &burn_block.header, + &state_transition, + 0, + StacksEpochId::Epoch30, + ) + .unwrap(); + + assert_eq!(next_sn.block_height, next_sn_nakamoto.block_height); + assert_eq!(next_sn.burn_header_hash, next_sn_nakamoto.burn_header_hash); + + let winner_epoch2 = get_block_commit_by_txid( + &sort_tx, + &ancestor_sn.sortition_id, + &next_sn.winning_block_txid, + ) + .unwrap() + .map(|cmt| format!("{:?}", &cmt.apparent_sender.to_string())) + .unwrap_or("(null)".to_string()); + + let winner_epoch3 = get_block_commit_by_txid( + &sort_tx, + &ancestor_sn.sortition_id, + &next_sn_nakamoto.winning_block_txid, + ) + .unwrap() + .map(|cmt| format!("{:?}", &cmt.apparent_sender.to_string())) + .unwrap_or("(null)".to_string()); + + wins_epoch2.insert( + 
(next_sn.block_height, next_sn.burn_header_hash), + winner_epoch2, + ); + wins_epoch3.insert( + ( + next_sn_nakamoto.block_height, + next_sn_nakamoto.burn_header_hash, + ), + winner_epoch3, + ); + } + + let mut all_wins_epoch2 = BTreeMap::new(); + let mut all_wins_epoch3 = BTreeMap::new(); + + println!("Wins epoch 2"); + println!("------------"); + println!("height,burn_header_hash,winner"); + for ((height, bhh), winner) in wins_epoch2.iter() { + println!("{},{},{}", height, bhh, winner); + if let Some(cnt) = all_wins_epoch2.get_mut(winner) { + *cnt += 1; + } else { + all_wins_epoch2.insert(winner, 1); + } + } + + println!("------------"); + println!("Wins epoch 3"); + println!("------------"); + println!("height,burn_header_hash,winner"); + for ((height, bhh), winner) in wins_epoch3.iter() { + println!("{},{},{}", height, bhh, winner); + if let Some(cnt) = all_wins_epoch3.get_mut(winner) { + *cnt += 1; + } else { + all_wins_epoch3.insert(winner, 1); + } + } + + println!("---------------"); + println!("Differences"); + println!("---------------"); + println!("height,burn_header_hash,winner_epoch2,winner_epoch3"); + for ((height, bhh), winner) in wins_epoch2.iter() { + let Some(epoch3_winner) = wins_epoch3.get(&(*height, *bhh)) else { + continue; + }; + if epoch3_winner != winner { + println!("{},{},{},{}", height, bhh, winner, epoch3_winner); + } + } + + println!("---------------"); + println!("All epoch2 wins"); + println!("---------------"); + println!("miner,count"); + for (winner, count) in all_wins_epoch2.iter() { + println!("{},{}", winner, count); + } + + println!("---------------"); + println!("All epoch3 wins"); + println!("---------------"); + println!("miner,count,degradation"); + for (winner, count) in all_wins_epoch3.into_iter() { + let degradation = (count as f64) + / (all_wins_epoch2 + .get(&winner) + .map(|cnt| *cnt as f64) + .unwrap_or(0.00000000000001f64)); + println!("{},{},{}", &winner, count, degradation); + } + + process::exit(0); +} diff 
--git a/stackslib/src/monitoring/mod.rs b/stackslib/src/monitoring/mod.rs index fa83fe97ab..00411db70c 100644 --- a/stackslib/src/monitoring/mod.rs +++ b/stackslib/src/monitoring/mod.rs @@ -46,9 +46,10 @@ pub fn increment_rpc_calls_counter() { prometheus::RPC_CALL_COUNTER.inc(); } +#[allow(unused_mut)] pub fn instrument_http_request_handler( conv_http: &mut ConversationHttp, - mut req: StacksHttpRequest, + #[allow(unused_mut)] mut req: StacksHttpRequest, handler: F, ) -> Result where diff --git a/stackslib/src/net/api/getstxtransfercost.rs b/stackslib/src/net/api/getstxtransfercost.rs index 5f732c6500..b8801e7d7c 100644 --- a/stackslib/src/net/api/getstxtransfercost.rs +++ b/stackslib/src/net/api/getstxtransfercost.rs @@ -16,6 +16,7 @@ use std::io::{Read, Write}; +use clarity::vm::costs::ExecutionCost; use regex::{Captures, Regex}; use stacks_common::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksBlockId, StacksPublicKey, @@ -23,6 +24,7 @@ use stacks_common::types::chainstate::{ use stacks_common::types::net::PeerHost; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{Hash160, Sha256Sum}; +use url::form_urlencoded; use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::Txid; @@ -30,19 +32,23 @@ use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::chainstate::stacks::db::StacksChainState; use crate::core::mempool::MemPoolDB; +use crate::net::api::postfeerate::RPCPostFeeRateRequestHandler; use crate::net::http::{ - parse_json, Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, - HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, + parse_json, Error, HttpBadRequest, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, }; use crate::net::httpcore::{ HttpPreambleExtensions, RPCRequestHandler, 
StacksHttpRequest, StacksHttpResponse, }; use crate::net::p2p::PeerNetwork; -use crate::net::{Error as NetError, StacksNodeState}; +use crate::net::{Error as NetError, HttpServerError, StacksNodeState}; use crate::version_string; +pub(crate) const SINGLESIG_TX_TRANSFER_LEN: u64 = 180; + #[derive(Clone)] pub struct RPCGetStxTransferCostRequestHandler {} + impl RPCGetStxTransferCostRequestHandler { pub fn new() -> Self { Self {} @@ -74,7 +80,7 @@ impl HttpRequest for RPCGetStxTransferCostRequestHandler { ) -> Result { if preamble.get_content_length() != 0 { return Err(Error::DecodeError( - "Invalid Http request: expected 0-length body for GetInfo".to_string(), + "Invalid Http request: expected 0-length body".to_string(), )); } Ok(HttpRequestContents::new().query_string(query)) @@ -92,9 +98,57 @@ impl RPCRequestHandler for RPCGetStxTransferCostRequestHandler { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - // todo -- need to actually estimate the cost / length for token transfers - // right now, it just uses the minimum. - let fee = MINIMUM_TX_FEE_RATE_PER_BYTE; + // NOTE: The estimated length isn't needed per se because we're returning a fee rate, but + // we do need an absolute length to use the estimator (so supply a common one). + let estimated_len = SINGLESIG_TX_TRANSFER_LEN; + + let fee_resp = node.with_node_state(|_network, sortdb, _chainstate, _mempool, rpc_args| { + let tip = self.get_canonical_burn_chain_tip(&preamble, sortdb)?; + let stacks_epoch = self.get_stacks_epoch(&preamble, sortdb, tip.block_height)?; + + if let Some((_, fee_estimator, metric)) = rpc_args.get_estimators_ref() { + // STX transfer transactions have zero runtime cost + let estimated_cost = ExecutionCost::zero(); + let estimations = + RPCPostFeeRateRequestHandler::estimate_tx_fee_from_cost_and_length( + &preamble, + fee_estimator, + metric, + estimated_cost, + estimated_len, + stacks_epoch, + )? 
+ .estimations; + if estimations.len() != 3 { + // logic bug, but treat as runtime error + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new( + "Logic error in fee estimation: did not get three estimates".into(), + ), + )); + } + + // safety -- checked estimations.len() == 3 above + let median_estimation = &estimations[1]; + + // NOTE: this returns the fee _rate_ + Ok(median_estimation.fee / estimated_len) + } else { + // unlike `POST /v2/fees/transaction`, this method can't fail due to the + // unavailability of cost estimation, so just assume the minimum fee. + debug!("Fee and cost estimation not configured on this stacks node"); + Ok(MINIMUM_TX_FEE_RATE_PER_BYTE) + } + }); + + let fee = match fee_resp { + Ok(fee) => fee, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + let mut preamble = HttpResponsePreamble::ok_json(&preamble); preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); let body = HttpResponseContents::try_from_json(&fee)?; @@ -116,13 +170,9 @@ impl HttpResponse for RPCGetStxTransferCostRequestHandler { impl StacksHttpRequest { pub fn new_get_stx_transfer_cost(host: PeerHost) -> StacksHttpRequest { - StacksHttpRequest::new_for_peer( - host, - "GET".into(), - "/v2/fees/transfer".into(), - HttpRequestContents::new(), - ) - .expect("FATAL: failed to construct request from infallible data") + let contents = HttpRequestContents::new(); + StacksHttpRequest::new_for_peer(host, "GET".into(), "/v2/fees/transfer".into(), contents) + .expect("FATAL: failed to construct request from infallible data") } } diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index b804af0576..2669c64356 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -257,6 +257,7 @@ impl NakamotoBlockProposal { self.block.header.burn_spent, tenure_change, coinbase, + 
self.block.header.signer_bitvec.len(), )?; let mut miner_tenure_info = diff --git a/stackslib/src/net/api/postfeerate.rs b/stackslib/src/net/api/postfeerate.rs index ab9691fdec..376d8bf3da 100644 --- a/stackslib/src/net/api/postfeerate.rs +++ b/stackslib/src/net/api/postfeerate.rs @@ -34,7 +34,9 @@ use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::TransactionPayload; use crate::core::mempool::MemPoolDB; -use crate::cost_estimates::FeeRateEstimate; +use crate::core::StacksEpoch; +use crate::cost_estimates::metrics::CostMetric; +use crate::cost_estimates::{CostEstimator, FeeEstimator, FeeRateEstimate}; use crate::net::http::{ parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, @@ -92,6 +94,7 @@ pub struct RPCPostFeeRateRequestHandler { pub estimated_len: Option, pub transaction_payload: Option, } + impl RPCPostFeeRateRequestHandler { pub fn new() -> Self { Self { @@ -99,6 +102,48 @@ impl RPCPostFeeRateRequestHandler { transaction_payload: None, } } + + /// Estimate a transaction fee, given its execution cost estimation and length estimation + /// and cost estimators. 
+ /// Returns Ok(fee structure) on success + /// Returns Err(HTTP response) on error + pub fn estimate_tx_fee_from_cost_and_length( + preamble: &HttpRequestPreamble, + fee_estimator: &dyn FeeEstimator, + metric: &dyn CostMetric, + estimated_cost: ExecutionCost, + estimated_len: u64, + stacks_epoch: StacksEpoch, + ) -> Result { + let scalar_cost = + metric.from_cost_and_len(&estimated_cost, &stacks_epoch.block_limit, estimated_len); + let fee_rates = fee_estimator.get_rate_estimates().map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new(format!( + "Estimator RPC endpoint failed to estimate fees for tx: {:?}", + &e + )), + ) + })?; + + let mut estimations = RPCFeeEstimate::estimate_fees(scalar_cost, fee_rates).to_vec(); + + let minimum_fee = estimated_len * MINIMUM_TX_FEE_RATE_PER_BYTE; + + for estimate in estimations.iter_mut() { + if estimate.fee < minimum_fee { + estimate.fee = minimum_fee; + } + } + + Ok(RPCFeeEstimateResponse { + estimated_cost, + estimations, + estimated_cost_scalar: scalar_cost, + cost_scalar_change_by_byte: metric.change_per_byte(), + }) + } } /// Decode the HTTP request @@ -206,39 +251,14 @@ impl RPCRequestHandler for RPCPostFeeRateRequestHandler { ) })?; - let scalar_cost = metric.from_cost_and_len( - &estimated_cost, - &stacks_epoch.block_limit, - estimated_len, - ); - let fee_rates = fee_estimator.get_rate_estimates().map_err(|e| { - StacksHttpResponse::new_error( - &preamble, - &HttpBadRequest::new(format!( - "Estimator RPC endpoint failed to estimate fees for tx {}: {:?}", - &tx.name(), - &e - )), - ) - })?; - - let mut estimations = - RPCFeeEstimate::estimate_fees(scalar_cost, fee_rates).to_vec(); - - let minimum_fee = estimated_len * MINIMUM_TX_FEE_RATE_PER_BYTE; - - for estimate in estimations.iter_mut() { - if estimate.fee < minimum_fee { - estimate.fee = minimum_fee; - } - } - - Ok(RPCFeeEstimateResponse { + Self::estimate_tx_fee_from_cost_and_length( + &preamble, + fee_estimator, + metric, 
estimated_cost, - estimations, - estimated_cost_scalar: scalar_cost, - cost_scalar_change_by_byte: metric.change_per_byte(), - }) + estimated_len, + stacks_epoch, + ) } else { debug!("Fee and cost estimation not configured on this stacks node"); Err(StacksHttpResponse::new_error( diff --git a/stackslib/src/net/api/tests/getblock.rs b/stackslib/src/net/api/tests/getblock.rs index c873c52620..d670b55edc 100644 --- a/stackslib/src/net/api/tests/getblock.rs +++ b/stackslib/src/net/api/tests/getblock.rs @@ -18,7 +18,6 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; use clarity::vm::{ClarityName, ContractName}; -use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, }; diff --git a/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs index a4eb372abf..421264fd9a 100644 --- a/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs +++ b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs @@ -23,7 +23,7 @@ use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, }; use stacks_common::types::net::PeerHost; -use stacks_common::types::Address; +use stacks_common::types::{Address, StacksEpochId}; use super::TestRPC; use crate::chainstate::stacks::db::blocks::test::*; @@ -91,13 +91,13 @@ fn test_try_make_response() { ) .unwrap(); - let parent_block = make_codec_test_block(25); + let parent_block = make_codec_test_block(25, StacksEpochId::latest()); let parent_consensus_hash = ConsensusHash([0x02; 20]); let mut mblocks = make_sample_microblock_stream(&privk, &parent_block.block_hash()); mblocks.truncate(15); - let mut child_block = make_codec_test_block(25); + let mut child_block = make_codec_test_block(25, StacksEpochId::latest()); let child_consensus_hash = ConsensusHash([0x03; 20]); 
child_block.header.parent_block = parent_block.block_hash(); diff --git a/stackslib/src/net/api/tests/getmicroblocks_indexed.rs b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs index 0676ecc497..aba7fd5c23 100644 --- a/stackslib/src/net/api/tests/getmicroblocks_indexed.rs +++ b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs @@ -23,7 +23,7 @@ use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, }; use stacks_common::types::net::PeerHost; -use stacks_common::types::Address; +use stacks_common::types::{Address, StacksEpochId}; use super::TestRPC; use crate::chainstate::stacks::db::blocks::test::*; @@ -89,7 +89,7 @@ fn test_try_make_response() { "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", ) .unwrap(); - let parent_block = make_codec_test_block(25); + let parent_block = make_codec_test_block(25, StacksEpochId::latest()); let parent_consensus_hash = ConsensusHash([0x02; 20]); let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( &parent_consensus_hash, @@ -99,7 +99,7 @@ fn test_try_make_response() { let mut mblocks = make_sample_microblock_stream(&privk, &parent_block.block_hash()); mblocks.truncate(15); - let mut child_block = make_codec_test_block(25); + let mut child_block = make_codec_test_block(25, StacksEpochId::latest()); let child_consensus_hash = ConsensusHash([0x03; 20]); child_block.header.parent_block = parent_block.block_hash(); diff --git a/stackslib/src/net/api/tests/getstxtransfercost.rs b/stackslib/src/net/api/tests/getstxtransfercost.rs index 6c4cccc369..66e557f413 100644 --- a/stackslib/src/net/api/tests/getstxtransfercost.rs +++ b/stackslib/src/net/api/tests/getstxtransfercost.rs @@ -25,6 +25,7 @@ use stacks_common::types::Address; use super::test_rpc; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::getstxtransfercost::SINGLESIG_TX_TRANSFER_LEN; use 
crate::net::api::*; use crate::net::connection::ConnectionOptions; use crate::net::httpcore::{ @@ -67,6 +68,7 @@ fn test_try_make_response() { let mut responses = test_rpc(function_name!(), vec![request]); assert_eq!(responses.len(), 1); + responses.reverse(); let response = responses.pop().unwrap(); debug!( @@ -80,5 +82,6 @@ fn test_try_make_response() { ); let fee_rate = response.decode_stx_transfer_fee().unwrap(); + debug!("fee_rate = {:?}", &fee_rate); assert_eq!(fee_rate, MINIMUM_TX_FEE_RATE_PER_BYTE); } diff --git a/stackslib/src/net/api/tests/postblock.rs b/stackslib/src/net/api/tests/postblock.rs index 287e97f613..7412df9334 100644 --- a/stackslib/src/net/api/tests/postblock.rs +++ b/stackslib/src/net/api/tests/postblock.rs @@ -20,7 +20,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddre use clarity::vm::{ClarityName, ContractName, Value}; use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; use stacks_common::types::net::PeerHost; -use stacks_common::types::Address; +use stacks_common::types::{Address, StacksEpochId}; use super::TestRPC; use crate::chainstate::stacks::test::make_codec_test_block; @@ -38,7 +38,7 @@ fn test_try_parse_request() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); - let block = make_codec_test_block(3); + let block = make_codec_test_block(3, StacksEpochId::Epoch25); let request = StacksHttpRequest::new_post_block(addr.into(), ConsensusHash([0x11; 20]), block.clone()); let bytes = request.try_serialize().unwrap(); diff --git a/stackslib/src/net/api/tests/postmempoolquery.rs b/stackslib/src/net/api/tests/postmempoolquery.rs index 1f528c57c5..b669beb2e4 100644 --- a/stackslib/src/net/api/tests/postmempoolquery.rs +++ b/stackslib/src/net/api/tests/postmempoolquery.rs @@ -20,7 +20,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use clarity::vm::types::{PrincipalData, 
QualifiedContractIdentifier, StacksAddressExtensions}; use clarity::vm::{ClarityName, ContractName, Value}; -use stacks_common::codec::{read_next, Error as CodecError, StacksMessageCodec}; +use stacks_common::codec::{Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, }; diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 1c116a6174..9ef77e169a 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -1142,7 +1142,7 @@ impl PeerDB { } /// Set a peer as an initial peer - fn set_initial_peer( + pub fn set_initial_peer( tx: &Transaction, network_id: u32, peer_addr: &PeerAddress, diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 8df013a8c0..62a5d02470 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -52,10 +52,7 @@ pub const INV_SYNC_INTERVAL: u64 = 150; #[cfg(test)] pub const INV_SYNC_INTERVAL: u64 = 3; -#[cfg(not(test))] pub const INV_REWARD_CYCLES: u64 = 2; -#[cfg(test)] -pub const INV_REWARD_CYCLES: u64 = 1; #[derive(Debug, PartialEq, Clone)] pub struct PeerBlocksInv { @@ -1083,7 +1080,7 @@ impl InvState { pub fn cull_bad_peers(&mut self) -> HashSet { let mut bad_peers = HashSet::new(); for (nk, stats) in self.block_stats.iter() { - if stats.status == NodeStatus::Broken || stats.status == NodeStatus::Dead { + if stats.status == NodeStatus::Broken { debug!( "Peer {:?} has node status {:?}; culling...", nk, &stats.status @@ -1756,7 +1753,7 @@ impl PeerNetwork { } /// Determine at which reward cycle to begin scanning inventories - fn get_block_scan_start(&self, sortdb: &SortitionDB, highest_remote_reward_cycle: u64) -> u64 { + pub(crate) fn get_block_scan_start(&self, sortdb: &SortitionDB) -> u64 { // see if the stacks tip affirmation map and heaviest affirmation map diverge. If so, then // start scaning at the reward cycle just before that. 
let am_rescan_rc = self @@ -1783,19 +1780,18 @@ impl PeerNetwork { .block_height_to_reward_cycle(stacks_tip_burn_block_height) .unwrap_or(0); - let start_reward_cycle = cmp::min( - stacks_tip_rc, - highest_remote_reward_cycle.saturating_sub(self.connection_opts.inv_reward_cycles), - ); + let start_reward_cycle = + stacks_tip_rc.saturating_sub(self.connection_opts.inv_reward_cycles); let rescan_rc = cmp::min(am_rescan_rc, start_reward_cycle); test_debug!( - "begin blocks inv scan at {} = min({},{},{})", + "begin blocks inv scan at {} = min({},{}) stacks_tip_am={} heaviest_am={}", rescan_rc, - stacks_tip_rc, - highest_remote_reward_cycle.saturating_sub(self.connection_opts.inv_reward_cycles), - am_rescan_rc + am_rescan_rc, + start_reward_cycle, + &self.stacks_tip_affirmation_map, + &self.heaviest_affirmation_map ); rescan_rc } @@ -1814,12 +1810,7 @@ impl PeerNetwork { Some(x) => x, None => { // proceed to block scan - let scan_start_rc = self.get_block_scan_start( - sortdb, - self.burnchain - .block_height_to_reward_cycle(stats.inv.get_block_height()) - .unwrap_or(0), - ); + let scan_start_rc = self.get_block_scan_start(sortdb); debug!("{:?}: cannot make any more GetPoxInv requests for {:?}; proceeding to block inventory scan at reward cycle {}", &self.local_peer, nk, scan_start_rc); stats.reset_block_scan(scan_start_rc); @@ -1876,12 +1867,7 @@ impl PeerNetwork { // proceed with block scan. // If we're in IBD, then this is an always-allowed peer and we should // react to divergences by deepening our rescan. - let scan_start_rc = self.get_block_scan_start( - sortdb, - self.burnchain - .block_height_to_reward_cycle(stats.inv.get_block_height()) - .unwrap_or(0), - ); + let scan_start_rc = self.get_block_scan_start(sortdb); debug!( "{:?}: proceeding to block inventory scan for {:?} (diverged) at reward cycle {} (ibd={})", &self.local_peer, nk, scan_start_rc, ibd @@ -1982,12 +1968,7 @@ impl PeerNetwork { } // proceed to block scan. 
- let scan_start = self.get_block_scan_start( - sortdb, - self.burnchain - .block_height_to_reward_cycle(stats.inv.get_block_height()) - .unwrap_or(0), - ); + let scan_start = self.get_block_scan_start(sortdb); debug!( "{:?}: proceeding to block inventory scan for {:?} at reward cycle {}", &self.local_peer, nk, scan_start @@ -2368,7 +2349,6 @@ impl PeerNetwork { .unwrap_or(network.burnchain.reward_cycle_to_block_height( network.get_block_scan_start( sortdb, - network.pox_id.num_inventory_reward_cycles() as u64, ), )) .saturating_sub(sortdb.first_block_height); @@ -2455,6 +2435,10 @@ impl PeerNetwork { good_sync_peers_set.insert(random_sync_peers_list[i].clone()); } } else { + // make *sure* this list isn't empty + for bootstrap_peer in bootstrap_peers.iter() { + good_sync_peers_set.insert(bootstrap_peer.clone()); + } debug!( "{:?}: in initial block download; only inv-sync with {} always-allowed peers", &network.local_peer, @@ -2661,8 +2645,32 @@ impl PeerNetwork { (done, throttled) } + /// Check to see if an epoch2x peer has fully sync'ed. + /// (has crate visibility for testing) + pub(crate) fn check_peer_epoch2x_synced( + &self, + ibd: bool, + num_reward_cycles_synced: u64, + ) -> bool { + // either not in IBD, and we've sync'ed the highest reward cycle in the PoX vector, + // OR, + // in IBD, and we've sync'ed up to the highest sortition's reward cycle. + // + // The difference is that in the former case, the PoX inventory vector will be as long as + // the sortition history, but the number of reward cycles tracked by the inv state machine + // may be less when the node is booting up. So, we preface that check by also checking + // that we're in steady-state mode (i.e. not IBD). 
+ (!ibd && num_reward_cycles_synced >= self.pox_id.num_inventory_reward_cycles() as u64) + || (ibd + && num_reward_cycles_synced + >= self + .burnchain + .block_height_to_reward_cycle(self.burnchain_tip.block_height) + .expect("FATAL: sortition has no reward cycle")) + } + /// Check to see if an always-allowed peer has performed an epoch 2.x inventory sync - fn check_always_allowed_peer_inv_sync_epoch2x(&self) -> bool { + fn check_always_allowed_peer_inv_sync_epoch2x(&self, ibd: bool) -> bool { // only count an inv_sync as passing if there's an always-allowed node // in our inv state let always_allowed: HashSet<_> = @@ -2702,7 +2710,7 @@ impl PeerNetwork { continue; } - if stats.inv.num_reward_cycles >= self.pox_id.num_inventory_reward_cycles() as u64 { + if self.check_peer_epoch2x_synced(ibd, stats.inv.num_reward_cycles) { // we have fully sync'ed with an always-allowed peer debug!( "{:?}: Fully-sync'ed PoX inventory from {}", @@ -2763,7 +2771,7 @@ impl PeerNetwork { return work_state; } - let finished_always_allowed_inv_sync = self.check_always_allowed_peer_inv_sync_epoch2x(); + let finished_always_allowed_inv_sync = self.check_always_allowed_peer_inv_sync_epoch2x(ibd); if finished_always_allowed_inv_sync { debug!( "{:?}: synchronized inventories with at least one always-allowed peer", diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index ddf0e6a713..bd064774c5 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2641,6 +2641,26 @@ pub mod test { &self.network.local_peer } + pub fn add_neighbor( + &mut self, + n: &mut Neighbor, + stacker_dbs: Option<&[QualifiedContractIdentifier]>, + bootstrap: bool, + ) { + let mut tx = self.network.peerdb.tx_begin().unwrap(); + n.save(&mut tx, stacker_dbs).unwrap(); + if bootstrap { + PeerDB::set_initial_peer( + &tx, + self.config.network_id, + &n.addr.addrbytes, + n.addr.port, + ) + .unwrap(); + } + tx.commit().unwrap(); + } + // TODO: DRY up from PoxSyncWatchdog pub fn 
infer_initial_burnchain_block_download( burnchain: &Burnchain, @@ -2849,7 +2869,15 @@ pub mod test { &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { - let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true); + let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true, false); + (x.0, x.1, x.2) + } + + pub fn next_burnchain_block_diverge( + &mut self, + blockstack_ops: Vec, + ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true, true); (x.0, x.1, x.2) } @@ -2862,14 +2890,14 @@ pub mod test { ConsensusHash, Option, ) { - self.inner_next_burnchain_block(blockstack_ops, true, true, true) + self.inner_next_burnchain_block(blockstack_ops, true, true, true, false) } pub fn next_burnchain_block_raw( &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { - let x = self.inner_next_burnchain_block(blockstack_ops, false, false, true); + let x = self.inner_next_burnchain_block(blockstack_ops, false, false, true, false); (x.0, x.1, x.2) } @@ -2877,7 +2905,7 @@ pub mod test { &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { - let x = self.inner_next_burnchain_block(blockstack_ops, false, false, false); + let x = self.inner_next_burnchain_block(blockstack_ops, false, false, false, false); (x.0, x.1, x.2) } @@ -2890,7 +2918,7 @@ pub mod test { ConsensusHash, Option, ) { - self.inner_next_burnchain_block(blockstack_ops, false, false, true) + self.inner_next_burnchain_block(blockstack_ops, false, false, true, false) } pub fn set_ops_consensus_hash( @@ -2921,6 +2949,7 @@ pub mod test { tip_block_height: u64, tip_block_hash: &BurnchainHeaderHash, num_ops: u64, + ops_determine_block_header: bool, ) -> BurnchainBlockHeader { test_debug!( "make_next_burnchain_block: tip_block_height={} tip_block_hash={} num_ops={}", @@ -2939,8 +2968,16 @@ pub mod test { let now = BURNCHAIN_TEST_BLOCK_TIME; let 
block_header_hash = BurnchainHeaderHash::from_bitcoin_hash( - &BitcoinIndexer::mock_bitcoin_header(&parent_hdr.block_hash, now as u32) - .bitcoin_hash(), + &BitcoinIndexer::mock_bitcoin_header( + &parent_hdr.block_hash, + (now as u32) + + if ops_determine_block_header { + num_ops as u32 + } else { + 0 + }, + ) + .bitcoin_hash(), ); test_debug!( "Block header hash at {} is {}", @@ -3012,6 +3049,7 @@ pub mod test { set_consensus_hash: bool, set_burn_hash: bool, update_burnchain: bool, + ops_determine_block_header: bool, ) -> ( u64, BurnchainHeaderHash, @@ -3035,6 +3073,7 @@ pub mod test { tip.block_height, &tip.burn_header_hash, blockstack_ops.len() as u64, + ops_determine_block_header, ); if set_burn_hash { diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index c819ac049b..31c62a1f8f 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -403,6 +403,17 @@ pub trait NeighborComms { convo.is_authenticated() && convo.peer_version > 0 } + /// Are we in the process of connecting to a neighbor? + fn is_neighbor_connecting(&self, network: &PeerNetwork, nk: &NK) -> bool { + if network.is_connecting_neighbor(&nk.to_neighbor_key(network)) { + return true; + } + let Some(event_id) = self.get_connecting(network, nk) else { + return false; + }; + network.is_connecting(event_id) + } + /// Reset all comms fn reset(&mut self) { let _ = self.take_broken_neighbors(); diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index 276d04124e..7e01a0c448 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -305,16 +305,13 @@ impl PeerNetwork { // time to do a walk yet? 
if (self.walk_count > self.connection_opts.num_initial_walks || self.walk_retries > self.connection_opts.walk_retry_count) - && self.walk_deadline > get_epoch_time_secs() + && (!ibd && self.walk_deadline > get_epoch_time_secs()) { // we've done enough walks for an initial mixing, or we can't connect to anyone, // so throttle ourselves down until the walk deadline passes. - test_debug!( + debug!( "{:?}: Throttle walk until {} to walk again (walk count: {}, walk retries: {})", - &self.local_peer, - self.walk_deadline, - self.walk_count, - self.walk_retries + &self.local_peer, self.walk_deadline, self.walk_count, self.walk_retries ); return false; } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index b60146dff3..f853bb795a 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -210,6 +210,29 @@ pub enum MempoolSyncState { pub type PeerMap = HashMap; +pub struct ConnectingPeer { + socket: mio_net::TcpStream, + outbound: bool, + timestamp: u64, + nk: NeighborKey, +} + +impl ConnectingPeer { + pub fn new( + socket: mio_net::TcpStream, + outbound: bool, + timestamp: u64, + nk: NeighborKey, + ) -> Self { + Self { + socket, + outbound, + timestamp, + nk, + } + } +} + pub struct PeerNetwork { // constants pub peer_version: u32, @@ -260,7 +283,7 @@ pub struct PeerNetwork { pub peers: PeerMap, pub sockets: HashMap, pub events: HashMap, - pub connecting: HashMap, // (socket, outbound?, connection sent timestamp) + pub connecting: HashMap, pub bans: HashSet, // ongoing messages the network is sending via the p2p interface @@ -646,6 +669,24 @@ impl PeerNetwork { Ok(()) } + /// Call `bind()` only if not already bound + /// Returns: + /// - `Ok(true)` if `bind()` call was successful + /// - `Ok(false)` if `bind()` call was skipped + /// - `Err()` if `bind()` failed + #[cfg_attr(test, mutants::skip)] + pub fn try_bind( + &mut self, + my_addr: &SocketAddr, + http_addr: &SocketAddr, + ) -> Result { + if self.network.is_some() { + // Already bound + 
return Ok(false); + } + self.bind(my_addr, http_addr).map(|()| true) + } + /// Get bound neighbor key. This is how this PeerNetwork appears to other nodes. pub fn bound_neighbor_key(&self) -> &NeighborKey { &self.bind_nk @@ -1157,8 +1198,10 @@ impl PeerNetwork { let registered_event_id = network.register(self.p2p_network_handle, hint_event_id, &sock)?; - self.connecting - .insert(registered_event_id, (sock, true, get_epoch_time_secs())); + self.connecting.insert( + registered_event_id, + ConnectingPeer::new(sock, true, get_epoch_time_secs(), neighbor.clone()), + ); registered_event_id } }; @@ -1554,6 +1597,14 @@ impl PeerNetwork { self.connecting.contains_key(&event_id) } + /// Is a neighbor connecting on any event? + pub fn is_connecting_neighbor(&self, nk: &NeighborKey) -> bool { + self.connecting + .iter() + .find(|(_, peer)| peer.nk == *nk) + .is_some() + } + /// Is this neighbor key the same as the one that represents our p2p bind address? pub fn is_bound(&self, neighbor_key: &NeighborKey) -> bool { self.bind_nk.network_id == neighbor_key.network_id @@ -1829,7 +1880,7 @@ impl PeerNetwork { let _ = network.deregister(event_id, &socket); } // deregister socket if still connecting - if let Some((socket, ..)) = self.connecting.remove(&event_id) { + if let Some(ConnectingPeer { socket, .. }) = self.connecting.remove(&event_id) { let _ = network.deregister(event_id, &socket); } } @@ -2089,7 +2140,9 @@ impl PeerNetwork { fn process_connecting_sockets(&mut self, poll_state: &mut NetworkPollState) { for event_id in poll_state.ready.iter() { if self.connecting.contains_key(event_id) { - let (socket, outbound, _) = self.connecting.remove(event_id).unwrap(); + let ConnectingPeer { + socket, outbound, .. 
+ } = self.connecting.remove(event_id).unwrap(); let sock_str = format!("{:?}", &socket); if let Err(_e) = self.register_peer(*event_id, socket, outbound) { debug!( @@ -2241,9 +2294,18 @@ impl PeerNetwork { fn disconnect_unresponsive(&mut self) -> usize { let now = get_epoch_time_secs(); let mut to_remove = vec![]; - for (event_id, (socket, _, ts)) in self.connecting.iter() { - if ts + self.connection_opts.connect_timeout < now { - debug!("{:?}: Disconnect unresponsive connecting peer {:?} (event {}): timed out after {} ({} < {})s", &self.local_peer, socket, event_id, self.connection_opts.timeout, ts + self.connection_opts.timeout, now); + for (event_id, peer) in self.connecting.iter() { + if peer.timestamp + self.connection_opts.connect_timeout < now { + debug!( + "{:?}: Disconnect unresponsive connecting peer {:?} (event {} neighbor {}): timed out after {} ({} < {})s", + &self.local_peer, + &peer.socket, + event_id, + &peer.nk, + self.connection_opts.timeout, + peer.timestamp + self.connection_opts.timeout, + now + ); to_remove.push(*event_id); } } @@ -5523,21 +5585,24 @@ impl PeerNetwork { } if burnchain_tip_changed { - // wake up the inv-sync and downloader -- we have potentially more sortitions - self.hint_sync_invs(self.chain_view.burn_stable_block_height); + if !ibd { + // wake up the inv-sync and downloader -- we have potentially more sortitions + self.hint_sync_invs(self.chain_view.burn_stable_block_height); + + // set up the antientropy protocol to try pushing the latest block + // (helps if you're a miner who gets temporarily disconnected) + self.antientropy_last_push_ts = get_epoch_time_secs(); + self.antientropy_start_reward_cycle = + self.pox_id.num_inventory_reward_cycles().saturating_sub(1) as u64; + } + self.hint_download_rescan( self.chain_view .burn_stable_block_height .saturating_sub(self.burnchain.first_block_height), - false, + ibd, ); - // set up the antientropy protocol to try pushing the latest block - // (helps if you're a miner who gets 
temporarily disconnected) - self.antientropy_last_push_ts = get_epoch_time_secs(); - self.antientropy_start_reward_cycle = - self.pox_id.num_inventory_reward_cycles().saturating_sub(1) as u64; - // update tx validation information self.ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), canonical_sn.block_height)?; @@ -5850,7 +5915,7 @@ impl PeerNetwork { &stacks_epoch.block_limit, &stacks_epoch.epoch_id, ) { - warn!("Transaction rejected from mempool, {}", &e.into_json(&txid)); + info!("Transaction rejected from mempool, {}", &e.into_json(&txid)); return false; } @@ -7301,4 +7366,31 @@ mod test { assert_eq!(peer_2_mempool_txs.len(), 128); } + + #[test] + fn test_is_connecting() { + let peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_1 = TestPeer::new(peer_1_config); + let nk = peer_1.to_neighbor().addr; + + assert!(!peer_1.network.is_connecting(1)); + assert!(!peer_1.network.is_connecting_neighbor(&nk)); + + let comms = PeerNetworkComms::new(); + assert!(!comms.is_neighbor_connecting(&peer_1.network, &nk)); + + let sock = mio::net::TcpStream::connect(&SocketAddr::from(( + [127, 0, 0, 1], + peer_1.config.server_port, + ))) + .unwrap(); + peer_1.network.connecting.insert( + 1, + ConnectingPeer::new(sock, true, get_epoch_time_secs(), nk.clone()), + ); + + assert!(peer_1.network.is_connecting(1)); + assert!(peer_1.network.is_connecting_neighbor(&nk)); + assert!(comms.is_neighbor_connecting(&peer_1.network, &nk)); + } } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 28ff92ae58..5d7ef50099 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -25,6 +25,7 @@ use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; use clarity::vm::ClarityVersion; use rand::prelude::*; use rand::{thread_rng, Rng}; +use stacks_common::address::public_keys_to_address_hash; use stacks_common::codec::MAX_PAYLOAD_LEN; use stacks_common::types::chainstate::{BurnchainHeaderHash, PoxId, 
SortitionId, StacksBlockId}; use stacks_common::types::StacksEpochId; @@ -686,9 +687,9 @@ impl Relayer { if epoch_id < StacksEpochId::Epoch30 { error!("Nakamoto blocks are not supported in this epoch"); - return Err(chainstate_error::InvalidStacksBlock( - "Nakamoto blocks are not supported in this epoch".into(), - )); + return Err(chainstate_error::InvalidStacksBlock(format!( + "Nakamoto blocks are not supported in this epoch: {epoch_id}" + ))); } // don't relay this block if it's using the wrong AST rules (this would render at least one of its @@ -1464,7 +1465,7 @@ impl Relayer { /// Verify that a relayed microblock is not problematic -- i.e. it doesn't contain any /// problematic transactions. This is a static check -- we only look at the microblock /// contents. - /// + /// /// Returns true if the check passed -- i.e. no problems. /// Returns false if not pub fn static_check_problematic_relayed_microblock( @@ -2644,6 +2645,7 @@ pub mod test { use crate::chainstate::stacks::test::codec_all_transactions; use crate::chainstate::stacks::tests::{ make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, + make_stacks_transfer_order_independent_p2sh, make_stacks_transfer_order_independent_p2wsh, make_user_stacks_transfer, }; use crate::chainstate::stacks::{Error as ChainstateError, *}; @@ -2671,6 +2673,7 @@ pub mod test { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); assert!(all_transactions.len() > MAX_RECENT_MESSAGES); @@ -2822,6 +2825,7 @@ pub mod test { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); assert!(all_transactions.len() > MAX_RECENT_MESSAGES); @@ -5866,7 +5870,6 @@ pub mod test { peer.sortdb = Some(sortdb); peer.stacks_node = Some(node); } - #[test] fn test_block_versioned_smart_contract_gated_at_v210() { let mut peer_config = TestPeerConfig::new(function_name!(), 4248, 4249); diff --git 
a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index c920a3ceff..a26fa2f7b4 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -20,6 +20,7 @@ use std::sync::mpsc::{sync_channel, Receiver, RecvError, SendError, SyncSender, use mio::net as mio_net; use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_secs; use crate::burnchains::{Burnchain, BurnchainView}; @@ -891,7 +892,7 @@ mod test { 1, 0, |client_id, ref mut chainstate| { - let peer_server_block = make_codec_test_block(25); + let peer_server_block = make_codec_test_block(25, StacksEpochId::Epoch25); let peer_server_consensus_hash = ConsensusHash([(client_id + 1) as u8; 20]); let index_block_hash = StacksBlockHeader::make_index_block_hash( &peer_server_consensus_hash, @@ -924,7 +925,7 @@ mod test { // should be a Block let http_response_bytes = http_response_bytes_res.unwrap(); - let peer_server_block = make_codec_test_block(25); + let peer_server_block = make_codec_test_block(25, StacksEpochId::Epoch25); let peer_server_consensus_hash = ConsensusHash([(client_id + 1) as u8; 20]); let index_block_hash = StacksBlockHeader::make_index_block_hash( &peer_server_consensus_hash, @@ -959,7 +960,7 @@ mod test { 10, 0, |client_id, ref mut chainstate| { - let peer_server_block = make_codec_test_block(25); + let peer_server_block = make_codec_test_block(25, StacksEpochId::latest()); let peer_server_consensus_hash = ConsensusHash([(client_id + 1) as u8; 20]); let index_block_hash = StacksBlockHeader::make_index_block_hash( &peer_server_consensus_hash, @@ -992,7 +993,7 @@ mod test { // should be a Block let http_response_bytes = http_response_bytes_res.unwrap(); - let peer_server_block = make_codec_test_block(25); + let peer_server_block = make_codec_test_block(25, StacksEpochId::latest()); let peer_server_consensus_hash = ConsensusHash([(client_id + 1) as u8; 20]); let index_block_hash = 
StacksBlockHeader::make_index_block_hash( &peer_server_consensus_hash, @@ -1308,7 +1309,7 @@ mod test { 1, 600, |client_id, ref mut chainstate| { - let peer_server_block = make_codec_test_block(25); + let peer_server_block = make_codec_test_block(25, StacksEpochId::latest()); let peer_server_consensus_hash = ConsensusHash([(client_id + 1) as u8; 20]); let index_block_hash = StacksBlockHeader::make_index_block_hash( &peer_server_consensus_hash, diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 7a1b29b2ee..da3ffa4555 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -151,7 +151,7 @@ pub const STACKERDB_MAX_PAGE_COUNT: u32 = 2; pub const STACKERDB_SLOTS_FUNCTION: &str = "stackerdb-get-signer-slots"; pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; -pub const MINER_SLOT_COUNT: u32 = 2; +pub const MINER_SLOT_COUNT: u32 = 1; /// Final result of synchronizing state with a remote set of DB replicas pub struct StackerDBSyncResult { @@ -161,12 +161,12 @@ pub struct StackerDBSyncResult { pub chunk_invs: HashMap, /// list of data to store pub chunks_to_store: Vec, - /// neighbors that died while syncing - dead: HashSet, - /// neighbors that misbehaved while syncing - broken: HashSet, /// neighbors that have stale views, but are otherwise online pub(crate) stale: HashSet, + /// number of connections made + pub num_connections: u64, + /// number of attempted connections + pub num_attempted_connections: u64, } /// Settings for the Stacker DB @@ -390,6 +390,10 @@ pub struct StackerDBSync { need_resync: bool, /// Track stale neighbors pub(crate) stale_neighbors: HashSet, + /// How many attempted connections have been made in the last pass (gets reset) + num_attempted_connections: u64, + /// How many connections have been made in the last pass (gets reset) + num_connections: u64, } impl StackerDBSyncResult { @@ -400,9 +404,9 @@ impl StackerDBSyncResult { contract_id: 
chunk.contract_id, chunk_invs: HashMap::new(), chunks_to_store: vec![chunk.chunk_data], - dead: HashSet::new(), - broken: HashSet::new(), stale: HashSet::new(), + num_attempted_connections: 0, + num_connections: 0, } } } @@ -433,16 +437,6 @@ impl PeerNetwork { if let Some(config) = stacker_db_configs.get(sc) { match stacker_db_sync.run(self, config) { Ok(Some(result)) => { - // clear broken nodes - for broken in result.broken.iter() { - debug!("StackerDB replica is broken: {:?}", broken); - self.deregister_and_ban_neighbor(broken); - } - // clear dead nodes - for dead in result.dead.iter() { - debug!("StackerDB replica is dead: {:?}", dead); - self.deregister_neighbor(dead); - } results.push(result); } Ok(None) => {} diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 66ad54601a..c3e61acbc4 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -72,6 +72,8 @@ impl StackerDBSync { last_run_ts: 0, need_resync: false, stale_neighbors: HashSet::new(), + num_connections: 0, + num_attempted_connections: 0, }; dbsync.reset(None, config); dbsync @@ -158,7 +160,7 @@ impl StackerDBSync { } /// Reset this state machine, and get the StackerDBSyncResult with newly-obtained chunk data - /// and newly-learned information about broken and dead peers. 
+ /// and newly-learned information about connection statistics pub fn reset( &mut self, network: Option<&PeerNetwork>, @@ -176,9 +178,9 @@ impl StackerDBSync { contract_id: self.smart_contract_id.clone(), chunk_invs, chunks_to_store: chunks, - dead: self.comms.take_dead_neighbors(), - broken: self.comms.take_broken_neighbors(), stale: std::mem::replace(&mut self.stale_neighbors, HashSet::new()), + num_connections: self.num_connections, + num_attempted_connections: self.num_attempted_connections, }; // keep all connected replicas, and replenish from config hints and the DB as needed @@ -211,6 +213,8 @@ impl StackerDBSync { self.last_run_ts = get_epoch_time_secs(); self.state = StackerDBSyncState::ConnectBegin; + self.num_connections = 0; + self.num_attempted_connections = 0; result } @@ -612,7 +616,7 @@ impl StackerDBSync { /// Returns Err(..) on DB query error pub fn connect_begin(&mut self, network: &mut PeerNetwork) -> Result { if self.replicas.len() == 0 { - // find some from the peer Db + // find some from the peer DB let replicas = self.find_qualified_replicas(network)?; self.replicas = replicas; } @@ -628,6 +632,15 @@ impl StackerDBSync { let naddrs = mem::replace(&mut self.replicas, HashSet::new()); for naddr in naddrs.into_iter() { + if self.comms.is_neighbor_connecting(network, &naddr) { + debug!( + "{:?}: connect_begin: already connecting to StackerDB peer {:?}", + network.get_local_peer(), + &naddr + ); + self.replicas.insert(naddr); + continue; + } if self.comms.has_neighbor_session(network, &naddr) { debug!( "{:?}: connect_begin: already connected to StackerDB peer {:?}", @@ -651,10 +664,13 @@ impl StackerDBSync { network.get_local_peer(), &naddr ); + self.num_attempted_connections += 1; + self.num_connections += 1; } Ok(false) => { // need to retry self.replicas.insert(naddr); + self.num_attempted_connections += 1; } Err(_e) => { info!("Failed to begin session with {:?}: {:?}", &naddr, &_e); @@ -718,7 +734,7 @@ impl StackerDBSync { ); // disconnect - 
self.comms.add_dead(network, &naddr); + self.connected_replicas.remove(&naddr); continue; } diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index eeb2f5aae5..d1ac5e58be 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -32,11 +32,12 @@ use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::net::p2p::PeerNetwork; use crate::net::relay::Relayer; use crate::net::stackerdb::db::SlotValidation; use crate::net::stackerdb::{StackerDBConfig, StackerDBs}; use crate::net::test::{TestPeer, TestPeerConfig}; -use crate::net::{Error as net_error, StackerDBChunkData}; +use crate::net::{Error as net_error, NetworkResult, StackerDBChunkData}; use crate::util_lib::test::with_timeout; const BASE_PORT: u16 = 33000; @@ -179,6 +180,25 @@ fn load_stackerdb(peer: &TestPeer, idx: usize) -> Vec<(SlotMetadata, Vec)> { ret } +fn check_sync_results(network_sync: &NetworkResult) { + for res in network_sync.stacker_db_sync_results.iter() { + assert!(res.num_connections >= res.num_attempted_connections); + } +} + +fn test_reconnect(network: &mut PeerNetwork) { + let mut stacker_db_syncs = network + .stacker_db_syncs + .take() + .expect("FATAL: did not replace stacker dbs"); + + for (_sc, stacker_db_sync) in stacker_db_syncs.iter_mut() { + stacker_db_sync.connect_begin(network).unwrap(); + } + + network.stacker_db_syncs = Some(stacker_db_syncs); +} + #[test] fn test_stackerdb_replica_2_neighbors_1_chunk() { with_timeout(600, || { @@ -234,7 +254,12 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() { let res_1 = peer_1.step_with_ibd(false); let res_2 = peer_2.step_with_ibd(false); + // test that re-connects are limited to 1 per host + test_reconnect(&mut peer_1.network); + test_reconnect(&mut peer_2.network); + if let Ok(mut res) = res_1 { + 
check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_1.network.stackerdbs, &peer_1_db_configs, @@ -252,6 +277,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() { } if let Ok(mut res) = res_2 { + check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_2.network.stackerdbs, &peer_2_db_configs, @@ -354,6 +380,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { let res_2 = peer_2.step_with_ibd(false); if let Ok(mut res) = res_1 { + check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); if sync_res.stale.len() > 0 { @@ -377,6 +404,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { } if let Ok(mut res) = res_2 { + check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); if sync_res.stale.len() > 0 { @@ -428,6 +456,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { let res_2 = peer_2.step_with_ibd(false); if let Ok(mut res) = res_1 { + check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_1.network.stackerdbs, &peer_1_db_configs, @@ -445,6 +474,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { } if let Ok(mut res) = res_2 { + check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_2.network.stackerdbs, &peer_2_db_configs, @@ -550,6 +580,7 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port let res_2 = peer_2.step_with_ibd(false); if let Ok(mut res) = res_1 { + check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_1.network.stackerdbs, &peer_1_db_configs, @@ -567,6 +598,7 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port } if let Ok(mut res) = res_2 { + check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_2.network.stackerdbs, &peer_2_db_configs, @@ -686,7 +718,9 @@ fn 
inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, for i in 0..num_peers { peers[i].network.stacker_db_configs = peer_db_configs[i].clone(); let res = peers[i].step_with_ibd(false); + if let Ok(mut res) = res { + check_sync_results(&res); let rc_consensus_hash = peers[i].network.get_chain_view().rc_consensus_hash.clone(); Relayer::process_stacker_db_chunks( diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 73472c9c56..31c42c8afb 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -1954,6 +1954,7 @@ fn test_nakamoto_download_run_2_peers() { sn.block_height, &sn.burn_header_hash, ops.len() as u64, + false, ); TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); } @@ -2140,6 +2141,7 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { sn.block_height, &sn.burn_header_hash, ops.len() as u64, + false, ); TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); } diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index 0a9bc4f7f1..1837d8e1c4 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -21,6 +21,7 @@ use std::str; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey}; use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; use stacks_common::util::chunked_encoding::{ HttpChunkedTransferWriter, HttpChunkedTransferWriterState, }; @@ -442,7 +443,7 @@ fn test_http_response_type_codec() { "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", ) .unwrap(); - let test_block_info = make_codec_test_block(5); + let test_block_info = make_codec_test_block(5, StacksEpochId::latest()); let test_microblock_info = make_sample_microblock_stream(&privk, 
&test_block_info.block_hash()); let mut test_block_info_bytes = vec![]; diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index 3d349e7679..e31b6dc593 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -1241,6 +1241,89 @@ fn test_sync_inv_diagnose_nack() { ); } +#[test] +fn test_inv_sync_start_reward_cycle() { + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + peer_1_config.connection_opts.inv_reward_cycles = 0; + + let mut peer_1 = TestPeer::new(peer_1_config); + + let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + for i in 0..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_1.make_default_tenure(); + peer_1.next_burnchain_block(burn_ops.clone()); + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let _ = peer_1.step(); + + let block_scan_start = peer_1 + .network + .get_block_scan_start(peer_1.sortdb.as_ref().unwrap()); + assert_eq!(block_scan_start, 7); + + peer_1.network.connection_opts.inv_reward_cycles = 1; + + let block_scan_start = peer_1 + .network + .get_block_scan_start(peer_1.sortdb.as_ref().unwrap()); + assert_eq!(block_scan_start, 7); + + peer_1.network.connection_opts.inv_reward_cycles = 2; + + let block_scan_start = peer_1 + .network + .get_block_scan_start(peer_1.sortdb.as_ref().unwrap()); + assert_eq!(block_scan_start, 6); + + peer_1.network.connection_opts.inv_reward_cycles = 3; + + let block_scan_start = peer_1 + .network + .get_block_scan_start(peer_1.sortdb.as_ref().unwrap()); + assert_eq!(block_scan_start, 5); + + peer_1.network.connection_opts.inv_reward_cycles = 300; + + let block_scan_start = peer_1 + .network + .get_block_scan_start(peer_1.sortdb.as_ref().unwrap()); + assert_eq!(block_scan_start, 0); +} + +#[test] +fn test_inv_sync_check_peer_epoch2x_synced() { + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + peer_1_config.connection_opts.inv_reward_cycles = 0; + + 
let mut peer_1 = TestPeer::new(peer_1_config); + + let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + for i in 0..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_1.make_default_tenure(); + peer_1.next_burnchain_block(burn_ops.clone()); + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let _ = peer_1.step(); + let tip_rc = peer_1 + .network + .burnchain + .block_height_to_reward_cycle(peer_1.network.burnchain_tip.block_height) + .unwrap(); + assert!(tip_rc > 0); + + let pox_rc = peer_1.network.pox_id.num_inventory_reward_cycles() as u64; + + assert!(peer_1.network.check_peer_epoch2x_synced(true, tip_rc)); + assert!(peer_1.network.check_peer_epoch2x_synced(true, tip_rc + 1)); + assert!(!peer_1.network.check_peer_epoch2x_synced(true, tip_rc - 1)); + + assert!(peer_1.network.check_peer_epoch2x_synced(false, pox_rc)); + assert!(peer_1.network.check_peer_epoch2x_synced(false, pox_rc + 1)); + assert!(!peer_1.network.check_peer_epoch2x_synced(false, pox_rc - 1)); +} + #[test] #[ignore] fn test_sync_inv_2_peers_plain() { @@ -1248,12 +1331,15 @@ fn test_sync_inv_2_peers_plain() { let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + peer_1_config.connection_opts.inv_reward_cycles = 10; + peer_2_config.connection_opts.inv_reward_cycles = 10; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; let first_stacks_block_height = { let sn = @@ -1422,12 +1508,15 @@ fn test_sync_inv_2_peers_stale() { let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_2_config = 
TestPeerConfig::new(function_name!(), 0, 0); - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + peer_1_config.connection_opts.inv_reward_cycles = 10; + peer_2_config.connection_opts.inv_reward_cycles = 10; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; let first_stacks_block_height = { let sn = @@ -1525,14 +1614,17 @@ fn test_sync_inv_2_peers_unstable() { let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); - let stable_confs = peer_1_config.burnchain.stable_confirmations as u64; + peer_1_config.connection_opts.inv_reward_cycles = 10; + peer_2_config.connection_opts.inv_reward_cycles = 10; - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + let stable_confs = peer_1_config.burnchain.stable_confirmations as u64; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; let first_stacks_block_height = { @@ -1559,7 +1651,7 @@ fn test_sync_inv_2_peers_unstable() { } else { // peer 1 diverges test_debug!("Peer 1 diverges at {}", i + first_stacks_block_height); - peer_1.next_burnchain_block(vec![]); + peer_1.next_burnchain_block_diverge(vec![burn_ops[0].clone()]); } } @@ -1734,8 +1826,8 @@ fn test_sync_inv_2_peers_different_pox_vectors() { let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); - 
peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + peer_1_config.connection_opts.inv_reward_cycles = 10; + peer_2_config.connection_opts.inv_reward_cycles = 10; let reward_cycle_length = peer_1_config.burnchain.pox_constants.reward_cycle_length as u64; assert_eq!(reward_cycle_length, 5); @@ -1743,6 +1835,9 @@ fn test_sync_inv_2_peers_different_pox_vectors() { let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let num_blocks = (GETPOXINV_MAX_BITLEN * 3) as u64; let first_stacks_block_height = { diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 72cc8d2491..bceb484cd7 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -62,7 +62,7 @@ name = "stacks-events" path = "src/stacks_events.rs" [features] -monitoring_prom = ["stacks/monitoring_prom"] +monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom"] slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] prod-genesis-chainstate = [] default = [] diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index f96c0c198b..8cf9ac82be 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -703,8 +703,8 @@ impl BitcoinRegtestController { utxos_to_exclude: Option, block_height: u64, ) -> Option { - // if mock mining, do not even both requesting UTXOs - if self.config.node.mock_mining { + // if mock mining, do not even bother requesting UTXOs + if self.config.get_node_config(false).mock_mining { return None; } diff --git a/testnet/stacks-node/src/chain_data.rs b/testnet/stacks-node/src/chain_data.rs index 
4170cf6f6d..0fcc7ca863 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/testnet/stacks-node/src/chain_data.rs @@ -163,6 +163,11 @@ impl MinerStats { // calculate the burn distribution from these operations. // The resulting distribution will contain the user burns that match block commits let burn_dist = BurnSamplePoint::make_min_median_distribution( + if burnchain.is_in_prepare_phase(burn_block_height) { + 1 + } else { + MINING_COMMITMENT_WINDOW + }, windowed_block_commits, windowed_missed_commits, burn_blocks, @@ -647,6 +652,7 @@ pub mod tests { }; let burn_dist = vec![ BurnSamplePoint { + frequency: 10, burns: block_commit_1.burn_fee.into(), median_burn: block_commit_2.burn_fee.into(), range_start: Uint256::zero(), @@ -659,6 +665,7 @@ pub mod tests { candidate: block_commit_1.clone(), }, BurnSamplePoint { + frequency: 10, burns: block_commit_2.burn_fee.into(), median_burn: block_commit_2.burn_fee.into(), range_start: Uint256([ @@ -676,6 +683,7 @@ pub mod tests { candidate: block_commit_2.clone(), }, BurnSamplePoint { + frequency: 10, burns: (block_commit_3.burn_fee).into(), median_burn: block_commit_3.burn_fee.into(), range_start: Uint256([ diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index d738745ca5..ad02341343 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -4,7 +4,7 @@ use std::path::PathBuf; use std::str::FromStr; use std::sync::{Arc, Mutex}; use std::time::Duration; -use std::{fs, thread}; +use std::{cmp, fs, thread}; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdentifier}; @@ -101,7 +101,8 @@ mod tests { seed = "invalid-hex-value" "#, ) - .unwrap() + .unwrap(), + false ) .unwrap_err() ); @@ -115,7 +116,8 @@ mod tests { local_peer_seed = "invalid-hex-value" "#, ) - .unwrap() + .unwrap(), + false ) .unwrap_err() ); @@ -130,6 +132,7 @@ mod tests { "#, ) .unwrap(), + false, ) .unwrap_err(); assert_eq!( @@ 
-137,7 +140,7 @@ mod tests { &actual_err_msg[..expected_err_prefix.len()] ); - assert!(Config::from_config_file(ConfigFile::from_str("").unwrap()).is_ok()); + assert!(Config::from_config_file(ConfigFile::from_str("").unwrap(), false).is_ok()); } #[test] @@ -195,6 +198,7 @@ mod tests { "#, ) .unwrap(), + false, ) .expect("Expected to be able to parse block proposal token from file"); @@ -218,6 +222,7 @@ mod tests { "# )) .expect("Expected to be able to parse config file from string"), + false, ) .expect("Expected to be able to parse affirmation map from file"); @@ -241,7 +246,7 @@ mod tests { )) .expect("Expected to be able to parse config file from string"); - assert!(Config::from_config_file(file).is_err()); + assert!(Config::from_config_file(file, false).is_err()); } #[test] @@ -249,6 +254,7 @@ mod tests { let config = Config::from_config_file( ConfigFile::from_str(r#""#) .expect("Expected to be able to parse config file from string"), + false, ) .expect("Expected to be able to parse affirmation map from file"); @@ -266,6 +272,7 @@ mod tests { "#, ) .expect("Expected to be able to parse config file from string"), + false, ) .expect("Expected to be able to parse affirmation map from file"); // Should default add xenon affirmation overrides @@ -291,6 +298,7 @@ mod tests { "#, )) .expect("Expected to be able to parse config file from string"), + false, ) .expect("Expected to be able to parse affirmation map from file"); // Should default add xenon affirmation overrides, but overwrite with the configured one above @@ -537,7 +545,7 @@ impl Config { let Ok(config_file) = ConfigFile::from_path(path.as_str()) else { return self.burnchain.clone(); }; - let Ok(config) = Config::from_config_file(config_file) else { + let Ok(config) = Config::from_config_file(config_file, false) else { return self.burnchain.clone(); }; config.burnchain @@ -552,12 +560,25 @@ impl Config { let Ok(config_file) = ConfigFile::from_path(path.as_str()) else { return self.miner.clone(); }; - let 
Ok(config) = Config::from_config_file(config_file) else { + let Ok(config) = Config::from_config_file(config_file, false) else { return self.miner.clone(); }; return config.miner; } + pub fn get_node_config(&self, resolve_bootstrap_nodes: bool) -> NodeConfig { + let Some(path) = &self.config_path else { + return self.node.clone(); + }; + let Ok(config_file) = ConfigFile::from_path(path.as_str()) else { + return self.node.clone(); + }; + let Ok(config) = Config::from_config_file(config_file, resolve_bootstrap_nodes) else { + return self.node.clone(); + }; + return config.node; + } + /// Apply any test settings to this burnchain config struct #[cfg_attr(test, mutants::skip)] fn apply_test_settings(&self, burnchain: &mut Burnchain) { @@ -928,11 +949,18 @@ impl Config { Ok(out_epochs) } - pub fn from_config_file(config_file: ConfigFile) -> Result { - Self::from_config_default(config_file, Config::default()) + pub fn from_config_file( + config_file: ConfigFile, + resolve_bootstrap_nodes: bool, + ) -> Result { + Self::from_config_default(config_file, Config::default(), resolve_bootstrap_nodes) } - fn from_config_default(config_file: ConfigFile, default: Config) -> Result { + fn from_config_default( + config_file: ConfigFile, + default: Config, + resolve_bootstrap_nodes: bool, + ) -> Result { let Config { node: default_node_config, burnchain: default_burnchain_config, @@ -983,9 +1011,15 @@ impl Config { }; if let Some(bootstrap_node) = bootstrap_node { - node.set_bootstrap_nodes(bootstrap_node, burnchain.chain_id, burnchain.peer_version); + if resolve_bootstrap_nodes { + node.set_bootstrap_nodes( + bootstrap_node, + burnchain.chain_id, + burnchain.peer_version, + ); + } } else { - if is_mainnet { + if is_mainnet && resolve_bootstrap_nodes { let bootstrap_node = ConfigFile::mainnet().node.unwrap().bootstrap_node.unwrap(); node.set_bootstrap_nodes( bootstrap_node, @@ -1220,6 +1254,26 @@ impl Config { self.events_observers.len() > 0 } + pub fn 
make_nakamoto_block_builder_settings( + &self, + miner_status: Arc>, + ) -> BlockBuilderSettings { + let miner_config = self.get_miner_config(); + BlockBuilderSettings { + max_miner_time_ms: miner_config.nakamoto_attempt_time_ms, + mempool_settings: MemPoolWalkSettings { + max_walk_time_ms: miner_config.nakamoto_attempt_time_ms, + consider_no_estimate_tx_prob: miner_config.probability_pick_no_estimate_tx, + nonce_cache_size: miner_config.nonce_cache_size, + candidate_retry_cache_size: miner_config.candidate_retry_cache_size, + txs_to_consider: miner_config.txs_to_consider, + filter_origins: miner_config.filter_origins, + }, + miner_status, + confirm_microblocks: false, + } + } + pub fn make_block_builder_settings( &self, attempt: u64, @@ -1268,6 +1322,20 @@ impl Config { } None } + + /// Determine how long the p2p state machine should poll for. + /// If the node is not mining, then use a default value. + /// If the node is mining, however, then at the time of this writing, the miner's latency is in + /// part dependent on the state machine getting block data back to the miner quickly, and thus + /// the poll time is dependent on the first attempt time. + pub fn get_poll_time(&self) -> u64 { + let poll_timeout = if self.node.miner { + cmp::min(5000, self.miner.first_attempt_time_ms / 2) + } else { + 5000 + }; + poll_timeout + } } impl std::default::Default for Config { @@ -1727,6 +1795,13 @@ pub struct NodeConfig { pub max_microblocks: u64, pub wait_time_for_microblocks: u64, pub wait_time_for_blocks: u64, + /// Controls how frequently, in milliseconds, the nakamoto miner's relay thread acts on its own initiative + /// (as opposed to responding to an event from the networking thread, etc.). This is roughly + /// how frequently the miner checks if a new burnchain block has been processed. 
+ /// + /// Default value of 10 seconds is reasonable in mainnet (where bitcoin blocks are ~10 minutes), + /// but environments where burn blocks are more frequent may want to decrease this value. + pub next_initiative_delay: u64, pub prometheus_bind: Option, pub marf_cache_strategy: Option, pub marf_defer_hashing: bool, @@ -2012,6 +2087,7 @@ impl Default for NodeConfig { max_microblocks: u16::MAX as u64, wait_time_for_microblocks: 30_000, wait_time_for_blocks: 30_000, + next_initiative_delay: 10_000, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: true, @@ -2192,6 +2268,8 @@ pub struct MinerConfig { pub first_attempt_time_ms: u64, pub subsequent_attempt_time_ms: u64, pub microblock_attempt_time_ms: u64, + /// Max time to assemble Nakamoto block + pub nakamoto_attempt_time_ms: u64, pub probability_pick_no_estimate_tx: u8, pub block_reward_recipient: Option, /// If possible, mine with a p2wpkh address @@ -2242,6 +2320,7 @@ impl Default for MinerConfig { first_attempt_time_ms: 10, subsequent_attempt_time_ms: 120_000, microblock_attempt_time_ms: 30_000, + nakamoto_attempt_time_ms: 10_000, probability_pick_no_estimate_tx: 25, block_reward_recipient: None, segwit: false, @@ -2459,6 +2538,7 @@ pub struct NodeConfigFile { pub max_microblocks: Option, pub wait_time_for_microblocks: Option, pub wait_time_for_blocks: Option, + pub next_initiative_delay: Option, pub prometheus_bind: Option, pub marf_cache_strategy: Option, pub marf_defer_hashing: Option, @@ -2519,6 +2599,9 @@ impl NodeConfigFile { wait_time_for_blocks: self .wait_time_for_blocks .unwrap_or(default_node_config.wait_time_for_blocks), + next_initiative_delay: self + .next_initiative_delay + .unwrap_or(default_node_config.next_initiative_delay), prometheus_bind: self.prometheus_bind, marf_cache_strategy: self.marf_cache_strategy, marf_defer_hashing: self @@ -2567,6 +2650,7 @@ pub struct MinerConfigFile { pub first_attempt_time_ms: Option, pub subsequent_attempt_time_ms: Option, pub 
microblock_attempt_time_ms: Option, + pub nakamoto_attempt_time_ms: Option, pub probability_pick_no_estimate_tx: Option, pub block_reward_recipient: Option, pub segwit: Option, @@ -2600,6 +2684,9 @@ impl MinerConfigFile { microblock_attempt_time_ms: self .microblock_attempt_time_ms .unwrap_or(miner_default_config.microblock_attempt_time_ms), + nakamoto_attempt_time_ms: self + .nakamoto_attempt_time_ms + .unwrap_or(miner_default_config.nakamoto_attempt_time_ms), probability_pick_no_estimate_tx: self .probability_pick_no_estimate_tx .unwrap_or(miner_default_config.probability_pick_no_estimate_tx), diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 665334e924..f9d4a4b4fb 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -128,6 +128,7 @@ pub struct MinedMicroblockEvent { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct MinedNakamotoBlockEvent { pub target_burn_height: u64, + pub parent_block_id: String, pub block_hash: String, pub block_id: String, pub stacks_height: u64, @@ -1252,6 +1253,7 @@ impl EventDispatcher { let payload = serde_json::to_value(MinedNakamotoBlockEvent { target_burn_height, + parent_block_id: block.header.parent_block_id.to_string(), block_hash: block.header.block_hash().to_string(), block_id: block.header.block_id().to_string(), stacks_height: block.header.chain_length, diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index cb512969c0..41b7426278 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -65,7 +65,7 @@ static GLOBAL: Jemalloc = Jemalloc; fn cli_pick_best_tip(config_path: &str, at_stacks_height: Option) -> TipCandidate { info!("Loading config at path {}", config_path); let config = match ConfigFile::from_path(config_path) { - Ok(config_file) => Config::from_config_file(config_file).unwrap(), + Ok(config_file) => Config::from_config_file(config_file, 
true).unwrap(), Err(e) => { warn!("Invalid config file: {}", e); process::exit(1); @@ -105,7 +105,7 @@ fn cli_get_miner_spend( ) -> u64 { info!("Loading config at path {}", config_path); let config = match ConfigFile::from_path(&config_path) { - Ok(config_file) => Config::from_config_file(config_file).unwrap(), + Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { warn!("Invalid config file: {}", e); process::exit(1); @@ -334,7 +334,7 @@ fn main() { process::exit(1); } }; - match Config::from_config_file(config_file) { + match Config::from_config_file(config_file, true) { Ok(_) => { info!("Loaded config!"); process::exit(0); @@ -365,9 +365,11 @@ fn main() { let seed = { let config_path: Option = args.opt_value_from_str("--config").unwrap(); if let Some(config_path) = config_path { - let conf = - Config::from_config_file(ConfigFile::from_path(&config_path).unwrap()) - .unwrap(); + let conf = Config::from_config_file( + ConfigFile::from_path(&config_path).unwrap(), + true, + ) + .unwrap(); args.finish(); conf.node.seed } else { @@ -416,7 +418,7 @@ fn main() { } }; - let conf = match Config::from_config_file(config_file) { + let conf = match Config::from_config_file(config_file, true) { Ok(conf) => conf, Err(e) => { warn!("Invalid config: {}", e); diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 7b7fb32a64..8a1d80de32 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -25,6 +25,7 @@ use stacks::chainstate::stacks::Error as ChainstateError; use stacks::monitoring; use stacks::monitoring::update_active_miners_count_gauge; use stacks::net::atlas::AtlasConfig; +use stacks::net::p2p::PeerNetwork; use stacks::net::relay::Relayer; use stacks::net::stackerdb::StackerDBs; use stacks_common::types::chainstate::SortitionId; @@ -132,6 +133,7 @@ impl StacksNode { globals: Globals, // relay receiver endpoint for the p2p thread, so the relayer 
can feed it data to push relay_recv: Receiver, + peer_network: Option, ) -> StacksNode { let config = runloop.config().clone(); let is_miner = runloop.is_miner(); @@ -157,7 +159,8 @@ impl StacksNode { .connect_mempool_db() .expect("FATAL: database failure opening mempool"); - let mut p2p_net = NeonNode::setup_peer_network(&config, &atlas_config, burnchain); + let mut p2p_net = peer_network + .unwrap_or_else(|| NeonNode::setup_peer_network(&config, &atlas_config, burnchain)); let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true) .expect("FATAL: failed to connect to stacker DB"); @@ -167,7 +170,7 @@ impl StacksNode { let local_peer = p2p_net.local_peer.clone(); // setup initial key registration - let leader_key_registration_state = if config.node.mock_mining { + let leader_key_registration_state = if config.get_node_config(false).mock_mining { // mock mining, pretend to have a registered key let (vrf_public_key, _) = keychain.make_vrf_keypair(VRF_MOCK_MINER_KEY); LeaderKeyRegistrationState::Active(RegisteredKey { diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3a976aecca..d6edd79963 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -18,20 +18,16 @@ use std::thread; use std::thread::JoinHandle; use std::time::{Duration, Instant}; -use clarity::boot_util::boot_code_id; use clarity::vm::clarity::ClarityConnection; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use hashbrown::HashSet; -use libsigner::{ - BlockProposalSigners, MessageSlotID, SignerMessage, SignerSession, StackerDBSession, -}; +use libsigner::v1::messages::{MessageSlotID, SignerMessage}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use 
stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; -use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, @@ -56,6 +52,11 @@ use crate::run_loop::nakamoto::Globals; use crate::run_loop::RegisteredKey; use crate::{neon_node, ChainTip}; +#[cfg(test)] +lazy_static::lazy_static! { + pub static ref TEST_BROADCAST_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); +} + /// If the miner was interrupted while mining a block, how long should the /// miner thread sleep before trying again? const ABORT_TRY_AGAIN_MS: u64 = 200; @@ -179,13 +180,9 @@ impl BlockMinerThread { }; if let Some(mut new_block) = new_block { - if let Err(e) = self.propose_block(&new_block, &stackerdbs) { - error!("Unrecoverable error while proposing block to signer set: {e:?}. Ending tenure."); - return; - } - let (aggregate_public_key, signers_signature) = match self.coordinate_signature( - &new_block, + &mut new_block, + self.burn_block.block_height, &mut stackerdbs, &mut attempts, ) { @@ -238,7 +235,8 @@ impl BlockMinerThread { fn coordinate_signature( &mut self, - new_block: &NakamotoBlock, + new_block: &mut NakamotoBlock, + burn_block_height: u64, stackerdbs: &mut StackerDBs, attempts: &mut u64, ) -> Result<(Point, ThresholdSignature), NakamotoNodeError> { @@ -302,18 +300,6 @@ impl BlockMinerThread { )); }; - #[cfg(test)] - { - // In test mode, short-circuit spinning up the SignCoordinator if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. 
- if let Some(signature) = - crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() - { - return Ok((aggregate_public_key, signature)); - } - } - let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let mut coordinator = SignCoordinator::new( &reward_set, @@ -332,97 +318,18 @@ impl BlockMinerThread { *attempts += 1; let signature = coordinator.begin_sign( new_block, + burn_block_height, *attempts, &tip, &self.burnchain, &sort_db, &stackerdbs, + &self.globals.counters, )?; Ok((aggregate_public_key, signature)) } - fn propose_block( - &mut self, - new_block: &NakamotoBlock, - stackerdbs: &StackerDBs, - ) -> Result<(), NakamotoNodeError> { - let rpc_socket = self.config.node.get_rpc_loopback().ok_or_else(|| { - NakamotoNodeError::MinerConfigurationFailed("Could not parse RPC bind") - })?; - let miners_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); - let mut miners_session = - StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id.clone()); - let Some(miner_privkey) = self.config.miner.mining_key else { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); - let tip = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - &new_block.header.consensus_hash, - ) - .expect("FATAL: could not retrieve chain tip") - .expect("FATAL: could not retrieve chain tip"); - let reward_cycle = self - .burnchain - .pox_constants - .block_height_to_reward_cycle( - self.burnchain.first_block_height, - self.burn_block.block_height, - ) - .expect("FATAL: building on a burn block that is before the first burn block"); - - let proposal_msg = BlockProposalSigners { - block: new_block.clone(), - burn_height: self.burn_block.block_height, - reward_cycle, - }; - let proposal = match 
NakamotoBlockBuilder::make_stackerdb_block_proposal( - &sort_db, - &tip, - &stackerdbs, - &proposal_msg, - &miner_privkey, - &miners_contract_id, - ) { - Ok(Some(chunk)) => chunk, - Ok(None) => { - warn!("Failed to propose block to stackerdb: no slot available"); - return Ok(()); - } - Err(e) => { - warn!("Failed to propose block to stackerdb: {e:?}"); - return Ok(()); - } - }; - - // Propose the block to the observing signers through the .miners stackerdb instance - match miners_session.put_chunk(&proposal) { - Ok(ack) => { - info!( - "Proposed block to stackerdb"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "ack_msg" => ?ack, - ); - } - Err(e) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to propose block to stackerdb {e:?}" - ))); - } - } - - self.globals.counters.bump_naka_proposed_blocks(); - Ok(()) - } - fn get_stackerdb_contract_and_slots( &self, stackerdbs: &StackerDBs, @@ -541,6 +448,23 @@ impl BlockMinerThread { block: NakamotoBlock, aggregate_public_key: &Point, ) -> Result<(), ChainstateError> { + #[cfg(test)] + { + if *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. 
+ warn!("Broadcasting is stalled due to testing directive."; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + ); + while *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Broadcasting is no longer stalled due to testing directive."; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + ); + } + } let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); let chainstate_config = chain_state.config(); @@ -660,9 +584,18 @@ impl BlockMinerThread { burn_db: &mut SortitionDB, chain_state: &mut StacksChainState, ) -> Result { - let Some(stacks_tip) = - NakamotoChainState::get_canonical_block_header(chain_state.db(), burn_db) - .expect("FATAL: could not query chain tip") + // The nakamoto miner must always build off of a chain tip that is the highest of: + // 1. The highest block in the miner's current tenure + // 2. The highest block in the current tenure's parent tenure + // Where the current tenure's parent tenure is the tenure start block committed to in the current tenure's associated block commit. + let stacks_block_id = if let Some(block) = self.mined_blocks.last() { + block.block_id() + } else { + self.parent_tenure_id + }; + let Some(mut stacks_tip_header) = + NakamotoChainState::get_block_header(chain_state.db(), &stacks_block_id) + .expect("FATAL: could not query prior stacks block id") else { debug!("No Stacks chain tip known, will return a genesis block"); let burnchain_params = burnchain_params_from_config(&self.config.burnchain); @@ -683,6 +616,19 @@ impl BlockMinerThread { }); }; + if self.mined_blocks.is_empty() { + // We could call this even if self.mined_blocks was not empty, but would return the same value, so save the effort and only do it when necessary. 
+ // If we are starting a new tenure, then make sure we are building off of the last block of our parent tenure + if let Some(last_tenure_finish_block_header) = + NakamotoChainState::get_nakamoto_tenure_finish_block_header( + chain_state.db(), + &stacks_tip_header.consensus_hash, + ) + .expect("FATAL: could not query parent tenure finish block") + { + stacks_tip_header = last_tenure_finish_block_header; + } + } let miner_address = self .keychain .origin_address(self.config.is_mainnet()) @@ -693,7 +639,7 @@ impl BlockMinerThread { &self.burn_block, miner_address, &self.parent_tenure_id, - stacks_tip, + stacks_tip_header, ) { Ok(parent_info) => Ok(parent_info), Err(NakamotoNodeError::BurnchainTipChanged) => { @@ -710,7 +656,7 @@ impl BlockMinerThread { fn make_vrf_proof(&mut self) -> Option { // if we're a mock miner, then make sure that the keychain has a keypair for the mocked VRF // key - let vrf_proof = if self.config.node.mock_mining { + let vrf_proof = if self.config.get_node_config(false).mock_mining { self.keychain.generate_proof( VRF_MOCK_MINER_KEY, self.burn_block.sortition_hash.as_bytes(), @@ -802,13 +748,12 @@ impl BlockMinerThread { parent_block_info.stacks_parent_header.microblock_tail = None; - let block_num = u64::try_from(self.mined_blocks.len()) - .map_err(|_| NakamotoNodeError::UnexpectedChainState)? 
- .saturating_add(1); - let signer_transactions = self.get_signer_transactions(&mut chain_state, &burn_db, &stackerdbs)?; + let signer_bitvec_len = + &burn_db.get_preprocessed_reward_set_size(&self.burn_block.sortition_id); + // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, @@ -818,15 +763,13 @@ impl BlockMinerThread { &self.burn_block.consensus_hash, self.burn_block.total_burn, tenure_start_info, - self.config.make_block_builder_settings( - block_num, - false, - self.globals.get_miner_status(), - ), + self.config + .make_nakamoto_block_builder_settings(self.globals.get_miner_status()), // we'll invoke the event dispatcher ourselves so that it calculates the // correct signer_sighash for `process_mined_nakamoto_block_event` Some(&self.event_dispatcher), signer_transactions, + signer_bitvec_len.unwrap_or(0), ) .map_err(|e| { if !matches!( diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index eeb6789d30..dc060e06b6 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -182,8 +182,13 @@ impl PeerThread { .parse() .unwrap_or_else(|_| panic!("Failed to parse socket: {}", &config.node.rpc_bind)); - net.bind(&p2p_sock, &rpc_sock) - .expect("BUG: PeerNetwork could not bind or is already bound"); + let did_bind = net + .try_bind(&p2p_sock, &rpc_sock) + .expect("BUG: PeerNetwork could not bind"); + + if !did_bind { + info!("`PeerNetwork::bind()` skipped, already bound"); + } let poll_timeout = cmp::min(5000, config.miner.first_attempt_time_ms / 2); diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index f638ae9324..fc4ca1ae0d 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -64,6 +64,11 @@ use crate::run_loop::nakamoto::{Globals, RunLoop}; use 
crate::run_loop::RegisteredKey; use crate::BitcoinRegtestController; +#[cfg(test)] +lazy_static::lazy_static! { + pub static ref TEST_SKIP_COMMIT_OP: std::sync::Mutex> = std::sync::Mutex::new(None); +} + /// Command types for the Nakamoto relayer thread, issued to it by other threads pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and @@ -185,8 +190,10 @@ impl RelayerThread { let bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone()); + let next_initiative_delay = config.node.next_initiative_delay; + RelayerThread { - config: config, + config, sortdb, chainstate, mempool, @@ -210,7 +217,7 @@ impl RelayerThread { miner_thread: None, is_miner, - next_initiative: Instant::now() + Duration::from_secs(10), + next_initiative: Instant::now() + Duration::from_millis(next_initiative_delay), last_committed: None, } } @@ -682,6 +689,16 @@ impl RelayerThread { ) -> Result<(), NakamotoNodeError> { let (last_committed_at, target_epoch_id, commit) = self.make_block_commit(&tenure_start_ch, &tenure_start_bh)?; + #[cfg(test)] + { + if TEST_SKIP_COMMIT_OP.lock().unwrap().unwrap_or(false) { + //if let Some((last_committed, ..)) = self.last_committed.as_ref() { + // if last_committed.consensus_hash == last_committed_at.consensus_hash { + warn!("Relayer: not submitting block-commit to bitcoin network due to test directive."); + return Ok(()); + //} + } + } let mut op_signer = self.keychain.generate_op_signer(); let txid = self .bitcoin_controller @@ -804,10 +821,12 @@ impl RelayerThread { pub fn main(mut self, relay_rcv: Receiver) { debug!("relayer thread ID is {:?}", std::thread::current().id()); - self.next_initiative = Instant::now() + Duration::from_secs(10); + self.next_initiative = + Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); while self.globals.keep_running() { let directive = if Instant::now() >= self.next_initiative { - self.next_initiative = Instant::now() + 
Duration::from_secs(10); + self.next_initiative = + Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); self.initiative() } else { None diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b1118bebff..4667958911 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -17,9 +17,8 @@ use std::sync::mpsc::Receiver; use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; -use libsigner::{ - MessageSlotID, SignerEntries, SignerEvent, SignerMessage, SignerSession, StackerDBSession, -}; +use libsigner::v1::messages::{MessageSlotID, SignerMessage}; +use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::BlockSnapshot; @@ -30,6 +29,7 @@ use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; use stacks::util_lib::boot::boot_code_id; +use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; use wsts::common::PolyCommitment; @@ -43,6 +43,7 @@ use wsts::v2::Aggregator; use super::Error as NakamotoNodeError; use crate::event_dispatcher::STACKER_DB_CHANNEL; +use crate::neon::Counters; use crate::Config; /// How long should the coordinator poll on the event receiver before @@ -62,6 +63,7 @@ pub struct SignCoordinator { is_mainnet: bool, miners_session: StackerDBSession, signing_round_timeout: Duration, + pub next_signer_bitvec: BitVec<4000>, } pub struct NakamotoSigningParams { @@ -209,6 +211,15 @@ impl SignCoordinator { let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); let miners_session = 
StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + let next_signer_bitvec: BitVec<4000> = BitVec::zeros( + reward_set_signers + .clone() + .len() + .try_into() + .expect("FATAL: signer set length greater than u16"), + ) + .expect("FATAL: unable to construct initial bitvec for signer set"); + let NakamotoSigningParams { num_signers, num_keys, @@ -238,6 +249,34 @@ impl SignCoordinator { }; let mut coordinator: FireCoordinator = FireCoordinator::new(coord_config); + #[cfg(test)] + { + // In test mode, short-circuit spinning up the SignCoordinator if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer. + use crate::tests::nakamoto_integrations::TEST_SIGNING; + if TEST_SIGNING.lock().unwrap().is_some() { + debug!("Short-circuiting spinning up coordinator from signer commitments. Using test signers channel."); + let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); + if replaced_other { + warn!("Replaced the miner/coordinator receiver of a prior thread. 
Prior thread may have crashed."); + } + let mut sign_coordinator = Self { + coordinator, + message_key, + receiver: Some(receiver), + wsts_public_keys, + is_mainnet, + miners_session, + signing_round_timeout: config.miner.wait_on_signers.clone(), + next_signer_bitvec, + }; + sign_coordinator + .coordinator + .set_aggregate_public_key(Some(aggregate_public_key)); + return Ok(sign_coordinator); + } + } let party_polynomials = get_signer_commitments( is_mainnet, reward_set_signers.as_slice(), @@ -264,6 +303,7 @@ impl SignCoordinator { is_mainnet, miners_session, signing_round_timeout: config.miner.wait_on_signers.clone(), + next_signer_bitvec, }) } @@ -291,8 +331,8 @@ impl SignCoordinator { else { return Err("No slot for miner".into()); }; - let target_slot = 1; - let slot_id = slot_range.start + target_slot; + // We only have one slot per miner + let slot_id = slot_range.start; if !slot_range.contains(&slot_id) { return Err("Not enough slots for miner messages".into()); } @@ -322,14 +362,17 @@ impl SignCoordinator { } } + #[cfg_attr(test, mutants::skip)] pub fn begin_sign( &mut self, block: &NakamotoBlock, + burn_block_height: u64, block_attempt: u64, burn_tip: &BlockSnapshot, burnchain: &Burnchain, sortdb: &SortitionDB, stackerdbs: &StackerDBs, + counters: &Counters, ) -> Result { let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); let sign_iter_id = block_attempt; @@ -339,7 +382,13 @@ impl SignCoordinator { self.coordinator.current_sign_id = sign_id; self.coordinator.current_sign_iter_id = sign_iter_id; - let block_bytes = block.serialize_to_vec(); + let proposal_msg = BlockProposal { + block: block.clone(), + burn_height: burn_block_height, + reward_cycle: reward_cycle_id, + }; + + let block_bytes = proposal_msg.serialize_to_vec(); let nonce_req_msg = self .coordinator .start_signing_round(&block_bytes, false, None) @@ -358,6 +407,19 @@ impl SignCoordinator { &mut self.miners_session, ) .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; + 
counters.bump_naka_proposed_blocks(); + #[cfg(test)] + { + // In test mode, short-circuit waiting for the signers if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer. + if let Some(signature) = + crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + { + debug!("Short-circuiting waiting for signers, using test signature"); + return Ok(signature); + } + } let Some(ref mut receiver) = self.receiver else { return Err(NakamotoNodeError::SigningCoordinatorFailure( @@ -385,6 +447,22 @@ impl SignCoordinator { debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); continue; } + let modified_slots = &event.modified_slots; + + // Update `next_signers_bitvec` with the slots that were modified in the event + modified_slots.iter().for_each(|chunk| { + if let Ok(slot_id) = chunk.slot_id.try_into() { + match &self.next_signer_bitvec.set(slot_id, true) { + Err(e) => { + warn!("Failed to set bitvec for next signer: {e:?}"); + } + _ => (), + }; + } else { + error!("FATAL: slot_id greater than u16, which should never happen."); + } + }); + let Ok(signer_event) = SignerEvent::try_from(event).map_err(|e| { warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); }) else { @@ -407,6 +485,7 @@ impl SignCoordinator { .filter_map(|msg| match msg { SignerMessage::DkgResults { .. 
} | SignerMessage::BlockResponse(_) + | SignerMessage::EncryptedSignerState(_) | SignerMessage::Transactions(_) => None, SignerMessage::Packet(packet) => { debug!("Received signers packet: {packet:?}"); @@ -454,6 +533,10 @@ impl SignCoordinator { "Signature failed to validate over the expected block".into(), )); } else { + info!( + "SignCoordinator: Generated a valid signature for the block"; + "next_signer_bitvec" => self.next_signer_bitvec.binary_str(), + ); return Ok(signature); } } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 3778c8ecc9..edb83db896 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -153,7 +153,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::{Burnchain, BurnchainSigner, PoxConstants, Txid}; -use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleConn}; use stacks::chainstate::burn::operations::leader_block_commit::{ RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS, }; @@ -298,7 +298,7 @@ pub struct StacksNode { /// True if we're a miner is_miner: bool, /// handle to the p2p thread - pub p2p_thread_handle: JoinHandle<()>, + pub p2p_thread_handle: JoinHandle>, /// handle to the relayer thread pub relayer_thread_handle: JoinHandle<()>, } @@ -1144,6 +1144,50 @@ impl BlockMinerThread { ret } + /// Is a given Stacks staging block on the canonical burnchain fork? 
+ pub(crate) fn is_on_canonical_burnchain_fork( + candidate: &StagingBlock, + sortdb_tip_handle: &SortitionHandleConn, + ) -> bool { + let candidate_ch = &candidate.consensus_hash; + let candidate_burn_ht = match SortitionDB::get_block_snapshot_consensus( + sortdb_tip_handle.conn(), + candidate_ch, + ) { + Ok(Some(x)) => x.block_height, + Ok(None) => { + warn!("Tried to evaluate potential chain tip with an unknown consensus hash"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash); + return false; + } + Err(e) => { + warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash, + "err" => ?e); + return false; + } + }; + let tip_ch = match sortdb_tip_handle.get_consensus_at(candidate_burn_ht) { + Ok(Some(x)) => x, + Ok(None) => { + warn!("Tried to evaluate potential chain tip with a consensus hash ahead of canonical tip"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash); + return false; + } + Err(e) => { + warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash, + "err" => ?e); + return false; + } + }; + &tip_ch == candidate_ch + } + /// Load all candidate tips upon which to build. This is all Stacks blocks whose heights are /// less than or equal to at `at_stacks_height` (or the canonical chain tip height, if not given), /// but greater than or equal to this end height minus `max_depth`. 
@@ -1173,61 +1217,42 @@ impl BlockMinerThread { let stacks_tips: Vec<_> = stacks_tips .into_iter() - .filter(|candidate| { - let candidate_ch = &candidate.consensus_hash; - let candidate_burn_ht = match SortitionDB::get_block_snapshot_consensus( - sortdb_tip_handle.conn(), - candidate_ch - ) { - Ok(Some(x)) => x.block_height, - Ok(None) => { - warn!("Tried to evaluate potential chain tip with an unknown consensus hash"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash); - return false; - }, - Err(e) => { - warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash, - "err" => ?e); - return false; - }, - }; - let tip_ch = match sortdb_tip_handle.get_consensus_at(candidate_burn_ht) { - Ok(Some(x)) => x, - Ok(None) => { - warn!("Tried to evaluate potential chain tip with a consensus hash ahead of canonical tip"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash); - return false; - }, - Err(e) => { - warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash, - "err" => ?e); - return false; - }, - }; - if &tip_ch != candidate_ch { - false - } else { - true - } - }) + .filter(|candidate| Self::is_on_canonical_burnchain_fork(candidate, &sortdb_tip_handle)) .collect(); + if stacks_tips.len() == 0 { + return vec![]; + } + let mut considered = HashSet::new(); let mut candidates = vec![]; let end_height = stacks_tips[0].height; - for cur_height in end_height.saturating_sub(max_depth)..=end_height { - let stacks_tips = chain_state + // process these tips + for tip in stacks_tips.into_iter() { + let index_block_hash = + StacksBlockId::new(&tip.consensus_hash, &tip.anchored_block_hash); + let burn_height = burn_db + 
.get_consensus_hash_height(&tip.consensus_hash) + .expect("FATAL: could not query burnchain block height") + .expect("FATAL: no burnchain block height for Stacks tip"); + let candidate = TipCandidate::new(tip, burn_height); + candidates.push(candidate); + considered.insert(index_block_hash); + } + + // process earlier tips, back to max_depth + for cur_height in end_height.saturating_sub(max_depth)..end_height { + let stacks_tips: Vec<_> = chain_state .get_stacks_chain_tips_at_height(cur_height) - .expect("FATAL: could not query chain tips at height"); + .expect("FATAL: could not query chain tips at height") + .into_iter() + .filter(|candidate| { + Self::is_on_canonical_burnchain_fork(candidate, &sortdb_tip_handle) + }) + .collect(); - for tip in stacks_tips { + for tip in stacks_tips.into_iter() { let index_block_hash = StacksBlockId::new(&tip.consensus_hash, &tip.anchored_block_hash); @@ -1712,7 +1737,7 @@ impl BlockMinerThread { fn make_vrf_proof(&mut self) -> Option { // if we're a mock miner, then make sure that the keychain has a keypair for the mocked VRF // key - let vrf_proof = if self.config.node.mock_mining { + let vrf_proof = if self.config.get_node_config(false).mock_mining { self.keychain.generate_proof( VRF_MOCK_MINER_KEY, self.burn_block.sortition_hash.as_bytes(), @@ -2535,7 +2560,7 @@ impl BlockMinerThread { let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); if res.is_none() { self.failed_to_submit_last_attempt = true; - if !self.config.node.mock_mining { + if !self.config.get_node_config(false).mock_mining { warn!("Relayer: Failed to submit Bitcoin transaction"); return None; } @@ -3518,7 +3543,7 @@ impl RelayerThread { return false; } - if !self.config.node.mock_mining { + if !self.config.get_node_config(false).mock_mining { // mock miner can't mine microblocks yet, so don't stop it from trying multiple // anchored blocks if self.mined_stacks_block && self.config.node.mine_microblocks { @@ -4171,7 +4196,7 
@@ impl PeerThread { net.bind(&p2p_sock, &rpc_sock) .expect("BUG: PeerNetwork could not bind or is already bound"); - let poll_timeout = cmp::min(5000, config.miner.first_attempt_time_ms / 2); + let poll_timeout = config.get_poll_time(); PeerThread { config, @@ -4655,7 +4680,10 @@ impl StacksNode { /// Main loop of the p2p thread. /// Runs in a separate thread. /// Continuously receives, until told otherwise. - pub fn p2p_main(mut p2p_thread: PeerThread, event_dispatcher: EventDispatcher) { + pub fn p2p_main( + mut p2p_thread: PeerThread, + event_dispatcher: EventDispatcher, + ) -> Option { let should_keep_running = p2p_thread.globals.should_keep_running.clone(); let (mut dns_resolver, mut dns_client) = DNSResolver::new(10); @@ -4718,6 +4746,7 @@ impl StacksNode { thread::sleep(Duration::from_secs(5)); } info!("P2P thread exit!"); + p2p_thread.net } /// This function sets the global var `GLOBAL_BURNCHAIN_SIGNER`. @@ -4777,7 +4806,7 @@ impl StacksNode { let local_peer = p2p_net.local_peer.clone(); // setup initial key registration - let leader_key_registration_state = if config.node.mock_mining { + let leader_key_registration_state = if config.get_node_config(false).mock_mining { // mock mining, pretend to have a registered key let (vrf_public_key, _) = keychain.make_vrf_keypair(VRF_MOCK_MINER_KEY); LeaderKeyRegistrationState::Active(RegisteredKey { @@ -4814,7 +4843,7 @@ impl StacksNode { )) .spawn(move || { debug!("p2p thread ID is {:?}", thread::current().id()); - Self::p2p_main(p2p_thread, p2p_event_dispatcher); + Self::p2p_main(p2p_thread, p2p_event_dispatcher) }) .expect("FATAL: failed to start p2p thread"); @@ -5017,8 +5046,8 @@ impl StacksNode { } /// Join all inner threads - pub fn join(self) { + pub fn join(self) -> Option { self.relayer_thread_handle.join().unwrap(); - self.p2p_thread_handle.join().unwrap(); + self.p2p_thread_handle.join().unwrap() } } diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs 
b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index dec1ca757f..087e1424ee 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -108,7 +108,7 @@ impl BootRunLoop { let InnerLoops::Epoch3(ref mut naka_loop) = self.active_loop else { panic!("FATAL: unexpectedly invoked start_from_naka when active loop wasn't nakamoto"); }; - naka_loop.start(burnchain_opt, mine_start) + naka_loop.start(burnchain_opt, mine_start, None) } fn start_from_neon(&mut self, burnchain_opt: Option, mine_start: u64) { @@ -120,7 +120,7 @@ impl BootRunLoop { let boot_thread = Self::spawn_stopper(&self.config, neon_loop) .expect("FATAL: failed to spawn epoch-2/3-boot thread"); - neon_loop.start(burnchain_opt.clone(), mine_start); + let peer_network = neon_loop.start(burnchain_opt.clone(), mine_start); let monitoring_thread = neon_loop.take_monitoring_thread(); // did we exit because of the epoch-3.0 transition, or some other reason? @@ -150,7 +150,7 @@ impl BootRunLoop { let InnerLoops::Epoch3(ref mut naka_loop) = self.active_loop else { panic!("FATAL: unexpectedly found epoch2 loop after setting epoch3 active"); }; - naka_loop.start(burnchain_opt, mine_start) + naka_loop.start(burnchain_opt, mine_start, peer_network) } fn spawn_stopper( diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index dd13b2d32c..997327287d 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -31,6 +31,7 @@ use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; +use stacks::net::p2p::PeerNetwork; use stacks_common::types::PublicKey; use stacks_common::util::hash::Hash160; use stx_genesis::GenesisData; @@ -195,7 +196,7 @@ impl 
RunLoop { return true; } } - if self.config.node.mock_mining { + if self.config.get_node_config(false).mock_mining { info!("No UTXOs found, but configured to mock mine"); return true; } else { @@ -392,7 +393,12 @@ impl RunLoop { /// It will start the burnchain (separate thread), set-up a channel in /// charge of coordinating the new blocks coming from the burnchain and /// the nodes, taking turns on tenures. - pub fn start(&mut self, burnchain_opt: Option, mut mine_start: u64) { + pub fn start( + &mut self, + burnchain_opt: Option, + mut mine_start: u64, + peer_network: Option, + ) { let (coordinator_receivers, coordinator_senders) = self .coordinator_channels .take() @@ -475,7 +481,7 @@ impl RunLoop { // Boot up the p2p network and relayer, and figure out how many sortitions we have so far // (it could be non-zero if the node is resuming from chainstate) - let mut node = StacksNode::spawn(self, globals.clone(), relay_recv); + let mut node = StacksNode::spawn(self, globals.clone(), relay_recv, peer_network); // Wait for all pending sortitions to process let burnchain_db = burnchain_config diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 6f2f643d30..157fa71cd7 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -21,6 +21,7 @@ use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; +use stacks::net::p2p::PeerNetwork; use stacks::util_lib::db::Error as db_error; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; @@ -373,7 +374,7 @@ impl RunLoop { return true; } } - if self.config.node.mock_mining { + if self.config.get_node_config(false).mock_mining { info!("No UTXOs found, but configured to mock mine"); 
return true; } else { @@ -999,7 +1000,11 @@ impl RunLoop { /// It will start the burnchain (separate thread), set-up a channel in /// charge of coordinating the new blocks coming from the burnchain and /// the nodes, taking turns on tenures. - pub fn start(&mut self, burnchain_opt: Option, mut mine_start: u64) { + pub fn start( + &mut self, + burnchain_opt: Option, + mut mine_start: u64, + ) -> Option { let (coordinator_receivers, coordinator_senders) = self .coordinator_channels .take() @@ -1018,12 +1023,12 @@ impl RunLoop { Ok(burnchain_controller) => burnchain_controller, Err(burnchain_error::ShutdownInitiated) => { info!("Exiting stacks-node"); - return; + return None; } Err(e) => { error!("Error initializing burnchain: {}", e); info!("Exiting stacks-node"); - return; + return None; } }; @@ -1142,11 +1147,11 @@ impl RunLoop { globals.coord().stop_chains_coordinator(); coordinator_thread_handle.join().unwrap(); - node.join(); + let peer_network = node.join(); liveness_thread.join().unwrap(); info!("Exiting stacks-node"); - break; + break peer_network; } let remote_chain_height = burnchain.get_headers_height() - 1; @@ -1269,7 +1274,7 @@ impl RunLoop { if !node.relayer_sortition_notify() { // relayer hung up, exit. error!("Runloop: Block relayer and miner hung up, exiting."); - return; + return None; } } @@ -1343,7 +1348,7 @@ impl RunLoop { if !node.relayer_issue_tenure(ibd) { // relayer hung up, exit. 
error!("Runloop: Block relayer and miner hung up, exiting."); - break; + break None; } } } diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 2cc9868dc6..c5a30e350b 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -28,7 +28,6 @@ use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; use stacks::clarity_cli::vm_execute as execute; use stacks::core; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; -use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey}; use stacks_common::types::Address; @@ -37,6 +36,7 @@ use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::stacks_common::codec::StacksMessageCodec; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::{ get_account, get_chain_info, get_pox_info, neon_integration_test_conf, next_block_and_wait, diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 35dca5b537..694d27ca15 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -22,6 +22,7 @@ use stacks::chainstate::stacks::{ TransactionContractCall, TransactionPayload, }; use stacks::clarity_vm::clarity::ClarityConnection; +use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -31,7 +32,6 @@ use stacks::net::api::callreadonly::CallReadOnlyRequestBody; use stacks::net::api::getaccount::AccountEntryResponse; use stacks::net::api::getcontractsrc::ContractSrcResponse; 
use stacks::net::api::getistraitimplemented::GetIsTraitImplementedResponse; -use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum}; diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index 8c906cd43e..6221c6cf11 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -13,13 +13,13 @@ use stacks::chainstate::stacks::{ TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionSpendingCondition, TransactionVersion, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, }; +use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; use stacks::net::Error as NetError; use stacks_common::address::AddressHashMode; -use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress}; use stacks_common::util::hash::*; use stacks_common::util::secp256k1::*; diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 5a237e6e20..0b8c379f7c 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -21,7 +21,7 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::database::BurnStateDB; use clarity::vm::events::STXEventType; use clarity::vm::types::PrincipalData; -use clarity::vm::{ClarityName, ContractName, Value}; +use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; use lazy_static::lazy_static; use rand::RngCore; use stacks::chainstate::burn::ConsensusHash; @@ -223,35 +223,49 @@ pub fn serialize_sign_tx_anchor_mode_version( buf } -pub fn make_contract_publish( +pub fn make_contract_publish_versioned( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, 
contract_name: &str, contract_content: &str, + version: Option, ) -> Vec { let name = ContractName::from(contract_name); let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); - let payload = TransactionSmartContract { name, code_body }; + let payload = + TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee) } -pub fn make_contract_publish_microblock_only( +pub fn make_contract_publish( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + contract_name: &str, + contract_content: &str, +) -> Vec { + make_contract_publish_versioned(sender, nonce, tx_fee, contract_name, contract_content, None) +} + +pub fn make_contract_publish_microblock_only_versioned( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, contract_name: &str, contract_content: &str, + version: Option, ) -> Vec { let name = ContractName::from(contract_name); let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); - let payload = TransactionSmartContract { name, code_body }; + let payload = + TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); serialize_sign_standard_single_sig_tx_anchor_mode( - payload.into(), + payload, sender, nonce, tx_fee, @@ -259,6 +273,23 @@ pub fn make_contract_publish_microblock_only( ) } +pub fn make_contract_publish_microblock_only( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + contract_name: &str, + contract_content: &str, +) -> Vec { + make_contract_publish_microblock_only_versioned( + sender, + nonce, + tx_fee, + contract_name, + contract_content, + None, + ) +} + pub fn new_test_conf() -> Config { // secretKey: "b1cf9cee5083f421c84d7cb53be5edf2801c3c78d63d53917aee0bdc8bd160ee01", // publicKey: "03e2ed46873d0db820e8c6001aabc082d72b5b900b53b7a1b9714fe7bde3037b81", @@ -567,7 
+598,7 @@ fn should_succeed_mining_valid_txs() { }, 3 => { // On round 3, publish a "set:foo=bar" transaction - // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 2 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store set-value -e \"foo\" -e \"bar\" + // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 2 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store set-value -e \"foo\" -e \"bar\" let set_foo_bar = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000002000000000000000a010142a01caf6a32b367664869182f0ebc174122a5a980937ba259d44cc3ebd280e769a53dd3913c8006ead680a6e1c98099fcd509ce94b0a4e90d9f4603b101922d030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265097365742d76616c7565000000020d00000003666f6f0d00000003626172"; tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), &ExecutionCost::max_value(), diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index aa545514f0..55eb6753bf 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -24,9 +24,11 @@ use std::{env, thread}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use clarity::vm::ClarityVersion; use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; -use libsigner::{SignerSession, StackerDBSession}; +use libsigner::v1::messages::SignerMessage; +use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use rand::RngCore; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -72,17 +74,23 @@ use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, 
Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; +use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; +use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ - get_account, get_chain_info_result, get_pox_info, next_block_and_wait, + call_read_only, get_account, get_chain_info_result, get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, }; -use crate::tests::{get_chain_info, make_stacks_transfer, to_addr}; +use crate::tests::{ + get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, + to_addr, +}; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; @@ -292,6 +300,37 @@ pub fn blind_signer( }) } +pub fn get_latest_block_proposal( + conf: &Config, + sortdb: &SortitionDB, +) -> Result { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let miner_pubkey = StacksPublicKey::from_private(&conf.get_miner_config().mining_key.unwrap()); + let miner_slot_id = NakamotoChainState::get_miner_slot(&sortdb, &tip, &miner_pubkey) + .map_err(|_| "Unable to get miner slot")? 
+ .ok_or("No miner slot exists")?; + + let proposed_block = { + let miner_contract_id = boot_code_id(MINERS_NAME, false); + let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, miner_contract_id); + let message: SignerMessage = miners_stackerdb + .get_latest(miner_slot_id.start) + .expect("Failed to get latest chunk from the miner slot ID") + .expect("No chunk found"); + let SignerMessage::Packet(packet) = message else { + panic!("Expected a signer message packet. Got {message:?}"); + }; + let Message::NonceRequest(nonce_request) = packet.msg else { + panic!("Expected a nonce request. Got {:?}", packet.msg); + }; + let block_proposal = + BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()) + .expect("Failed to deserialize block proposal"); + block_proposal.block + }; + Ok(proposed_block) +} + pub fn read_and_sign_block_proposal( conf: &Config, signers: &TestSigners, @@ -301,22 +340,11 @@ pub fn read_and_sign_block_proposal( let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let miner_pubkey = StacksPublicKey::from_private(&conf.get_miner_config().mining_key.unwrap()); - let miner_slot_id = NakamotoChainState::get_miner_slot(&sortdb, &tip, &miner_pubkey) - .map_err(|_| "Unable to get miner slot")? - .ok_or("No miner slot exists")?; let reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); - let mut proposed_block: NakamotoBlock = { - let miner_contract_id = boot_code_id(MINERS_NAME, false); - let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, miner_contract_id); - miners_stackerdb - .get_latest(miner_slot_id.start) - .map_err(|_| "Failed to get latest chunk from the miner slot ID")? - .ok_or("No chunk found")? 
- }; + let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?; let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let signer_sig_hash = proposed_block.header.signer_signature_hash(); @@ -807,7 +835,7 @@ fn signer_vote_if_needed( /// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order /// for pox-4 to activate /// * `signer_pks` - must be the same size as `stacker_sks` -pub fn boot_to_epoch_3_reward_set( +pub fn boot_to_epoch_3_reward_set_calculation_boundary( naka_conf: &Config, blocks_processed: &Arc, stacker_sks: &[StacksPrivateKey], @@ -828,9 +856,9 @@ pub fn boot_to_epoch_3_reward_set( ); let epoch_3_reward_cycle_boundary = epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); - let epoch_3_reward_set_calculation_boundary = - epoch_3_reward_cycle_boundary.saturating_sub(prepare_phase_len); - let epoch_3_reward_set_calculation = epoch_3_reward_set_calculation_boundary.wrapping_add(2); // +2 to ensure we are at the second block of the prepare phase + let epoch_3_reward_set_calculation_boundary = epoch_3_reward_cycle_boundary + .saturating_sub(prepare_phase_len) + .wrapping_add(1); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); next_block_and_wait(btc_regtest_controller, &blocks_processed); next_block_and_wait(btc_regtest_controller, &blocks_processed); @@ -850,7 +878,6 @@ pub fn boot_to_epoch_3_reward_set( "block_height" => {block_height}, "reward_cycle" => {reward_cycle}, "epoch_3_reward_cycle_boundary" => {epoch_3_reward_cycle_boundary}, - "epoch_3_reward_set_calculation" => {epoch_3_reward_set_calculation}, "epoch_3_start_height" => {epoch_3_start_height}, ); for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { @@ -899,10 +926,39 @@ pub fn boot_to_epoch_3_reward_set( run_until_burnchain_height( btc_regtest_controller, &blocks_processed, - epoch_3_reward_set_calculation, + epoch_3_reward_set_calculation_boundary, 
&naka_conf, ); + info!("Bootstrapped to Epoch 3.0 reward set calculation boundary height: {epoch_3_reward_set_calculation_boundary}."); +} + +/// +/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +/// * `signer_pks` - must be the same size as `stacker_sks` +pub fn boot_to_epoch_3_reward_set( + naka_conf: &Config, + blocks_processed: &Arc, + stacker_sks: &[StacksPrivateKey], + signer_sks: &[StacksPrivateKey], + btc_regtest_controller: &mut BitcoinRegtestController, +) { + boot_to_epoch_3_reward_set_calculation_boundary( + naka_conf, + blocks_processed, + stacker_sks, + signer_sks, + btc_regtest_controller, + ); + let epoch_3_reward_set_calculation = + btc_regtest_controller.get_headers_height().wrapping_add(1); + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + epoch_3_reward_set_calculation, + &naka_conf, + ); info!("Bootstrapped to Epoch 3.0 reward set calculation height: {epoch_3_reward_set_calculation}."); } @@ -933,7 +989,7 @@ fn simple_neon_integration() { let send_fee = 100; naka_conf.add_initial_balance( PrincipalData::from(sender_addr.clone()).to_string(), - send_amt + send_fee, + send_amt * 2 + send_fee, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); @@ -1262,6 +1318,7 @@ fn mine_multiple_per_tenure_integration() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { + debug!("Mining tenure {}", tenure_ix); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -1852,6 +1909,7 @@ fn block_proposal_api_endpoint() { total_burn, tenure_change, coinbase, + 1, ) .expect("Failed to build Nakamoto block"); @@ -2152,22 +2210,9 @@ fn miner_writes_proposed_block_to_stackerdb() { .unwrap(); let sortdb = naka_conf.get_burnchain().open_sortition_db(true).unwrap(); - let 
tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let miner_pubkey = - StacksPublicKey::from_private(&naka_conf.get_miner_config().mining_key.unwrap()); - let slot_id = NakamotoChainState::get_miner_slot(&sortdb, &tip, &miner_pubkey) - .expect("Unable to get miner slot") - .expect("No miner slot exists"); - let proposed_block: NakamotoBlock = { - let miner_contract_id = boot_code_id(MINERS_NAME, false); - let mut miners_stackerdb = - StackerDBSession::new(&naka_conf.node.rpc_bind, miner_contract_id); - miners_stackerdb - .get_latest(slot_id.start) - .expect("Failed to get latest chunk from the miner slot ID") - .expect("No chunk found") - }; + let proposed_block = get_latest_block_proposal(&naka_conf, &sortdb) + .expect("Expected to find a proposed block in the StackerDB"); let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let mut proposed_zero_block = proposed_block.clone(); @@ -3136,3 +3181,717 @@ fn stack_stx_burn_op_integration_test() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// Miner A mines a regular tenure, its last block being block a_x. +/// Miner B starts its tenure, Miner B produces a Stacks block b_0, but miner C submits its block commit before b_0 is broadcasted. +/// Bitcoin block C, containing Miner C's block commit, is mined BEFORE miner C has a chance to update their block commit with b_0's information. +/// This test asserts: +/// * tenure C ignores b_0, and correctly builds off of block a_x. 
+fn forked_tenure_is_ignored() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(10); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt + send_fee, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let stacker_sk = setup_stacker(&mut naka_conf); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + naka_mined_blocks: mined_blocks, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + Some(&signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + info!("Starting tenure A."); + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }) + .unwrap(); + + // In the next block, the miner should win the tenure and submit a stacks block + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = mined_blocks.load(Ordering::SeqCst); + Ok(commits_count > commits_before && blocks_count > blocks_before) + }) + .unwrap(); + + let block_tenure_a = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + 
// For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let commits_before = commits_submitted.load(Ordering::SeqCst); + info!("Starting tenure B."); + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }) + .unwrap(); + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + info!("Commit op is submitted; unpause tenure B's block"); + + // Unpause the broadcast of Tenure B's block, do not submit commits. + TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + // Wait for a stacks block to be broadcasted + let start_time = Instant::now(); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < Duration::from_secs(30), + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + info!("Tenure B broadcasted a block. 
Issue the next bitcon block and unstall block commits."); + let block_tenure_b = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_b = blocks.last().unwrap(); + + info!("Starting tenure C."); + // Submit a block commit op for tenure C + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and(&mut btc_regtest_controller, 60, || { + TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = mined_blocks.load(Ordering::SeqCst); + Ok(commits_count > commits_before && blocks_count > blocks_before) + }) + .unwrap(); + + info!("Tenure C produced a block!"); + let block_tenure_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_c = blocks.last().unwrap(); + + // Now let's produce a second block for tenure C and ensure it builds off of block C. 
+ let blocks_before = mined_blocks.load(Ordering::SeqCst); + let start_time = Instant::now(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in Tenure C to mine a second block"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < Duration::from_secs(30), + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + info!("Tenure C produced a second block!"); + + let block_2_tenure_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_2_c = blocks.last().unwrap(); + + info!("Starting tenure D."); + // Submit a block commit op for tenure D and mine a stacks block + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = mined_blocks.load(Ordering::SeqCst); + Ok(commits_count > commits_before && blocks_count > blocks_before) + }) + .unwrap(); + + let block_tenure_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_d = blocks.last().unwrap(); + assert_ne!(block_tenure_b, block_tenure_a); + assert_ne!(block_tenure_b, block_tenure_c); + assert_ne!(block_tenure_c, block_tenure_a); + + // Block B was built atop block A + assert_eq!( + block_tenure_b.stacks_block_height, + block_tenure_a.stacks_block_height + 1 + ); + assert_eq!( + block_b.parent_block_id, + block_tenure_a.index_block_hash().to_string() + ); + + // Block C 
was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block A + assert_eq!( + block_tenure_c.stacks_block_height, + block_tenure_a.stacks_block_height + 1 + ); + assert_eq!( + block_c.parent_block_id, + block_tenure_a.index_block_hash().to_string() + ); + + assert_ne!(block_tenure_c, block_2_tenure_c); + assert_ne!(block_2_tenure_c, block_tenure_d); + assert_ne!(block_tenure_c, block_tenure_d); + + // Second block of tenure C builds off of block C + assert_eq!( + block_2_tenure_c.stacks_block_height, + block_tenure_c.stacks_block_height + 1, + ); + assert_eq!( + block_2_c.parent_block_id, + block_tenure_c.index_block_hash().to_string() + ); + + // Tenure D builds off of the second block of tenure C + assert_eq!( + block_tenure_d.stacks_block_height, + block_2_tenure_c.stacks_block_height + 1, + ); + assert_eq!( + block_d.parent_block_id, + block_2_tenure_c.index_block_hash().to_string() + ); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. 
+/// This test makes three assertions: +/// * 5 tenures are mined after 3.0 starts +/// * Each tenure has 10 blocks (the coinbase block and 9 interim blocks) +/// * Verifies the block heights of the blocks mined +fn check_block_heights() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let deploy_fee = 3000; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 3 * deploy_fee + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = 
boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + let mut sender_nonce = 0; + + // Deploy this version with the Clarity 1 / 2 before epoch 3 + let contract0_name = "test-contract-0"; + let contract_clarity1 = + "(define-read-only (get-heights) { burn-block-height: burn-block-height, block-height: block-height })"; + + let contract_tx0 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract0_name, + contract_clarity1, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx0); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + Some(&signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + let heights0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-heights", + vec![], + ); + let heights0 = heights0_value.expect_tuple().unwrap(); + info!("Heights from pre-epoch 3.0: {}", heights0); + 
+ // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + let info = get_chain_info_result(&naka_conf).unwrap(); + println!("Chain info: {:?}", info); + let mut last_burn_block_height = info.burn_block_height as u128; + let mut last_stacks_block_height = info.stacks_tip_height as u128; + let mut last_tenure_height = last_stacks_block_height as u128; + + let heights0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-heights", + vec![], + ); + let heights0 = heights0_value.expect_tuple().unwrap(); + info!("Heights from epoch 3.0 start: {}", heights0); + assert_eq!( + heights0 + .get("burn-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap() + + 3, + last_burn_block_height, + "Burn block height should match" + ); + assert_eq!( + heights0 + .get("block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(), + last_stacks_block_height, + "Stacks block height should match" + ); + + // This version uses the Clarity 1 / 2 keywords + let contract1_name = "test-contract-1"; + let contract_tx1 = make_contract_publish_versioned( + &sender_sk, + sender_nonce, + deploy_fee, + contract1_name, + contract_clarity1, + Some(ClarityVersion::Clarity2), + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx1); + + // This version uses the Clarity 3 keywords + let contract3_name = "test-contract-3"; + let contract_clarity3 = + "(define-read-only (get-heights) { burn-block-height: burn-block-height, stacks-block-height: stacks-block-height, tenure-height: tenure-height })"; + + let contract_tx3 = 
make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract3_name, + contract_clarity3, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx3); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + info!("Mining tenure {}", tenure_ix); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let heights1_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-heights", + vec![], + ); + let heights1 = heights1_value.expect_tuple().unwrap(); + info!("Heights from Clarity 1: {}", heights1); + + let heights3_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-heights", + vec![], + ); + let heights3 = heights3_value.expect_tuple().unwrap(); + info!("Heights from Clarity 3: {}", heights3); + + let bbh1 = heights1 + .get("burn-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + let bbh3 = heights3 + .get("burn-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + assert_eq!(bbh1, bbh3, "Burn block heights should match"); + if tenure_ix == 0 { + // Add two for the 2 blocks with no tenure during Nakamoto bootup + last_burn_block_height = bbh1 + 2; + } else { + assert_eq!( + bbh1, last_burn_block_height, + "Burn block height should not have changed yet" + ); + } + + let bh1 = heights1 + .get("block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + let bh3 = heights3 + .get("tenure-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + assert_eq!( + bh1, bh3, + "Clarity 2 block-height should match Clarity 3 tenure-height" + ); + assert_eq!( + bh1, + last_tenure_height + 1, + "Tenure height should have incremented" + ); + last_tenure_height = bh1; + + let sbh = heights3 + .get("stacks-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + assert_eq!( + sbh, + last_stacks_block_height + 
1, + "Stacks block heights should have incremented" + ); + last_stacks_block_height = sbh; + + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + info!("Mining interim block {interim_block_ix}"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let heights1_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-heights", + vec![], + ); + let heights1 = heights1_value.expect_tuple().unwrap(); + info!("Heights from Clarity 1: {}", heights1); + + let heights3_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-heights", + vec![], + ); + let heights3 = heights3_value.expect_tuple().unwrap(); + info!("Heights from Clarity 3: {}", heights3); + + let bbh1 = heights1 + .get("burn-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + let bbh3 = heights3 + .get("burn-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + assert_eq!(bbh1, bbh3, "Burn block heights should match"); + if interim_block_ix == 0 { + assert_eq!( + bbh1, + last_burn_block_height + 1, + "Burn block heights should have incremented" + ); + last_burn_block_height = bbh1; + } else { + assert_eq!( + bbh1, last_burn_block_height, + "Burn block heights should not have incremented" + ); + } + + let bh1 = heights1 + .get("block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + let bh3 = heights3 + .get("tenure-height") + .unwrap() + .clone() + .expect_u128() + 
.unwrap(); + assert_eq!( + bh1, bh3, + "Clarity 2 block-height should match Clarity 3 tenure-height" + ); + assert_eq!( + bh1, last_tenure_height, + "Tenure height should not have changed" + ); + + let sbh = heights3 + .get("stacks-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + assert_eq!( + sbh, + last_stacks_block_height + 1, + "Stacks block heights should have incremented" + ); + last_stacks_block_height = sbh; + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert_eq!( + tip.stacks_block_height, + block_height_pre_3_0 + ((inter_blocks_per_tenure + 1) * tenure_count), + "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + ); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 2dce43b661..ff3bc9b3d5 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -8,10 +8,12 @@ use std::{cmp, env, fs, io, thread}; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; +use 
clarity::vm::types::serialization::SerializationError; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value, MAX_CALL_STACK_DEPTH}; use rand::{Rng, RngCore}; use rusqlite::types::ToSql; +use serde::Deserialize; use serde_json::json; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::bitcoin::BitcoinNetworkType; @@ -36,6 +38,7 @@ use stacks::chainstate::stacks::{ StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; +use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ self, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, @@ -63,7 +66,6 @@ use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, }; use stacks_common::address::AddressHashMode; -use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, }; @@ -149,7 +151,7 @@ fn inner_neon_integration_test_conf(seed: Option>) -> (Config, StacksAdd burnchain.peer_host = Some("127.0.0.1".to_string()); } - let magic_bytes = Config::from_config_file(cfile) + let magic_bytes = Config::from_config_file(cfile, false) .unwrap() .burnchain .magic_bytes; @@ -856,6 +858,49 @@ pub fn get_tip_anchored_block(conf: &Config) -> (ConsensusHash, StacksBlock) { (stacks_tip_consensus_hash, block) } +#[derive(Deserialize, Debug)] +struct ReadOnlyResponse { + #[serde(rename = "okay")] + _okay: bool, + #[serde(rename = "result")] + result_hex: String, +} + +impl ReadOnlyResponse { + pub fn result(&self) -> Result { + Value::try_deserialize_hex_untyped(&self.result_hex) + } +} + +pub fn call_read_only( + conf: &Config, + principal: &StacksAddress, + contract: &str, + function: &str, + args: Vec<&str>, +) -> Value { + let http_origin 
= format!("http://{}", &conf.node.rpc_bind); + let client = reqwest::blocking::Client::new(); + + let path = format!( + "{http_origin}/v2/contracts/call-read/{}/{}/{}", + principal, contract, function + ); + let body = json!({ + "arguments": args, + "sender": principal.to_string(), + }); + let response: ReadOnlyResponse = client + .post(path) + .header("Content-Type", "application/json") + .body(body.to_string()) + .send() + .unwrap() + .json() + .unwrap(); + response.result().unwrap() +} + fn find_microblock_privkey( conf: &Config, pubkey_hash: &Hash160, diff --git a/testnet/stacks-node/src/tests/signer.rs b/testnet/stacks-node/src/tests/signer.rs index 01024343db..08cb254ec8 100644 --- a/testnet/stacks-node/src/tests/signer.rs +++ b/testnet/stacks-node/src/tests/signer.rs @@ -1,17 +1,14 @@ use std::collections::HashSet; use std::net::ToSocketAddrs; use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; -use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use std::{env, thread}; use clarity::boot_util::boot_code_id; use clarity::vm::Value; -use libsigner::{ - BlockResponse, MessageSlotID, RejectCode, RunningSigner, Signer, SignerEventReceiver, - SignerMessage, -}; +use libsigner::v1::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage}; +use libsigner::{BlockProposal, SignerEntries}; use rand::thread_rng; use rand_core::RngCore; use stacks::burnchains::Txid; @@ -40,15 +37,17 @@ use stacks_common::types::chainstate::{ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; -use stacks_signer::client::{StackerDB, StacksClient}; +use stacks_signer::client::{SignerSlotID, StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::runloop::RunLoopCommand; -use stacks_signer::signer::{Command as 
SignerCommand, SignerSlotID}; +use stacks_signer::runloop::{RunLoopCommand, SignerCommand}; +use stacks_signer::v1::coordinator::CoordinatorSelector; +use stacks_signer::v1::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use wsts::curve::point::Point; use wsts::curve::scalar::Scalar; -use wsts::state_machine::OperationResult; +use wsts::net::Message; +use wsts::state_machine::{OperationResult, PublicKeys}; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use crate::event_dispatcher::MinedNakamotoBlockEvent; @@ -56,8 +55,9 @@ use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_3_reward_set, naka_neon_integration_conf, next_block_and, - next_block_and_mine_commit, POX_4_DEFAULT_STACKER_BALANCE, + boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, + naka_neon_integration_conf, next_block_and, next_block_and_mine_commit, + POX_4_DEFAULT_STACKER_BALANCE, }; use crate::tests::neon_integrations::{ next_block_and_wait, run_until_burnchain_height, test_observer, wait_for_runloop, @@ -82,12 +82,8 @@ struct RunningNodes { struct SignerTest { // The stx and bitcoin nodes and their run loops pub running_nodes: RunningNodes, - // The channels for sending commands to the signers - pub signer_cmd_senders: Vec>, - // The channels for receiving results from the signers - pub result_receivers: Vec>>, - // The running signer and its threads - pub running_signers: Vec>>, + // The spawned signers and their threads + pub spawned_signers: Vec, // the private keys of the signers pub signer_stacks_private_keys: Vec, // link to the stacks node @@ -120,23 +116,20 @@ impl SignerTest { password, run_stamp, 3000, + Some(100_000), + None, + Some(9000), ); - let mut running_signers = Vec::new(); - let mut signer_cmd_senders = Vec::new(); - let mut 
result_receivers = Vec::new(); - for i in 0..num_signers { - let (cmd_send, cmd_recv) = channel(); - let (res_send, res_recv) = channel(); - info!("spawn signer"); - running_signers.push(spawn_signer( - &signer_configs[i as usize], - cmd_recv, - res_send, - )); - signer_cmd_senders.push(cmd_send); - result_receivers.push(res_recv); - } + let spawned_signers: Vec<_> = (0..num_signers) + .into_iter() + .map(|i| { + info!("spawning signer"); + let signer_config = + SignerConfig::load_from_str(&signer_configs[i as usize]).unwrap(); + SpawnedSigner::from(signer_config) + }) + .collect(); // Setup the nodes and deploy the contract to it let node = setup_stx_btc_node(naka_conf, &signer_stacks_private_keys, &signer_configs); @@ -145,9 +138,7 @@ impl SignerTest { Self { running_nodes: node, - result_receivers, - signer_cmd_senders, - running_signers, + spawned_signers, signer_stacks_private_keys, stacks_client, run_stamp, @@ -425,10 +416,11 @@ impl SignerTest { debug!("Waiting for DKG..."); let mut key = Point::default(); let dkg_now = Instant::now(); - for recv in self.result_receivers.iter() { + for signer in self.spawned_signers.iter() { let mut aggregate_public_key = None; loop { - let results = recv + let results = signer + .res_recv .recv_timeout(timeout) .expect("failed to recv dkg results"); for result in results { @@ -496,6 +488,34 @@ impl SignerTest { .expect("FATAL: signer not registered") } + fn get_signer_public_keys(&self, reward_cycle: u64) -> PublicKeys { + let entries = self + .stacks_client + .get_reward_set_signers(reward_cycle) + .unwrap() + .unwrap(); + let entries = SignerEntries::parse(false, &entries).unwrap(); + entries.public_keys + } + + #[allow(dead_code)] + fn get_signer_metrics(&self) -> String { + #[cfg(feature = "monitoring_prom")] + { + let client = reqwest::blocking::Client::new(); + let res = client + .get("http://localhost:9000/metrics") + .send() + .unwrap() + .text() + .unwrap(); + + return res; + } + #[cfg(not(feature = 
"monitoring_prom"))] + return String::new(); + } + fn generate_invalid_transactions(&self) -> Vec { let host = self .running_nodes @@ -549,7 +569,7 @@ impl SignerTest { None, ), }; - let invalid_contract_address = StacksClient::build_signed_contract_call_transaction( + let invalid_contract_address = StacksClient::build_unsigned_contract_call_transaction( &StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&signer_private_key)), contract_name.clone(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -558,11 +578,10 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 1, - 10, ) .unwrap(); - let invalid_contract_name = StacksClient::build_signed_contract_call_transaction( + let invalid_contract_name = StacksClient::build_unsigned_contract_call_transaction( &contract_addr, "bad-signers-contract-name".into(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -571,11 +590,10 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 1, - 10, ) .unwrap(); - let invalid_signers_vote_function = StacksClient::build_signed_contract_call_transaction( + let invalid_signers_vote_function = StacksClient::build_unsigned_contract_call_transaction( &contract_addr, contract_name.clone(), "some-other-function".into(), @@ -584,12 +602,11 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 1, - 10, ) .unwrap(); let invalid_function_arg_signer_index = - StacksClient::build_signed_contract_call_transaction( + StacksClient::build_unsigned_contract_call_transaction( &contract_addr, contract_name.clone(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -603,11 +620,10 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 1, - 10, ) .unwrap(); - let invalid_function_arg_key = StacksClient::build_signed_contract_call_transaction( + let invalid_function_arg_key = StacksClient::build_unsigned_contract_call_transaction( &contract_addr, contract_name.clone(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -621,11 +637,10 @@ impl SignerTest { 
TransactionVersion::Testnet, CHAIN_ID_TESTNET, 1, - 10, ) .unwrap(); - let invalid_function_arg_round = StacksClient::build_signed_contract_call_transaction( + let invalid_function_arg_round = StacksClient::build_unsigned_contract_call_transaction( &contract_addr, contract_name.clone(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -639,12 +654,11 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 1, - 10, ) .unwrap(); let invalid_function_arg_reward_cycle = - StacksClient::build_signed_contract_call_transaction( + StacksClient::build_unsigned_contract_call_transaction( &contract_addr, contract_name.clone(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -658,11 +672,10 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 1, - 10, ) .unwrap(); - let invalid_nonce = StacksClient::build_signed_contract_call_transaction( + let invalid_nonce = StacksClient::build_unsigned_contract_call_transaction( &contract_addr, contract_name.clone(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -671,7 +684,6 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 0, // Old nonce - 10, ) .unwrap(); @@ -682,10 +694,10 @@ impl SignerTest { false, ); let invalid_signer_tx = invalid_stacks_client - .build_vote_for_aggregate_public_key(0, round, point, reward_cycle, None, 0) + .build_unsigned_vote_for_aggregate_public_key(0, round, point, reward_cycle, 0) .expect("FATAL: failed to build vote for aggregate public key"); - vec![ + let unsigned_txs = vec![ invalid_nonce, invalid_not_contract_call, invalid_contract_name, @@ -696,7 +708,15 @@ impl SignerTest { invalid_function_arg_round, invalid_function_arg_signer_index, invalid_signer_tx, - ] + ]; + unsigned_txs + .into_iter() + .map(|unsigned| { + invalid_stacks_client + .sign_transaction(unsigned) + .expect("Failed to sign transaction") + }) + .collect() } /// Kills the signer runloop at index `signer_idx` @@ -705,12 +725,10 @@ impl SignerTest { /// # Panics /// Panics if `signer_idx` is out of bounds fn 
stop_signer(&mut self, signer_idx: usize) -> StacksPrivateKey { - let running_signer = self.running_signers.remove(signer_idx); - self.signer_cmd_senders.remove(signer_idx); - self.result_receivers.remove(signer_idx); + let spawned_signer = self.spawned_signers.remove(signer_idx); let signer_key = self.signer_stacks_private_keys.remove(signer_idx); - running_signer.stop(); + spawned_signer.stop(); signer_key } @@ -724,19 +742,17 @@ impl SignerTest { "12345", // It worked sir, we have the combination! -Great, what's the combination? self.run_stamp, 3000 + signer_idx, + Some(100_000), + None, + Some(9000 + signer_idx), ) .pop() .unwrap(); - let (cmd_send, cmd_recv) = channel(); - let (res_send, res_recv) = channel(); - info!("Restarting signer"); - let signer = spawn_signer(&signer_config, cmd_recv, res_send); - - self.result_receivers.insert(signer_idx, res_recv); - self.signer_cmd_senders.insert(signer_idx, cmd_send); - self.running_signers.insert(signer_idx, signer); + let config = SignerConfig::load_from_str(&signer_config).unwrap(); + let signer = SpawnedSigner::from(config); + self.spawned_signers.insert(signer_idx, signer); } fn shutdown(self) { @@ -750,32 +766,13 @@ impl SignerTest { .run_loop_stopper .store(false, Ordering::SeqCst); // Stop the signers before the node to prevent hanging - for signer in self.running_signers { + for signer in self.spawned_signers { assert!(signer.stop().is_none()); } self.running_nodes.run_loop_thread.join().unwrap(); } } -fn spawn_signer( - data: &str, - receiver: Receiver, - sender: Sender>, -) -> RunningSigner> { - let config = SignerConfig::load_from_str(data).unwrap(); - let ev = SignerEventReceiver::new(config.network.is_mainnet()); - let endpoint = config.endpoint; - let runloop: stacks_signer::runloop::RunLoop = stacks_signer::runloop::RunLoop::from(config); - let mut signer: Signer< - RunLoopCommand, - Vec, - stacks_signer::runloop::RunLoop, - SignerEventReceiver, - > = Signer::new(runloop, ev, receiver, sender); - 
info!("Spawning signer on endpoint {}", endpoint); - signer.spawn(endpoint).unwrap() -} - fn setup_stx_btc_node( mut naka_conf: NeonConfig, signer_stacks_private_keys: &[StacksPrivateKey], @@ -942,8 +939,9 @@ fn stackerdb_dkg() { // Determine the coordinator of the current node height info!("signer_runloop: spawn send commands to do dkg"); let dkg_now = Instant::now(); - for sender in signer_test.signer_cmd_senders.iter() { - sender + for signer in signer_test.spawned_signers.iter() { + signer + .cmd_send .send(RunLoopCommand { reward_cycle, command: SignerCommand::Dkg, @@ -959,8 +957,8 @@ fn stackerdb_dkg() { #[test] #[ignore] -/// Test the signer can respond to external commands to perform DKG -fn stackerdb_sign() { +/// Test the signer rejects requests to sign that do not come from a miner +fn stackerdb_sign_request_rejected() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -1033,13 +1031,23 @@ fn stackerdb_sign() { info!("------------------------- Test Sign -------------------------"); let reward_cycle = signer_test.get_current_reward_cycle(); + let block_proposal_1 = BlockProposal { + block: block1.clone(), + burn_height: 0, + reward_cycle, + }; + let block_proposal_2 = BlockProposal { + block: block2.clone(), + burn_height: 0, + reward_cycle, + }; // Determine the coordinator of the current node height info!("signer_runloop: spawn send commands to do sign"); let sign_now = Instant::now(); let sign_command = RunLoopCommand { reward_cycle, command: SignerCommand::Sign { - block: block1, + block_proposal: block_proposal_1, is_taproot: false, merkle_root: None, }, @@ -1047,16 +1055,18 @@ fn stackerdb_sign() { let sign_taproot_command = RunLoopCommand { reward_cycle, command: SignerCommand::Sign { - block: block2, + block_proposal: block_proposal_2, is_taproot: true, merkle_root: None, }, }; - for sender in signer_test.signer_cmd_senders.iter() { - sender + for signer in signer_test.spawned_signers.iter() { + signer + .cmd_send 
.send(sign_command.clone()) .expect("failed to send sign command"); - sender + signer + .cmd_send .send(sign_taproot_command.clone()) .expect("failed to send sign taproot command"); } @@ -1094,6 +1104,167 @@ fn stackerdb_sign() { info!("Sign Time Elapsed: {:.2?}", sign_elapsed); } +#[test] +#[ignore] +/// Test that a signer can be offline when a DKG round has commenced and +/// can rejoin the DKG round after it has restarted +fn stackerdb_delayed_dkg() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let timeout = Duration::from_secs(200); + let num_signers = 3; + let mut signer_test = SignerTest::new(num_signers); + boot_to_epoch_3_reward_set_calculation_boundary( + &signer_test.running_nodes.conf, + &signer_test.running_nodes.blocks_processed, + &signer_test.signer_stacks_private_keys, + &signer_test.signer_stacks_private_keys, + &mut signer_test.running_nodes.btc_regtest_controller, + ); + let reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); + let public_keys = signer_test.get_signer_public_keys(reward_cycle); + let coordinator_selector = CoordinatorSelector::from(public_keys); + let (_, coordinator_public_key) = coordinator_selector.get_coordinator(); + let coordinator_public_key = + StacksPublicKey::from_slice(coordinator_public_key.to_bytes().as_slice()).unwrap(); + let signer_slot_ids: Vec<_> = (0..num_signers) + .into_iter() + .map(|i| SignerSlotID(i as u32)) + .collect(); + let mut stackerdbs: Vec<_> = signer_slot_ids + .iter() + .map(|i| { + StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // Doesn't matter what key we use. 
We are just reading, not writing + false, + reward_cycle, + *i, + ) + }) + .collect(); + info!("------------------------- Stop Signers -------------------------"); + let mut to_stop = None; + for (idx, key) in signer_test.signer_stacks_private_keys.iter().enumerate() { + let public_key = StacksPublicKey::from_private(key); + if public_key == coordinator_public_key { + // Do not stop the coordinator. We want coordinator to start a DKG round + continue; + } + // Only stop one signer + to_stop = Some(idx); + break; + } + let signer_idx = to_stop.expect("Failed to find a signer to stop"); + let signer_key = signer_test.stop_signer(signer_idx); + debug!( + "Removed signer {signer_idx} with key: {:?}, {}", + signer_key, + signer_key.to_hex() + ); + info!("------------------------- Start DKG -------------------------"); + info!("Waiting for DKG to start..."); + // Advance one more to trigger DKG + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + timeout.as_secs(), + || Ok(true), + ) + .expect("Failed to mine bitcoin block"); + // Do not proceed until we guarantee that DKG was triggered + let start_time = Instant::now(); + loop { + let stackerdb = stackerdbs.first_mut().unwrap(); + let dkg_packets: Vec<_> = stackerdb + .get_dkg_packets(&signer_slot_ids) + .expect("Failed to get dkg packets"); + let begin_packets: Vec<_> = dkg_packets + .iter() + .filter_map(|packet| { + if matches!(packet.msg, Message::DkgBegin(_)) { + Some(packet) + } else { + None + } + }) + .collect(); + if !begin_packets.is_empty() { + break; + } + assert!( + start_time.elapsed() < Duration::from_secs(30), + "Timed out waiting for DKG to be triggered" + ); + } + + info!("------------------------- Restart Stopped Signer -------------------------"); + + signer_test.restart_signer(signer_idx, signer_key); + + info!("------------------------- Wait for DKG -------------------------"); + let key = signer_test.wait_for_dkg(timeout); + let mut transactions = 
HashSet::with_capacity(num_signers); + let start_time = Instant::now(); + while transactions.len() < num_signers { + for stackerdb in stackerdbs.iter_mut() { + let current_transactions = stackerdb + .get_current_transactions() + .expect("Failed getting current transactions for signer slot id"); + for tx in current_transactions { + transactions.insert(tx.txid()); + } + } + assert!( + start_time.elapsed() < Duration::from_secs(30), + "Failed to retrieve pending vote transactions within timeout" + ); + } + + // Make sure transactions get mined + let start_time = Instant::now(); + while !transactions.is_empty() { + assert!( + start_time.elapsed() < Duration::from_secs(30), + "Failed to mine transactions within timeout" + ); + next_block_and_wait( + &mut signer_test.running_nodes.btc_regtest_controller, + &signer_test.running_nodes.blocks_processed, + ); + let blocks = test_observer::get_blocks(); + for block in blocks.iter() { + let txs = block.get("transactions").unwrap().as_array().unwrap(); + for tx in txs.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + transactions.remove(&parsed.txid()); + } + } + } + + // Make sure DKG did get set + assert_eq!( + key, + signer_test + .stacks_client + .get_approved_aggregate_key(reward_cycle) + .expect("Failed to get approved aggregate key") + .expect("No approved aggregate key found") + ); +} + pub fn find_block_response(chunk_events: Vec) -> Option { for event in chunk_events.into_iter() { if event.contract_id.name.as_str() @@ -1141,7 +1312,8 @@ fn stackerdb_block_proposal() { .init(); info!("------------------------- Test Setup -------------------------"); - let mut signer_test = SignerTest::new(5); + let num_signers = 5; + let mut signer_test = SignerTest::new(num_signers); let timeout = Duration::from_secs(200); let short_timeout 
= Duration::from_secs(30); @@ -1159,6 +1331,17 @@ fn stackerdb_block_proposal() { .0 .verify(&key, proposed_signer_signature_hash.as_bytes())); + // Test prometheus metrics response + #[cfg(feature = "monitoring_prom")] + { + let metrics_response = signer_test.get_signer_metrics(); + + // Because 5 signers are running in the same process, the prometheus metrics + // are incremented once for every signer. This is why we expect the metric to be + // `5`, even though there is only one block proposed. + let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); + assert!(metrics_response.contains(&expected_result)); + } signer_test.shutdown(); } @@ -1350,7 +1533,8 @@ fn stackerdb_sign_after_signer_reboot() { .init(); info!("------------------------- Test Setup -------------------------"); - let mut signer_test = SignerTest::new(3); + let num_signers = 3; + let mut signer_test = SignerTest::new(num_signers); let timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(30); @@ -1379,11 +1563,20 @@ fn stackerdb_sign_after_signer_reboot() { info!("------------------------- Test Mine Block after restart -------------------------"); - signer_test.mine_nakamoto_block(timeout); + let last_block = signer_test.mine_nakamoto_block(timeout); let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout); let frost_signature = signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, short_timeout); + // Check that the latest block's bitvec is all 1's + assert_eq!( + last_block.signer_bitvec, + serde_json::to_value(BitVec::<4000>::ones(num_signers as u16).unwrap()) + .expect("Failed to serialize BitVec") + .as_str() + .expect("Failed to serialize BitVec") + ); + assert!( frost_signature.verify(&key, proposed_signer_signature_hash.0.as_slice()), "Signature verification failed"