diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..abcdea7256 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,21 @@ + + +### Description + +### Applicable issues +- fixes # + +### Additional info (benefits, drawbacks, caveats) + +### Checklist +- [ ] Test coverage for new or modified code paths +- [ ] Changelog is updated +- [ ] Required documentation changes (e.g., `docs/rpc/openapi.yaml` and `rpc-endpoints.md` for v2 endpoints, `event-dispatcher.md` for new events) +- [ ] New clarity functions have corresponding PR in `clarity-benchmarking` repo +- [ ] New integration test(s) added to `bitcoin-tests.yml` diff --git a/.github/actions/bitcoin-int-tests/Dockerfile.code-cov b/.github/actions/bitcoin-int-tests/Dockerfile.code-cov index fc3e391ae8..7a787a464e 100644 --- a/.github/actions/bitcoin-int-tests/Dockerfile.code-cov +++ b/.github/actions/bitcoin-int-tests/Dockerfile.code-cov @@ -2,7 +2,7 @@ FROM rust:bullseye AS test WORKDIR /build -RUN rustup override set nightly && \ +RUN rustup override set nightly-2022-01-14 && \ rustup component add llvm-tools-preview && \ cargo install grcov diff --git a/.github/actions/bitcoin-int-tests/Dockerfile.generic.bitcoin-tests b/.github/actions/bitcoin-int-tests/Dockerfile.generic.bitcoin-tests index 904efb63c0..42a0235cf8 100644 --- a/.github/actions/bitcoin-int-tests/Dockerfile.generic.bitcoin-tests +++ b/.github/actions/bitcoin-int-tests/Dockerfile.generic.bitcoin-tests @@ -6,7 +6,7 @@ COPY . . WORKDIR /src/testnet/stacks-node -RUN rustup override set nightly && \ +RUN rustup override set nightly-2022-01-14 && \ rustup component add llvm-tools-preview && \ cargo install grcov diff --git a/.github/actions/bitcoin-int-tests/Dockerfile.large-genesis b/.github/actions/bitcoin-int-tests/Dockerfile.large-genesis index f0f80391e6..4f96fd304b 100644 --- a/.github/actions/bitcoin-int-tests/Dockerfile.large-genesis +++ b/.github/actions/bitcoin-int-tests/Dockerfile.large-genesis @@ -9,7 +9,7 @@ RUN cd / && tar -xvzf bitcoin-0.20.0-x86_64-linux-gnu.tar.gz RUN ln -s /bitcoin-0.20.0/bin/bitcoind /bin/ -RUN rustup override set nightly && \ +RUN rustup override set nightly-2022-01-14 && \ rustup component add llvm-tools-preview && \ cargo install grcov diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 1258e12c5c..baf6e91d17 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -5,6 +5,11 @@ name: stacks-bitcoin-integration-tests on: pull_request: +concurrency: + group: stacks-bitcoin-integration-tests-${{ github.ref }} + # Only cancel in progress if this is for a PR + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + jobs: build-integration-image: runs-on: ubuntu-latest @@ -43,6 +48,7 @@ jobs: - tests::neon_integrations::bitcoind_forking_test - tests::neon_integrations::should_fix_2771 - tests::neon_integrations::pox_integration_test + - tests::neon_integrations::mining_events_integration_test - tests::bitcoin_regtest::bitcoind_integration_test - tests::should_succeed_handling_malformed_and_valid_txs - tests::neon_integrations::size_overflow_unconfirmed_microblocks_integration_test @@ -53,10 +59,15 @@ jobs: - tests::neon_integrations::filter_low_fee_tx_integration_test - tests::neon_integrations::filter_long_runtime_tx_integration_test - tests::neon_integrations::mining_transactions_is_fair + - tests::neon_integrations::fuzzed_median_fee_rate_estimation_test_window5 + - 
tests::neon_integrations::fuzzed_median_fee_rate_estimation_test_window10 + - tests::neon_integrations::use_latest_tip_integration_test + - tests::neon_integrations::test_flash_block_skip_tenure - tests::epoch_205::test_dynamic_db_method_costs - tests::epoch_205::transition_empty_blocks - tests::epoch_205::test_cost_limit_switch_version205 - tests::epoch_205::test_exact_block_costs + - tests::epoch_205::bigger_microblock_streams_in_2_05 steps: - uses: actions/checkout@v2 - name: Download docker image diff --git a/.github/workflows/stacks-blockchain.yml b/.github/workflows/stacks-blockchain.yml index f124b9498b..7250d7fa8a 100644 --- a/.github/workflows/stacks-blockchain.yml +++ b/.github/workflows/stacks-blockchain.yml @@ -12,6 +12,11 @@ on: description: 'The tag to create (optional)' required: false +concurrency: + group: stacks-blockchain-${{ github.ref }} + # Only cancel in progress if this is for a PR + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + jobs: # Notify Slack channel of workflow start notify-start: @@ -244,7 +249,7 @@ jobs: - create-release strategy: matrix: - platform: [ windows-x64, macos-x64, linux-x64, linux-musl-x64, linux-armv7, linux-arm64 ] + platform: [ windows-x64, macos-x64, macos-arm64, linux-x64, linux-musl-x64, linux-armv7, linux-arm64 ] steps: - uses: actions/checkout@v2 diff --git a/CHANGELOG.md b/CHANGELOG.md index ed63bec8c9..a1bf897a4e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,79 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [2.05.0.1.0] + +### Added +- A new fee estimator intended to produce fewer over-estimates, by having less + sensitivity to outliers. Its characteristic features are: 1) use a window to + forget past estimates instead of exponential averaging, 2) use weighted + percentiles, so that bigger transactions influence the estimates more, 3) + assess empty space in blocks as having paid the "minimum fee", so that empty + space is accounted for, 4) use random "fuzz" so that in busy times the fees can + change dynamically (see the sketch below). (#2972) +- Implements an anti-entropy protocol for querying transactions from other + nodes' mempools. Before, nodes wouldn't sync mempool contents with one another. + (#2884) +- Structured logging in the mining code paths. This will shine light + on what happens to transactions (successfully added, skipped or errored) that the + miner considers while building blocks. (#2975) +- Added the mined microblock event, which includes information on transaction + events that occurred in the course of mining (will provide insight + into whether a transaction was successfully added to the block, + skipped, or had a processing error). (#2975) +- For v2 endpoints, the `tip` parameter can now be set to `latest`. If + `tip=latest`, the node will try to run the query off of the latest tip. (#2778) +- Adds the /v2/headers endpoint, which returns a sequence of SIP-003-encoded + block headers and consensus hashes (see the ExtendedStacksHeader struct that + this PR adds to represent this data). (#2862) +- Adds the /v2/data_var endpoint, which returns a contract's data variable + value and a MARF proof of its existence. 
(#2862) +- Fixed a bug in the unconfirmed state processing logic that could lead to a + denial of service (node crash) for nodes that mine microblocks (#2970) +- Added a prometheus metric that tracks block fullness by logging the percentage of each + cost dimension that is consumed in a given block (#3025). + + +### Changed +- Updated the mined block event. It now includes information on transaction + events that occurred in the course of mining (will provide insight + into whether a transaction was successfully added to the block, + skipped, or had a processing error). (#2975) +- Updated some of the block assembly logic for the miner and the follower + to consolidate similar code. Added functions `setup_block` and `finish_block`. + (#2946) +- Makes the p2p state machine more reactive to newly-arrived + `BlocksAvailable` and `MicroblocksAvailable` messages for block and microblock + streams that this node does not have. If such messages arrive during an inventory + sync, the p2p state machine will immediately transition from the inventory sync + work state to the block downloader work state, and immediately proceed to fetch + the available block or microblock stream. (#2862) +- Nodes will push recently-obtained blocks and microblock streams to outbound + neighbors if their cached inventories indicate that they do not yet have them +(#2986). +- Nodes will no longer perform full inventory scans on their peers, except + during boot-up, in a bid to minimize block-download stalls (#2986). +- Nodes will process sortitions in parallel to downloading the Stacks blocks for + a reward cycle, instead of doing these tasks sequentially (#2986). +- The node's runloop will coalesce and expire stale requests to mine blocks on + top of parent blocks that are no longer the chain tip (#2969). +- Several database indexes have been updated to avoid table scans, which + significantly improves most RPC endpoint speed and cuts node spin-up time in +half (#2989, #3005). +- Fixed a rare denial-of-service bug whereby a node that processes a very deep + burnchain reorg can get stuck, and be rendered unable to process further +sortitions. This has never happened in production, but it can be replicated in +tests (#2989). +- Updated which indexes are created, and ensured that indexes are created even + after the database is initialized (#3029). + +### Fixed +- Updates the lookup key for contracts in the pessimistic cost estimator. Before, contracts + published by different principals with the same name would have had the same + key in the cost estimator. (#2984) +- Fixed a few prometheus metrics to be more accurate compared to `/v2` endpoints + when polling data (#2987) + ## [2.05.0.0.0] This software update is a consensus changing release and the @@ -16,12 +89,12 @@ directories before the 2.05 consensus changes activate (Bitcoin height run a 2.05 node afterwards, you must start from a new chainstate directory. -## Added +### Added - At height 713,000 a new `costs-2` contract will be launched by the Stacks boot address. -## Changed +### Changed - Stacks blocks whose parents are mined >= 713,000 will use default costs from the new `costs-2` contract. @@ -31,7 +104,7 @@ directory. - Stacks blocks whose parents are mined >= 713,000 will use the new block limit defined in SIP-012. -## Fixed +### Fixed - Miners are now more aggressive in calculating their block limits when confirming microblocks (#2916) @@ -43,7 +116,7 @@ selection logic in the default miner to prioritize by an estimated fee rate instead of raw fee. 
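A minimal sketch of the windowed, weighted-percentile scheme described under [2.05.0.1.0] above may help make its four features concrete. This is not the code from #2972: the names (`WindowedFeeEstimator`, `notify_block`), the per-block sample granularity, the xorshift fuzz source, and the ±5% fuzz range are all illustrative assumptions; the real estimator operates over the SIP-012 block cost dimensions and is wired up through the node's fee estimator configuration.

```rust
use std::collections::VecDeque;

/// One observed (fee rate, size) sample from a confirmed transaction.
/// `weight` is the fraction of the block's capacity the transaction used.
#[derive(Clone, Copy)]
struct Sample {
    fee_rate: f64,
    weight: f64,
}

/// Illustrative windowed, weighted-percentile fee estimator (assumed API).
struct WindowedFeeEstimator {
    window: VecDeque<Vec<Sample>>, // one sample set per block, newest last
    window_size: usize,            // feature 1: blocks to remember, then forget
    min_fee_rate: f64,             // feature 3: rate assessed for empty space
    rng_state: u64,                // feature 4: state for the random "fuzz"
}

impl WindowedFeeEstimator {
    fn new(window_size: usize, min_fee_rate: f64) -> Self {
        Self {
            window: VecDeque::new(),
            window_size,
            min_fee_rate,
            rng_state: 0x2545_F491_4F6C_DD1D,
        }
    }

    /// Record a confirmed block as (fee_rate, fraction_of_block) pairs.
    /// Unused capacity is padded as if it had paid the minimum fee rate,
    /// so empty space pulls the estimate down (feature 3).
    fn notify_block(&mut self, txs: &[(f64, f64)]) {
        let mut samples: Vec<Sample> = txs
            .iter()
            .map(|&(fee_rate, weight)| Sample { fee_rate, weight })
            .collect();
        let used: f64 = samples.iter().map(|s| s.weight).sum();
        if used < 1.0 {
            samples.push(Sample { fee_rate: self.min_fee_rate, weight: 1.0 - used });
        }
        self.window.push_back(samples);
        // Feature 1: drop whole blocks that age out, rather than decaying them.
        while self.window.len() > self.window_size {
            self.window.pop_front();
        }
    }

    /// Feature 2: a weighted percentile over every sample in the window,
    /// so bigger transactions influence the estimate proportionally more.
    fn weighted_percentile(&self, pct: f64) -> f64 {
        let mut all: Vec<Sample> = self.window.iter().flatten().copied().collect();
        all.sort_by(|a, b| a.fee_rate.partial_cmp(&b.fee_rate).unwrap());
        let total: f64 = all.iter().map(|s| s.weight).sum();
        let mut acc = 0.0;
        for s in &all {
            acc += s.weight;
            if acc >= pct * total {
                return s.fee_rate;
            }
        }
        self.min_fee_rate // empty window: quote the floor
    }

    /// Feature 4: jitter the weighted median by +/- 5% so quotes keep
    /// moving during busy periods even when the percentile is static.
    fn estimate(&mut self) -> f64 {
        let median = self.weighted_percentile(0.5);
        // xorshift step; a real implementation would use a proper RNG.
        self.rng_state ^= self.rng_state << 13;
        self.rng_state ^= self.rng_state >> 7;
        self.rng_state ^= self.rng_state << 17;
        let unit = (self.rng_state % 1000) as f64 / 1000.0;
        median * (1.0 + (unit - 0.5) * 0.1)
    }
}

fn main() {
    let mut est = WindowedFeeEstimator::new(5, 1.0);
    est.notify_block(&[(50.0, 0.6), (120.0, 0.3)]); // mostly full block
    est.notify_block(&[(80.0, 0.9)]);               // mostly full block
    est.notify_block(&[(200.0, 0.05)]);             // nearly empty block
    println!("median fee-rate estimate: {:.2}", est.estimate());
}
```

The padding step is what lets estimates fall quickly after congestion clears: the nearly empty third block above contributes 95% of its weight at the minimum rate, dragging the weighted median down despite the one expensive transaction it carried.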
This release's chainstate directory is compatible with chainstate directories from 2.0.11.3.0. -## Added +### Added - FeeEstimator and CostEstimator interfaces. These can be controlled via node configuration options. See the `README.md` for more @@ -51,7 +124,7 @@ compatible with chainstate directories from 2.0.11.3.0. - New fee rate estimation endpoint `/v2/fees/transaction` (#2872). See `docs/rpc/openapi.yaml` for more information. -## Changed +### Changed - Prioritize transaction inclusion in blocks by estimated fee rates (#2859). - MARF sqlite connections will now use `mmap`'ed connections with up to 256MB diff --git a/Cargo.lock b/Cargo.lock index 5d4d9bd55c..318e52caff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,18 +4,18 @@ version = 3 [[package]] name = "addr2line" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" dependencies = [ "gimli", ] [[package]] name = "adler" -version = "0.2.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "adler32" @@ -24,37 +24,79 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" [[package]] -name = "ahash" -version = "0.4.7" +name = "aead" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" +checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" +dependencies = [ + "generic-array 0.14.5", +] [[package]] -name = "aho-corasick" -version = "0.7.13" +name = "aes" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" +checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" dependencies = [ - "memchr", + "aes-soft", + "aesni", + "cipher", ] [[package]] -name = "anyhow" -version = "1.0.31" +name = "aes-gcm" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bb70cc08ec97ca5450e6eba421deeea5f172c0fc61f78b5357b2a8e8be195f" +checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da" +dependencies = [ + "aead", + "aes", + "cipher", + "ctr", + "ghash", + "subtle", +] [[package]] -name = "arrayref" -version = "0.3.6" +name = "aes-soft" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" +dependencies = [ + "cipher", + "opaque-debug 0.3.0", +] [[package]] -name = "arrayvec" -version = "0.5.1" +name = "aesni" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" +dependencies = [ + "cipher", + "opaque-debug 0.3.0", +] + +[[package]] +name = "ahash" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" + +[[package]] +name = 
"aho-corasick" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +dependencies = [ + "memchr", +] + +[[package]] +name = "anyhow" +version = "1.0.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" +checksum = "94a45b455c14666b85fc40a019e8ab9eb75e3a124e05494f5397122bc9eb06e0" [[package]] name = "assert-json-diff" @@ -69,67 +111,157 @@ dependencies = [ [[package]] name = "async-attributes" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd3d156917d94862e779f356c5acae312b08fd3121e792c857d7928c8088423" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" dependencies = [ "quote", "syn", ] +[[package]] +name = "async-channel" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] + +[[package]] +name = "async-dup" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7427a12b8dc09291528cfb1da2447059adb4a257388c2acd6497a79d55cf6f7c" +dependencies = [ + "futures-io", + "simple-mutex", +] + +[[package]] +name = "async-executor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand", + "futures-lite", + "once_cell", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" +dependencies = [ + "async-channel", + "async-executor", + "async-io", + "async-mutex", + "blocking", + "futures-lite", + "num_cpus", + "once_cell", +] + [[package]] name = "async-h1" -version = "1.0.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e5ab8d40149b94ac5d12a751a9d50d2a9f05526d176897a5bf23438862bdaab" +checksum = "8101020758a4fc3a7c326cb42aa99e9fa77cbfb76987c128ad956406fe1f70a7" dependencies = [ + "async-channel", + "async-dup", "async-std", - "byte-pool", "futures-core", "http-types", "httparse", - "lazy_static", - "log 0.4.11", - "pin-project-lite", - "url", + "log", + "pin-project", +] + +[[package]] +name = "async-io" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a811e6a479f2439f0c04038796b5cfb3d2ad56c230e0f2d3f7b04d68cfee607b" +dependencies = [ + "concurrent-queue", + "futures-lite", + "libc", + "log", + "once_cell", + "parking", + "polling", + "slab", + "socket2", + "waker-fn", + "winapi 0.3.9", +] + +[[package]] +name = "async-lock" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6a8ea61bf9947a1007c5cada31e647dbc77b103c679858150003ba697ea798b" +dependencies = [ + "event-listener", +] + +[[package]] +name = "async-mutex" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +dependencies = [ + "event-listener", ] [[package]] name = "async-std" -version = "1.5.0" +version = "1.10.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "538ecb01eb64eecd772087e5b6f7540cbc917f047727339a472dafed2185b267" +checksum = "f8056f1455169ab86dd47b47391e4ab0cbd25410a70e9fe675544f49bafaf952" dependencies = [ "async-attributes", - "async-task", - "broadcaster", - "crossbeam-channel 0.4.3", - "crossbeam-deque 0.7.3", - "crossbeam-utils 0.7.2", + "async-channel", + "async-global-executor", + "async-io", + "async-lock", + "crossbeam-utils", + "futures-channel", "futures-core", "futures-io", - "futures-timer", + "futures-lite", + "gloo-timers", "kv-log-macro", - "log 0.4.11", + "log", "memchr", - "mio", - "mio-uds", "num_cpus", "once_cell", "pin-project-lite", "pin-utils", "slab", + "wasm-bindgen-futures", ] [[package]] name = "async-task" -version = "1.3.1" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ac2c016b079e771204030951c366db398864f5026f84a44dafb0ff20f02085d" -dependencies = [ - "libc", - "winapi 0.3.9", -] +checksum = "677d306121baf53310a3fd342d88dc0824f6bbeace68347593658525565abee8" + +[[package]] +name = "atomic-waker" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" [[package]] name = "atty" @@ -144,24 +276,19 @@ dependencies = [ [[package]] name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" - -[[package]] -name = "autocfg" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.50" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" +checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" dependencies = [ "addr2line", - "cfg-if 0.1.10", + "cc", + "cfg-if 1.0.0", "libc", "miniz_oxide", "object", @@ -170,21 +297,21 @@ dependencies = [ [[package]] name = "base-x" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" +checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" [[package]] name = "base64" -version = "0.11.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" [[package]] name = "base64" -version = "0.12.3" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "bitflags" @@ -192,17 +319,6 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" -[[package]] -name = "blake2b_simd" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" -dependencies = [ - "arrayref", - "arrayvec", - "constant_time_eq", -] - [[package]] name = "block-buffer" version = "0.7.3" @@ -212,7 +328,7 @@ dependencies = [ "block-padding", "byte-tools", "byteorder", - "generic-array 0.12.3", + "generic-array 0.12.4", ] [[package]] @@ -221,7 +337,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.5", ] [[package]] @@ -233,6 +349,20 @@ dependencies = [ "byte-tools", ] +[[package]] +name = "blocking" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "046e47d4b2d391b1f6f8b407b1deb8dee56c1852ccd868becf2710f601b5f427" +dependencies = [ + "async-channel", + "async-task", + "atomic-waker", + "fastrand", + "futures-lite", + "once_cell", +] + [[package]] name = "blockstack-core" version = "0.0.1" @@ -245,11 +375,11 @@ dependencies = [ "integer-sqrt", "lazy_static", "libc", - "mio", + "mio 0.6.23", "nix", "percent-encoding", "prometheus", - "rand 0.7.2", + "rand 0.7.3", "rand_chacha 0.2.2", "regex", "ripemd160", @@ -262,34 +392,21 @@ dependencies = [ "sha2 0.8.2", "sha2-asm", "sha3", + "siphasher", "slog", "slog-json", "slog-term", "stx-genesis", - "time 0.2.23", + "time 0.2.27", "url", "winapi 0.3.9", ] -[[package]] -name = "broadcaster" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9c972e21e0d055a36cf73e4daae870941fe7a8abcd5ac3396aab9e4c126bd87" -dependencies = [ - "futures-channel", - "futures-core", - "futures-sink", - "futures-util", - "parking_lot", - "slab", -] - [[package]] name = "bstr" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" dependencies = [ "lazy_static", "memchr", @@ -309,19 +426,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" - -[[package]] -name = "byte-pool" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9342e102eac8b1879fbedf9a7e0572c40b0cc5805b663c4d4ca791cae0bae221" -dependencies = [ - "crossbeam-queue", - "stable_deref_trait", -] +checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "byte-tools" @@ -331,15 +438,21 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "0.5.6" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" + +[[package]] +name = "cache-padded" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +checksum = 
"c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" [[package]] name = "cast" @@ -352,9 +465,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.66" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" [[package]] name = "cfg-if" @@ -377,10 +490,19 @@ dependencies = [ "libc", "num-integer", "num-traits", - "time 0.1.43", + "time 0.1.44", "winapi 0.3.9", ] +[[package]] +name = "cipher" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" +dependencies = [ + "generic-array 0.14.5", +] + [[package]] name = "clap" version = "2.34.0" @@ -403,11 +525,11 @@ dependencies = [ "integer-sqrt", "lazy_static", "libc", - "mio", + "mio 0.6.23", "nix", "percent-encoding", "prometheus", - "rand 0.7.2", + "rand 0.7.3", "rand_chacha 0.2.2", "regex", "ripemd160", @@ -420,11 +542,12 @@ dependencies = [ "sha2 0.8.2", "sha2-asm", "sha3", + "siphasher", "slog", "slog-json", "slog-term", "stx-genesis", - "time 0.2.23", + "time 0.2.27", "tini", "url", "winapi 0.3.9", @@ -440,40 +563,42 @@ dependencies = [ ] [[package]] -name = "cloudabi" -version = "0.0.3" +name = "concurrent-queue" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" dependencies = [ - "bitflags", + "cache-padded", ] [[package]] name = "const_fn" -version = "0.4.5" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" [[package]] name = "cookie" -version = "0.12.0" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "888604f00b3db336d2af898ec3c1d5d0ddf5e6d462220f2ededc33a87ac4bbd5" +checksum = "03a5d7b21829bc7b4bf4754a978a241ae54ea55a40f92bb20216e54096f4b951" dependencies = [ - "time 0.1.43", + "aes-gcm", + "base64 0.13.0", + "hkdf", + "hmac", + "percent-encoding", + "rand 0.8.4", + "sha2 0.9.9", + "time 0.2.27", + "version_check", ] [[package]] name = "core-foundation" -version = "0.7.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" dependencies = [ "core-foundation-sys", "libc", @@ -481,21 +606,30 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.7.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" + +[[package]] +name = "cpufeatures" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" 
+dependencies = [ + "libc", +] [[package]] name = "cpuid-bool" -version = "0.1.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" +checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" dependencies = [ "cfg-if 1.0.0", ] @@ -536,16 +670,6 @@ dependencies = [ "itertools", ] -[[package]] -name = "crossbeam-channel" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ee0cc8804d5393478d743b035099520087a5186f3b93fa58cec08fa62407b6" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", -] - [[package]] name = "crossbeam-channel" version = "0.5.2" @@ -553,18 +677,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.6", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" -dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-utils", ] [[package]] @@ -574,68 +687,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.6", - "crossbeam-utils 0.8.6", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.8.2" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg 1.0.0", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.5", - "scopeguard", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97242a70df9b89a65d0b6df3c4bf5b9ce03c5b7309019777fbde37e7537f8762" +checksum = "c00d6d2ea26e8b151d99093005cb442fb9a37aeaca582a03ec70946f49ab5ed9" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.6", + "crossbeam-utils", "lazy_static", - "memoffset 0.6.4", + "memoffset", "scopeguard", ] -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - [[package]] name = "crossbeam-utils" -version = "0.7.2" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +checksum = "b5e5bed1f1c269533fa816a0a5492b3545209a205ca1a54842be180eb63a16a6" dependencies = [ - "autocfg 1.0.0", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "lazy_static", ] [[package]] -name = "crossbeam-utils" -version = "0.8.6" +name = "crypto-mac" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" +checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" dependencies = [ - "cfg-if 1.0.0", - "lazy_static", + "generic-array 0.14.5", + "subtle", ] [[package]] @@ -646,7 +732,7 @@ checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" dependencies = [ "bstr", "csv-core", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] @@ -660,6 +746,25 @@ dependencies = [ "memchr", ] +[[package]] +name = "ctor" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccc0a48a9b826acdf4028595adc9db92caea352f7af011a3034acd172a52a0aa" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "ctr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" +dependencies = [ + "cipher", +] + [[package]] name = "curve25519-dalek" version = "2.0.0" @@ -680,7 +785,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" dependencies = [ - "generic-array 0.12.3", + "generic-array 0.12.4", ] [[package]] @@ -689,24 +794,24 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.5", ] [[package]] -name = "dirs" -version = "2.0.2" +name = "dirs-next" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "cfg-if 0.1.10", - "dirs-sys", + "cfg-if 1.0.0", + "dirs-sys-next", ] [[package]] -name = "dirs-sys" -version = "0.3.5" +name = "dirs-sys-next" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" dependencies = [ "libc", "redox_users", @@ -719,12 +824,6 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" -[[package]] -name = "dtoa" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" - [[package]] name = "ed25519-dalek" version = "1.0.0-pre.3" @@ -733,7 +832,7 @@ checksum = "978710b352437433c97b2bff193f2fb1dfd58a093f863dd95e225a19baa599a2" dependencies = [ "clear_on_drop", "curve25519-dalek", - "rand 0.7.2", + "rand 0.7.3", "serde", "sha2 0.8.2", ] @@ -746,13 +845,19 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.23" +version = "0.8.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171" +checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", ] +[[package]] +name = "event-listener" +version = "2.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71" + [[package]] name = "extend" version = "0.1.2" @@ -783,6 +888,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +dependencies = [ + "instant", +] + [[package]] name = "fnv" version = "1.0.7" @@ -805,10 +919,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] -name = "fuchsia-cprng" -version = "0.1.1" +name = "form_urlencoded" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +dependencies = [ + "matches", + "percent-encoding", +] [[package]] name = "fuchsia-zircon" @@ -826,26 +944,11 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -[[package]] -name = "futures" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - [[package]] name = "futures-channel" -version = "0.3.5" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" dependencies = [ "futures-core", "futures-sink", @@ -853,121 +956,133 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.5" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" - -[[package]] -name = "futures-executor" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] +checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" [[package]] name = "futures-io" -version = "0.3.5" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" +checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" [[package]] -name = "futures-macro" -version = "0.3.5" +name = "futures-lite" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" +checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "syn", + "fastrand", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", ] [[package]] -name = "futures-sink" -version = "0.3.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" - -[[package]] -name = "futures-task" -version = "0.3.5" +name = "futures-sink" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" -dependencies = [ - "once_cell", -] +checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" [[package]] -name = "futures-timer" -version = "2.0.2" +name = "futures-task" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" +checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" [[package]] name = "futures-util" -version = "0.3.5" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" +checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" dependencies = [ - "futures-channel", "futures-core", "futures-io", - "futures-macro", "futures-sink", "futures-task", "memchr", - "pin-project", + "pin-project-lite", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] [[package]] name = "generic-array" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" dependencies = [ "typenum", ] [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ "typenum", - "version_check 0.9.2", + "version_check", ] [[package]] name = "getrandom" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" +dependencies = [ + "cfg-if 1.0.0", "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", +] + +[[package]] +name = "ghash" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" +dependencies = [ + "opaque-debug 0.3.0", + "polyval", ] [[package]] name = "gimli" -version = "0.22.0" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" + +[[package]] +name = "gloo-timers" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" +checksum = "4d12a7f4e95cfe710f1d624fb1210b7d961a5fb05c4fd942f4feab06e61f590e" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", 
+ "wasm-bindgen", +] [[package]] name = "h2" -version = "0.2.6" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993f9e0baeed60001cf565546b0d3dbe6a6ad23f2bd31644a133c641eccf6d53" +checksum = "d9f1f717ddc7b2ba36df7e871fd88db79326551d3d6f1fc406fbfd28b582ff8e" dependencies = [ "bytes", "fnv", @@ -997,29 +1112,35 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" + [[package]] name = "hashlink" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d99cf782f0dc4372d26846bec3de7804ceb5df083c2d4462c0b8d2330e894fa8" dependencies = [ - "hashbrown", + "hashbrown 0.9.1", ] [[package]] name = "headers" -version = "0.3.2" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed18eb2459bf1a09ad2d6b1547840c3e5e62882fa09b9a6a20b1de8e3228848f" +checksum = "c84c647447a07ca16f5fbd05b633e535cc41a08d2d74ab1e08648df53be9cb89" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "bitflags", "bytes", "headers-core", "http", - "mime 0.3.16", + "httpdate", + "mime", "sha-1", - "time 0.1.43", ] [[package]] @@ -1033,60 +1154,94 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.15" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] +[[package]] +name = "hkdf" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51ab2f639c231793c5f6114bdb9bbe50a7dbbfcd7c7c6bd8475dec2d991e964f" +dependencies = [ + "digest 0.9.0", + "hmac", +] + +[[package]] +name = "hmac" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" +dependencies = [ + "crypto-mac", + "digest 0.9.0", +] + [[package]] name = "http" -version = "0.2.1" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 1.0.1", ] [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes", "http", + "pin-project-lite", ] [[package]] name = "http-types" -version = "1.2.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05af75a78dfeb163d472b1d27bebb6a8845917a069accdf53a9bed47aaff9bfc" +checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" dependencies = [ "anyhow", + "async-channel", "async-std", + "base64 0.13.0", "cookie", + "futures-lite", "infer", - "omnom", "pin-project-lite", + "rand 0.7.3", + "serde", + "serde_json", + "serde_qs", + "serde_urlencoded", "url", ] [[package]] name = "httparse" -version = "1.3.4" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" + +[[package]] +name = "httpdate" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.13.7" +version = "0.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" +checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" dependencies = [ "bytes", "futures-channel", @@ -1096,10 +1251,10 @@ dependencies = [ "http", "http-body", "httparse", - "itoa", - "pin-project", + "httpdate", + "itoa 0.4.8", + "pin-project-lite", "socket2", - "time 0.1.43", "tokio", "tower-service", "tracing", @@ -1108,22 +1263,22 @@ dependencies = [ [[package]] name = "hyper-tls" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", "hyper", "native-tls", "tokio", - "tokio-tls", + "tokio-native-tls", ] [[package]] name = "idna" -version = "0.2.0" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" dependencies = [ "matches", "unicode-bidi", @@ -1132,33 +1287,37 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.4.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ - "autocfg 1.0.0", + "autocfg", + "hashbrown 0.11.2", ] [[package]] name = "infer" -version = "0.1.7" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6854dd77ddc4f9ba1a448f487e27843583d407648150426a30c2ea3a2c39490a" +checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" [[package]] -name = "input_buffer" -version = "0.3.1" +name = "instant" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19a8a95243d5a0398cae618ec29477c6e3cb631152be5c19481f80bc71559754" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "bytes", + "cfg-if 1.0.0", ] [[package]] name = "integer-sqrt" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f65877bf7d44897a473350b1046277941cee20b263397e90869c50b6e766088b" +checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" +dependencies = [ + "num-traits", +] [[package]] name = "iovec" @@ -1169,6 +1328,12 @@ dependencies = [ "libc", ] +[[package]] +name = "ipnet" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" + [[package]] name = "itertools" version = "0.10.3" @@ -1180,9 +1345,15 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" + +[[package]] +name = "itoa" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" [[package]] name = "js-sys" @@ -1215,7 +1386,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ - "log 0.4.11", + "log", ] [[package]] @@ -1226,27 +1397,29 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.105" +version = "0.2.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "869d572136620d55835903746bcb5cdc54cb2851fd0aeec53220b4bb65ef3013" +checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c" [[package]] name = "libflate" -version = "1.0.3" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389de7875e06476365974da3e7ff85d55f1972188ccd9f6020dd7c8156e17914" +checksum = "d2d57e534717ac3e0b8dc459fe338bdfb4e29d7eea8fd0926ba649ddd3f4765f" dependencies = [ "adler32", "crc32fast", "libflate_lz77", - "rle-decode-fast", ] [[package]] name = "libflate_lz77" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3286f09f7d4926fc486334f28d8d2e6ebe4f7f9994494b6dab27ddfad2c9b11b" +checksum = "39a734c0493409afcd49deee13c006a04e3586b9761a03543c6272c9c51f2f5a" +dependencies = [ + "rle-decode-fast", +] [[package]] name = "libsqlite3-sys" @@ -1259,76 +1432,35 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "lock_api" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" -dependencies = [ - "scopeguard", -] - [[package]] name = "log" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" -dependencies = [ - "log 0.4.11", -] - -[[package]] -name = "log" -version = "0.4.11" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", + "value-bag", ] [[package]] name = "matches" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "memchr" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" -dependencies = [ - "autocfg 1.0.0", -] - -[[package]] -name = "memoffset" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" -dependencies = [ - "autocfg 1.0.0", -] - -[[package]] -name = "mime" -version = "0.2.6" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ - "log 0.3.9", + "autocfg", ] [[package]] @@ -1337,42 +1469,31 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "1.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216929a5ee4dd316b1702eedf5e74548c123d370f47841ceaac38ca154690ca3" -dependencies = [ - "mime 0.2.6", - "phf", - "phf_codegen", - "unicase 1.4.2", -] - [[package]] name = "mime_guess" version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" dependencies = [ - "mime 0.3.16", - "unicase 2.6.0", + "mime", + "unicase", ] [[package]] name = "miniz_oxide" -version = "0.4.0" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ "adler", + "autocfg", ] [[package]] name = "mio" -version = "0.6.22" +version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" +checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" dependencies = [ "cfg-if 0.1.10", "fuchsia-zircon", @@ -1380,29 +1501,31 @@ dependencies = [ "iovec", "kernel32-sys", "libc", - "log 0.4.11", - "miow", + "log", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", ] [[package]] -name = "mio-uds" -version = "0.6.8" +name = "mio" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" dependencies = [ - "iovec", "libc", - "mio", + "log", + "miow 0.3.7", + "ntapi", + "winapi 0.3.9", ] [[package]] name = "miow" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" dependencies = [ "kernel32-sys", "net2", @@ -1410,19 +1533,28 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "miow" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "multipart" -version = "0.16.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136eed74cadb9edd2651ffba732b19a450316b680e4f48d6c79e905799e19d01" +checksum = 
"00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" dependencies = [ "buf_redux", "httparse", - "log 0.4.11", - "mime 0.2.6", - "mime_guess 1.8.8", + "log", + "mime", + "mime_guess", "quick-error", - "rand 0.6.5", + "rand 0.8.4", "safemem", "tempfile", "twoway", @@ -1430,13 +1562,13 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.4" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0d88c06fe90d5ee94048ba40409ef1d9315d86f6f38c2efdaad4fb50c58b2d" +checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" dependencies = [ "lazy_static", "libc", - "log 0.4.11", + "log", "openssl", "openssl-probe", "openssl-sys", @@ -1448,9 +1580,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.34" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ "cfg-if 0.1.10", "libc", @@ -1459,24 +1591,33 @@ dependencies = [ [[package]] name = "nix" -version = "0.23.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f305c2c2e4c39a82f7bf0bf65fb557f9070ce06781d4f2454295cc34b1c43188" +checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" dependencies = [ "bitflags", "cc", "cfg-if 1.0.0", "libc", - "memoffset 0.6.4", + "memoffset", +] + +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", ] [[package]] name = "num-integer" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ - "autocfg 1.0.0", + "autocfg", "num-traits", ] @@ -1486,14 +1627,14 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ - "autocfg 1.0.0", + "autocfg", ] [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", @@ -1501,24 +1642,18 @@ dependencies = [ [[package]] name = "object" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" - -[[package]] -name = "omnom" -version = "2.1.2" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b216cee2e0d6e680f73158d15468c80b39e571c11669cd90556f9a644e9fd3" +checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.5.2" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" [[package]] name = 
"oorandom" @@ -1540,31 +1675,31 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.30" +version = "0.10.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" dependencies = [ "bitflags", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "foreign-types", - "lazy_static", "libc", + "once_cell", "openssl-sys", ] [[package]] name = "openssl-probe" -version = "0.1.2" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.58" +version = "0.9.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +checksum = "7e46109c383602735fa0a2e48dd2b7c892b048e1bf69e5c3b1d804b7d9c203cb" dependencies = [ - "autocfg 1.0.0", + "autocfg", "cc", "libc", "pkg-config", @@ -1572,28 +1707,10 @@ dependencies = [ ] [[package]] -name = "parking_lot" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.7.2" +name = "parking" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" -dependencies = [ - "cfg-if 0.1.10", - "cloudabi", - "libc", - "redox_syscall", - "smallvec", - "winapi 0.3.9", -] +checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" [[package]] name = "percent-encoding" @@ -1601,65 +1718,26 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" -[[package]] -name = "phf" -version = "0.7.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3da44b85f8e8dfaec21adae67f95d93244b2ecf6ad2a692320598dcc8e6dd18" -dependencies = [ - "phf_shared", -] - -[[package]] -name = "phf_codegen" -version = "0.7.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03e85129e324ad4166b06b2c7491ae27fe3ec353af72e72cd1654c7225d517e" -dependencies = [ - "phf_generator", - "phf_shared", -] - -[[package]] -name = "phf_generator" -version = "0.7.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09364cc93c159b8b06b1f4dd8a4398984503483891b0c26b867cf431fb132662" -dependencies = [ - "phf_shared", - "rand 0.6.5", -] - -[[package]] -name = "phf_shared" -version = "0.7.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234f71a15de2288bcb7e3b6515828d22af7ec8598ee6d24c3b526fa0a80b67a0" -dependencies = [ - "siphasher", - "unicase 1.4.2", -] - [[package]] name = "pico-args" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1eee8b1f4966c8343d7ca0f5a8452cd35d5610a2e0efbe2a68cae44bef2046" +checksum = "28b9b4df73455c861d7cbf8be42f01d3b373ed7f02e378d55fa84eafc6f638b1" [[package]] name = "pin-project" -version = "0.4.22" +version = "1.0.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.22" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", @@ -1668,9 +1746,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pin-utils" @@ -1680,9 +1758,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.18" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" +checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" [[package]] name = "plotters" @@ -1712,55 +1790,71 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "polling" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "log", + "wepoll-ffi", + "winapi 0.3.9", +] + +[[package]] +name = "polyval" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" +dependencies = [ + "cpuid-bool", + "opaque-debug 0.3.0", + "universal-hash", +] + [[package]] name = "ppv-lite86" -version = "0.2.8" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-error" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc175e9777c3116627248584e8f8b3e2987405cabe1c0adf7d1dd28f09dc7880" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", "syn", - "version_check 0.9.2", + "version_check", ] [[package]] name = "proc-macro-error-attr" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cc9795ca17eb581285ec44936da7fc2335a3f34f2ddd13118b6f4d515435c50" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2", "quote", - "syn", - "syn-mid", - "version_check 0.9.2", + "version_check", ] [[package]] name = "proc-macro-hack" -version = "0.5.16" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" - -[[package]] -name = "proc-macro-nested" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.32" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ "unicode-xid", ] @@ -1781,15 +1875,15 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.16.2" +version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d883f78645c21b7281d21305181aa1f4dd9e9363e7cf2566c93121552cff003e" +checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" [[package]] name = "psm" -version = "0.1.12" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3abf49e5417290756acfd26501536358560c4a5cc4a0934d390939acb3e7083a" +checksum = "cd136ff4382c4753fc061cb9e4712ab2af263376b95bbd5bd8cd50c020b78e69" dependencies = [ "cc", ] @@ -1802,7 +1896,7 @@ dependencies = [ "async-std", "base64 0.12.3", "http-types", - "rand 0.7.2", + "rand 0.7.3", "serde", "serde_derive", "serde_json", @@ -1817,39 +1911,20 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.10" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" dependencies = [ "proc-macro2", ] [[package]] name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.7", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg", - "rand_xorshift", - "winapi 0.3.9", -] - -[[package]] -name = "rand" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae1b169243eaf61759b8475a998f0a385e42042370f3a7dbaf35246eacc8412" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -1857,13 +1932,15 @@ dependencies = [ ] [[package]] -name = "rand_chacha" -version = "0.1.1" +name = "rand" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ - "autocfg 0.1.7", - "rand_core 0.3.1", + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.3", + "rand_hc 0.3.1", ] [[package]] @@ -1877,36 +1954,31 @@ dependencies = [ ] [[package]] -name = "rand_core" +name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ - "rand_core 0.4.2", + "ppv-lite86", + "rand_core 0.6.3", ] -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - [[package]] name = "rand_core" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom", + "getrandom 0.1.16", ] [[package]] -name = "rand_hc" -version = "0.1.0" +name = "rand_core" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "rand_core 0.3.1", + "getrandom 0.2.4", ] [[package]] @@ -1919,56 +1991,12 @@ dependencies = [ ] [[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi 0.3.9", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi 0.3.9", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.4.2", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" +name = "rand_hc" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" dependencies = [ - "rand_core 0.3.1", + "rand_core 0.6.3", ] [[package]] @@ -1977,8 +2005,8 @@ version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" dependencies = [ - "autocfg 1.0.0", - "crossbeam-deque 0.8.1", + "autocfg", + "crossbeam-deque", "either", "rayon-core", ] @@ -1989,49 +2017,41 @@ version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" dependencies = [ - "crossbeam-channel 0.5.2", - "crossbeam-deque 0.8.1", - "crossbeam-utils 0.8.6", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-utils", "lazy_static", "num_cpus", ] [[package]] -name = "rdrand" -version = "0.4.0" +name = "redox_syscall" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" dependencies = [ - "rand_core 0.3.1", + "bitflags", ] -[[package]] -name = "redox_syscall" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - [[package]] name = "redox_users" -version = "0.3.5" +version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" +checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom", + "getrandom 0.2.4", "redox_syscall", - "rust-argon2", ] [[package]] name = "regex" -version = "1.3.9" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" dependencies = [ "aho-corasick", "memchr", "regex-syntax", - "thread_local", ] [[package]] @@ -2042,9 +2062,9 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-syntax" -version = "0.6.18" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "remove_dir_all" @@ -2057,24 +2077,25 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680" +checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "bytes", "encoding_rs", "futures-core", "futures-util", + "h2", "http", "http-body", "hyper", "hyper-tls", + "ipnet", "js-sys", "lazy_static", - "log 0.4.11", - "mime 0.3.16", - "mime_guess 2.0.3", + "log", + "mime", "native-tls", "percent-encoding", "pin-project-lite", @@ -2083,7 +2104,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "tokio", - "tokio-tls", + "tokio-native-tls", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -2093,9 +2114,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.19" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -2119,9 +2140,9 @@ dependencies = [ [[package]] name = "rle-decode-fast" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cabe4fa914dec5870285fa7f71f602645da47c486e68486d2b4ceb4a343e90ac" +checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" [[package]] name = "rusqlite" @@ -2140,23 +2161,11 @@ dependencies = [ "smallvec", ] -[[package]] -name = "rust-argon2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" -dependencies = [ - "base64 0.12.3", - "blake2b_simd", - "constant_time_eq", - "crossbeam-utils 0.7.2", -] - [[package]] name = "rustc-demangle" -version = "0.1.16" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" [[package]] name = "rustc_version" @@ -2173,27 +2182,32 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" 
dependencies = [ - "semver 1.0.4", + "semver 1.0.5", ] [[package]] name = "rustls" -version = "0.17.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0d4a31f5d68413404705d6982529b0e11a9aacd4839d1d6222ee3b8cb4015e1" +checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" dependencies = [ - "base64 0.11.0", - "log 0.4.11", + "log", "ring", "sct", "webpki", ] +[[package]] +name = "rustversion" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" + [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "safemem" @@ -2234,9 +2248,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "sct" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", @@ -2263,9 +2277,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "0.4.4" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" +checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" dependencies = [ "bitflags", "core-foundation", @@ -2276,9 +2290,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "0.4.3" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17bf11d99252f512695eb468de5516e5cf75455521e69dfe343f3b74e4748405" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" dependencies = [ "core-foundation-sys", "libc", @@ -2295,9 +2309,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +checksum = "0486718e92ec9a68fbed73bb5ef687d71103b142595b406835649bebd33f72c7" [[package]] name = "semver-parser" @@ -2307,9 +2321,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.114" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" +checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" dependencies = [ "serde_derive", ] @@ -2326,9 +2340,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.114" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" +checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" dependencies = [ "proc-macro2", "quote", @@ -2337,15 +2351,26 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.56" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3433e879a558dde8b5e8feb2a04899cf34fdde1fafb894687e52105fc1162ac3" +checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085" dependencies = [ - "itoa", + "itoa 1.0.1", "ryu", "serde", ] +[[package]] +name = "serde_qs" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7715380eec75f029a4ef7de39a9200e0a63823176b759d055b613f5a87df6a6" +dependencies = [ + "percent-encoding", + "serde", + "thiserror", +] + [[package]] name = "serde_stacker" version = "0.1.4" @@ -2358,33 +2383,43 @@ dependencies = [ [[package]] name = "serde_urlencoded" -version = "0.6.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ - "dtoa", - "itoa", + "form_urlencoded", + "itoa 1.0.1", + "ryu", "serde", - "url", ] [[package]] name = "sha-1" -version = "0.8.2" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.9.0", + "opaque-debug 0.3.0", ] [[package]] name = "sha1" -version = "0.6.0" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" +dependencies = [ + "sha1_smol", +] + +[[package]] +name = "sha1_smol" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" [[package]] name = "sha2" @@ -2400,22 +2435,22 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.2" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", "cfg-if 1.0.0", - "cpuid-bool", + "cpufeatures", "digest 0.9.0", "opaque-debug 0.3.0", ] [[package]] name = "sha2-asm" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92cfa120723b595090343400d71e4921ba4fbc7d0d48718d72c20b3348469678" +checksum = "a7c2f225be6502f2134e6bbb35bb5e2957e41ffa0495ed08bce2e2b4ca885da4" dependencies = [ "cc", ] @@ -2433,29 +2468,38 @@ dependencies = [ "opaque-debug 0.2.3", ] +[[package]] +name = "simple-mutex" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38aabbeafa6f6dead8cebf246fe9fae1f9215c8d29b3a69f93bd62a9e4a3dcd6" +dependencies = [ + "event-listener", +] + [[package]] name = "siphasher" -version = "0.2.3" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8de496cf83d4ed58b6be86c3a275b8602f6ffe98d3024a869e124147a9a3ac" +checksum = "a86232ab60fa71287d7f2ddae4a7073f6b7aac33631c3015abb556f08c6d0a3e" [[package]] name = "slab" -version = "0.4.2" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "slog" -version = "2.5.2" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cc9c640a4adbfbcc11ffb95efe5aa7af7309e002adab54b185507dbf2377b99" +checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" [[package]] name = "slog-json" -version = "2.3.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc0d2aff1f8f325ef660d9a0eb6e6dcd20b30b3f581a5897f58bf42d061c37a" +checksum = "0f7f7a952ce80fca9da17bf0a53895d11f8aa1ba063668ca53fc72e7869329e9" dependencies = [ "chrono", "serde", @@ -2465,9 +2509,9 @@ dependencies = [ [[package]] name = "slog-term" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab1d807cf71129b05ce36914e1dbb6fbfbdecaf686301cb457f4fa967f9f5b6" +checksum = "95c1e7e5aab61ced6006149ea772770b84a0d16ce0f7885def313e4829946d76" dependencies = [ "atty", "chrono", @@ -2478,19 +2522,17 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.4.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3757cb9d89161a2f24e1cf78efa0c1fcff485d18e3f55e0aa3480824ddaa0f3f" +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" [[package]] name = "socket2" -version = "0.3.12" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" dependencies = [ - "cfg-if 0.1.10", "libc", - "redox_syscall", "winapi 0.3.9", ] @@ -2500,17 +2542,11 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "stacker" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3f47e840d001df3b785fbc3b84c7228519bdf63d4fb61b9e9f50f7fa153ce10" +checksum = "90939d5171a4420b3ff5fbc8954d641e7377335454c259dcb80786f3f21dc9b4" dependencies = [ "cc", "cfg-if 1.0.0", @@ -2532,7 +2568,7 @@ dependencies = [ "lazy_static", "libc", "pico-args", - "rand 0.7.2", + "rand 0.7.3", "reqwest", "ring", "rusqlite", @@ -2548,11 +2584,11 @@ dependencies = [ [[package]] name = "standback" -version = "0.2.9" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246" +checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" dependencies = [ - "version_check 0.9.2", + "version_check", ] [[package]] @@ -2609,46 +2645,35 @@ name = "stx-genesis" version = "0.1.0" dependencies = [ "libflate", - "sha2 0.9.2", + "sha2 0.9.9", ] [[package]] name = "subtle" -version = "2.2.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = 
"1.0.81" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" +checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" dependencies = [ "proc-macro2", "quote", "unicode-xid", ] -[[package]] -name = "syn-mid" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tempfile" -version = "3.1.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", + "fastrand", "libc", - "rand 0.7.2", "redox_syscall", "remove_dir_all", "winapi 0.3.9", @@ -2656,11 +2681,12 @@ dependencies = [ [[package]] name = "term" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0863a3345e70f61d613eab32ee046ccd1bcc5f9105fe402c61fcd0c13eeb8b5" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" dependencies = [ - "dirs", + "dirs-next", + "rustversion", "winapi 0.3.9", ] @@ -2675,18 +2701,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.20" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.20" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", @@ -2695,43 +2721,44 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "time" -version = "0.2.23" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" +checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" dependencies = [ "const_fn", "libc", "standback", "stdweb", "time-macros", - "version_check 0.9.2", + "version_check", "winapi 0.3.9", ] [[package]] name = "time-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ae9b6e9f095bc105e183e3cd493d72579be3181ad4004fceb01adbe9eecab2d" +checksum = 
"957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" dependencies = [ "proc-macro-hack", "time-macros-impl", @@ -2739,9 +2766,9 @@ dependencies = [ [[package]] name = "time-macros-impl" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" +checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -2768,46 +2795,63 @@ dependencies = [ [[package]] name = "tinyvec" -version = "0.3.3" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.21" +version = "1.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" +checksum = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a" dependencies = [ "bytes", - "fnv", - "futures-core", - "iovec", - "lazy_static", + "libc", "memchr", - "mio", + "mio 0.7.14", "num_cpus", "pin-project-lite", - "slab", + "winapi 0.3.9", ] [[package]] -name = "tokio-tls" -version = "0.3.1" +name = "tokio-native-tls" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" dependencies = [ "native-tls", "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-tungstenite" -version = "0.10.1" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8b8fe88007ebc363512449868d7da4389c9400072a3f666f212c7280082882a" +checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" dependencies = [ - "futures", - "log 0.4.11", + "futures-util", + "log", "pin-project", "tokio", "tungstenite", @@ -2815,49 +2859,50 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.3.1" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" dependencies = [ "bytes", "futures-core", "futures-sink", - "log 0.4.11", + "log", "pin-project-lite", "tokio", ] [[package]] name = "toml" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.16" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e2a2de6b0d5cbb13fc21193a2296888eaab62b6044479aafb3c54c01c29fcd" +checksum = "2d8d93354fe2a8e50d5953f5ae2e47a3fc2ef03292e7ea46e3cc38f549525fb9" dependencies = [ - "cfg-if 0.1.10", - "log 0.4.11", + "cfg-if 1.0.0", + "log", + "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" -version = "0.1.11" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94ae75f0d28ae10786f3b1895c55fe72e79928fd5ccdebb5438c75e93fec178f" +checksum = "03cfcb51380632a72d3111cb8d3447a8d908e577d31beeac006f836383d29a23" dependencies = [ "lazy_static", ] @@ -2870,19 +2915,19 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "tungstenite" -version = "0.10.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfea31758bf674f990918962e8e5f07071a3161bd7c4138ed23e416e1ac4264e" +checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" dependencies = [ - "base64 0.11.0", + "base64 0.13.0", "byteorder", "bytes", "http", "httparse", - "input_buffer", - "log 0.4.11", - "rand 0.7.2", + "log", + "rand 0.8.4", "sha-1", + "thiserror", "url", "utf-8", ] @@ -2898,18 +2943,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" - -[[package]] -name = "unicase" -version = "1.4.2" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" -dependencies = [ - "version_check 0.1.5", -] +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "unicase" @@ -2917,23 +2953,20 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" dependencies = [ - "version_check 0.9.2", + "version_check", ] [[package]] name = "unicode-bidi" -version = "0.3.4" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -dependencies = [ - "matches", -] +checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" [[package]] name = "unicode-normalization" -version = "0.1.13" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" dependencies = [ "tinyvec", ] @@ -2946,9 +2979,19 @@ checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" + +[[package]] +name = "universal-hash" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = 
"9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" +dependencies = [ + "generic-array 0.14.5", + "subtle", +] [[package]] name = "untrusted" @@ -2958,44 +3001,50 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.1.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" dependencies = [ + "form_urlencoded", "idna", "matches", "percent-encoding", + "serde", ] [[package]] -name = "urlencoding" -version = "1.1.1" +name = "utf-8" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9232eb53352b4442e40d7900465dfc534e8cb2dc8f18656fcb2ac16112b5593" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] -name = "utf-8" -version = "0.7.5" +name = "value-bag" +version = "1.0.0-alpha.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e42f7c18b8f902290b009cde6d651262f956c98bc51bca4cd1d511c9cd85c7" +checksum = "79923f7731dc61ebfba3633098bf3ac533bbd35ccd8c57e7088d9a5eebe0263f" +dependencies = [ + "ctor", + "version_check", +] [[package]] name = "vcpkg" -version = "0.2.10" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.1.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] -name = "version_check" -version = "0.9.2" +name = "waker-fn" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" @@ -3014,34 +3063,38 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ - "log 0.4.11", + "log", "try-lock", ] [[package]] name = "warp" -version = "0.2.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e95175b7a927258ecbb816bdada3cc469cb68593e7940b96a60f4af366a9970" +checksum = "3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e" dependencies = [ "bytes", - "futures", + "futures-channel", + "futures-util", "headers", "http", "hyper", - "log 0.4.11", - "mime 0.3.16", - "mime_guess 2.0.3", + "log", + "mime", + "mime_guess", "multipart", + "percent-encoding", "pin-project", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", + "tokio-stream", "tokio-tungstenite", + "tokio-util", "tower-service", - "urlencoding", + "tracing", ] [[package]] @@ -3050,6 +3103,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" version = "0.2.79" @@ -3057,8 +3116,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" dependencies = [ "cfg-if 1.0.0", - "serde", - "serde_json", "wasm-bindgen-macro", ] @@ -3070,7 +3127,7 @@ checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" dependencies = [ "bumpalo", "lazy_static", - "log 0.4.11", + "log", "proc-macro2", "quote", "syn", @@ -3079,11 +3136,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.15" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ad6e4e8b2b7f8c90b6e09a9b590ea15cb0d1dbe28502b5a405cd95d1981671" +checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", @@ -3130,14 +3187,23 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ "ring", "untrusted", ] +[[package]] +name = "wepoll-ffi" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" +dependencies = [ + "cc", +] + [[package]] name = "winapi" version = "0.2.8" @@ -3202,6 +3268,6 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.1.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" +checksum = "7c88870063c39ee00ec285a2f8d6a966e5b6fb2becc4e8dac77ed0d370ed6006" diff --git a/Cargo.toml b/Cargo.toml index 09d90457b4..83a54a5863 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,7 +43,7 @@ name = "block_limits" harness = false [dependencies] -rand = "=0.7.2" +rand = "0.7.3" rand_chacha = "=0.2.2" serde = "1" serde_derive = "1" @@ -63,6 +63,7 @@ slog-term = "2.6.0" slog-json = { version = "2.3.0", optional = true } chrono = "0.4.19" libc = "0.2.82" +siphasher = "0.3.7" [target.'cfg(unix)'.dependencies] nix = "0.23" diff --git a/README.md b/README.md index 07da6847c1..0b736ff686 100644 --- a/README.md +++ b/README.md @@ -337,12 +337,14 @@ wait_time_for_microblocks = 10000 [miner] # Smallest allowed tx fee, in microSTX min_tx_fee = 100 -# Time to spend on the first attempt to make a block. +# Time to spend on the first attempt to make a block, in milliseconds. # This can be small, so your node gets a block-commit into the Bitcoin mempool early. -first_attempt_time_ms: 1000 -# Time to spend on subsequent attempts to make a block. +first_attempt_time_ms = 1000 +# Time to spend on subsequent attempts to make a block, in milliseconds. # This can be bigger -- new block-commits will be RBF'ed. -subsequent_attempt_time_ms: 60000 +subsequent_attempt_time_ms = 60000 +# Time to spend mining a microblock, in milliseconds. 
+microblock_attempt_time_ms = 30000 ``` You can verify that your node is operating as a miner by checking its log output @@ -361,7 +363,9 @@ Fee and cost estimators can be configured via the config section `[fee_estimation]` ``` [fee_estimation] cost_estimator = naive_pessimistic -fee_estimator = scalar_fee_rate +fee_estimator = fuzzed_weighted_median_fee_rate +fee_rate_fuzzer_fraction = 0.1 +fee_rate_window_size = 5 cost_metric = proportion_dot_product log_error = true enabled = true @@ -378,6 +382,11 @@ are **not** consensus-critical components, but rather can be used by miners to rank transactions in the mempool or client to determine appropriate fee rates for transactions before broadcasting them. +The `fuzzed_weighted_median_fee_rate` estimator uses a +median estimate from a window of the fees paid in the last `fee_rate_window_size` blocks. +Estimates are then randomly "fuzzed" using uniform random fuzz of size up to +`fee_rate_fuzzer_fraction` of the base estimate. + ## Non-Consensus Breaking Release Process For non-consensus breaking releases, this project uses the following release process: @@ -395,9 +404,9 @@ discussed in [Versioning](#versioning). We assume, in this section, that the change is not consensus-breaking. So, the release manager must first determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". This means, in other words, that the database schema has -changed. Then, the release manager should determine whether this is a feature -release, as opposed to a hot fix or a patch. Given the answers to these -questions, the version number can be computed. +changed, but an automatic migration was not implemented. Then, the release manager +should determine whether this is a feature release, as opposed to a hot fix or a +patch. Given the answers to these questions, the version number can be computed. 1. The release manager enumerates the PRs or issues that would _block_ the release. A label should be applied to each such issue/PR as @@ -453,7 +462,10 @@ is tagged. 1. Once the final release candidate has rolled out successfully without issue on the above staging infrastructure, the release manager tags 2 additional `stacks-blockchain` - team members to review the `develop -> master` PR. + team members to review the `develop -> master` PR. If there is a merge conflict in this + PR, this is the protocol: open a branch off of develop, merge master into that branch, + resolve the merge conflicts there, and then open a PR from this side branch back to + develop. 1. 
Once reviewed and approved, the release manager merges the PR, and tags the release via the [`stacks-blockchain` Github action](https://github.com/blockstack/stacks-blockchain/actions/workflows/stacks-blockchain.yml) diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index b1dd842a10..66ef686917 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -20,7 +20,7 @@ path = "../src/libclarity.rs" [dependencies] tini = "0.2" -rand = "=0.7.2" +rand = "0.7.3" rand_chacha = "=0.2.2" serde = "1" serde_derive = "1" @@ -40,6 +40,7 @@ slog-term = "2.6.0" slog-json = { version = "2.3.0", optional = true } chrono = "0.4.19" libc = "0.2.82" +siphasher = "0.3.7" [target.'cfg(unix)'.dependencies] nix = "0.23" diff --git a/docs/event-dispatcher.md b/docs/event-dispatcher.md index 2c86f54781..a6a7678f97 100644 --- a/docs/event-dispatcher.md +++ b/docs/event-dispatcher.md @@ -245,7 +245,7 @@ Example: ```json { "block_hash": "0x4eaabcd105865e471f697eff5dd5bd85d47ecb5a26a3379d74fae0ae87c40904", - "staks_height": 3, + "stacks_height": 3, "target_burn_height": 745000, "block_size": 145000, "anchored_cost": { @@ -261,6 +261,81 @@ "write_count": 5, "read_length": 150, "write_length": 75 - } + }, + "tx_events": [ + { + "Success": { + "txid": "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6", + "fee": 0, + "execution_cost": { + "write_length": 0, + "write_count": 0, + "read_length": 0, + "read_count": 0, + "runtime": 0 + }, + "result": { + "ResponseData": + { + "committed": true, + "data": true + } + } + }}, + { + "ProcessingError": { + "txid": "eef9f46b20fb637bd07ec92ad3ec175a5a4bdf3e8799259fc5b16a272090d4de", + "error": "Duplicate contract 'ST3BMYNT1DW2QSRZWB6M4S183NK1BXGJ41TEBCCH8.example'" + } + } + ] +} +``` + +### `POST /mined_microblock` + +This payload includes data related to microblocks mined by this Stacks node. This +will never be invoked if the node is configured only as a follower. This is invoked +when the miner **assembles** the microblock; this microblock may or may not be incorporated +into the canonical chain. + +This endpoint will only broadcast events to observers that explicitly register for +`MinedMicroblocks` events; `AnyEvent` observers will not receive the events by default. + +Example: + +```json +{ + "block_hash": "0x4eaabcd105865e471f697eff5dd5bd85d47ecb5a26a3379d74fae0ae87c40904", + "sequence": 3, + "anchor_block_consensus_hash": "53c166a709a9abd64a92a57f928a8b26aad08992", + "anchor_block": "43dbf6095c7622db6607d9584c3f65e908ca4eb77d86ee8cc1352aafec5d68b5", + "tx_events": [ + { + "Success": { + "txid": "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6", + "fee": 0, + "execution_cost": { + "write_length": 10, + "write_count": 10, + "read_length": 20, + "read_count": 10, + "runtime": 1290 + }, + "result": { + "ResponseData": + { + "committed": true, + "data": true + } + } + }}, + { + "Skipped": { + "txid": "eef9f46b20fb637bd07ec92ad3ec175a5a4bdf3e8799259fc5b16a272090d4de", + "reason": "tx.anchor_mode does not support microblocks, anchor_mode=OnChainOnly." + } + } + ] } ``` diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index a5e920340a..e75a8e1948 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -80,6 +80,96 @@ Reason types without additional information will not have a Get current PoX-relevant information. See OpenAPI [spec](./rpc/openapi.yaml) for details. +### GET /v2/headers/[Count] + +Get a given number of ancestral Stacks block headers, in order from newest to +oldest. 
If the `?tip=` query parameter is given, the headers will be loaded +from the block identified by the tip. If no `?tip=` query parameter is given, +then the canonical Stacks chain tip will be used. The first header in the list +is the header of the `?tip=` query parameter (or the canonical tip of the blockchain); +the second header is the parent block's header; the third header is the +grandparent block's header, and so on. [Count] determines how many headers, including this first header, to return. + +Up to 2100 headers (one PoX reward cycle) may be returned by this endpoint. +Callers who wish to download more headers will need to issue this query +multiple times, with a `?tip=` query parameter set to the index block hash of +the earliest header received. + +Returns a +[SIP-003](https://github.com/stacksgov/sips/blob/main/sips/sip-003/sip-003-peer-network.md)-encoded +vector with length up to [Count] that contains a list of the following SIP-003-encoded +structures: + +``` +struct ExtendedStacksHeader { + consensus_hash: ConsensusHash, + header: StacksBlockHeader, + parent_block_id: StacksBlockId, +} +``` + +Where `ConsensusHash` is a 20-byte byte buffer. + +Where `StacksBlockId` is a 32-byte byte buffer. + +Where `StacksBlockHeader` is the following SIP-003-encoded structure: + +``` +struct StacksBlockHeader { + version: u8, + total_work: StacksWorkScore, + proof: VRFProof, + parent_block: BlockHeaderHash, + parent_microblock: BlockHeaderHash, + parent_microblock_sequence: u16, + tx_merkle_root: Sha512Trunc256Sum, + state_index_root: TrieHash, + microblock_pubkey_hash: Hash160, +} +``` + +Where `BlockHeaderHash`, `Sha512Trunc256Sum`, and `TrieHash` are 32-byte byte +buffers. + +Where `Hash160` is a 20-byte byte buffer. + +Where `StacksWorkScore` and `VRFProof` are the following SIP-003-encoded structures: + +``` +struct StacksWorkScore { + burn: u64, + work: u64, +} +``` + +``` +struct VRFProof { + Gamma: [u8; 32] + c: [u8; 16] + s: [u8; 32] +} +``` + +The interpretation of most of these fields is beyond the scope of this document (please +see +[SIP-005](https://github.com/stacksgov/sips/blob/main/sips/sip-005/sip-005-blocks-and-transactions.md) +for details). However, it is worth pointing out that `parent_block_id` is a +valid argument to the `?tip=` query parameter. If the caller of this API +endpoint wants to receive more than 2100 contiguous headers, it would use the +oldest header's `parent_block_id` field from the previous call as the `?tip=` +argument to the next call in order to fetch the next batch of ancestor headers. + +This API endpoint may return a list of zero headers if `?tip=` refers to the +hash of the Stacks genesis block. + +This API endpoint will return HTTP 404 if the `?tip=` argument is given but +refers to a nonexistent Stacks block, or a Stacks block that has not yet been +processed by the node. + +The `?tip=` argument may refer to a Stacks block that is not on the canonical +fork. In this case, this endpoint behaves as described above, except that +non-canonical headers will be returned instead. + ### GET /v2/accounts/[Principal] Get the account data for the provided principal. @@ -107,6 +197,25 @@ object with balance and nonce of 0. This endpoint also accepts a querystring parameter `?proof=` which when supplied `0`, will return the JSON object _without_ the `balance_proof` or `nonce_proof` fields. +### GET /v2/data_var/[Stacks Address]/[Contract Name]/[Var Name] + +Attempt to fetch a data var from a contract. 
The contract is identified with [Stacks Address] and + [Contract Name] in the URL path. The variable is identified with [Var Name]. + +Returns JSON data in the form: + +``` +{ + "data": "0x01ce...", + "proof": "0x01ab..." +} +``` + +Where `data` is the hex serialization of the variable value. + +This endpoint also accepts a querystring parameter `?proof=` which, when supplied `0`, will return the +JSON object _without_ the `proof` field. + ### POST /v2/map_entry/[Stacks Address]/[Contract Name]/[Map Name] Attempt to fetch data from a contract data map. The contract is identified with [Stacks Address] and diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 76e1c5c078..0c3d7c09e5 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -73,8 +73,8 @@ paths: in: query schema: type: string - description: The Stacks chain tip to query from - + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). /v2/map_entry/{contract_address}/{contract_name}/{map_name}: post: summary: Get specific data-map inside a contract @@ -126,7 +126,8 @@ paths: in: query schema: type: string - description: The Stacks chain tip to query from + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). x-codegen-request-body-name: key requestBody: description: Hex string serialization of the lookup key (which should be a Clarity value) @@ -174,7 +175,8 @@ paths: in: query schema: type: string - description: The Stacks chain tip to query from + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). required: false /v2/contracts/call-read/{contract_address}/{contract_name}/{function_name}: @@ -222,7 +224,8 @@ paths: in: query schema: type: string - description: The Stacks chain tip to query from + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). required: false requestBody: description: map of arguments and the simulated tx-sender where sender is either a Contract identifier or a normal Stacks address, and arguments is an array of hex serialized Clarity values. @@ -265,7 +268,8 @@ paths: in: query schema: type: string - description: The Stacks chain tip to query from + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). responses: 200: description: Success @@ -416,6 +420,13 @@ paths: $ref: ./api/core-node/get-pox.schema.json example: $ref: ./api/core-node/get-pox.example.json + parameters: + - name: tip + in: query + schema: + type: string + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). /v2/traits/{contract_address}/{contract_name}/{trait_contract_address}/{trait_contract_name}/{trait_name}: get: summary: Get trait implementation details @@ -468,4 +479,7 @@ paths: in: query schema: type: string - description: The Stacks chain tip to query from + description: | + The Stacks chain tip to query from. + If tip == "latest", the query will be run from the latest known tip (includes unconfirmed state). + If the tip is left unspecified, the Stacks chain tip will be selected (only includes confirmed state). 
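As an illustration of the `tip=latest` semantics documented above, the sketch below queries the `/v2/data_var` endpoint against the latest known tip. This is a minimal sketch only, assuming a node whose RPC interface listens on the default port 20443, the `reqwest` crate (already in this dependency tree) compiled with its `blocking` feature, and a purely hypothetical contract and variable name:

```rust
// Minimal sketch: read a contract data var against the latest (unconfirmed) tip.
// The address, contract name, and variable name below are hypothetical.
use std::error::Error;

fn main() -> Result<(), Box<dyn Error>> {
    let url = "http://localhost:20443/v2/data_var/\
               ST3BMYNT1DW2QSRZWB6M4S183NK1BXGJ41TEBCCH8/example/my-var?tip=latest&proof=0";
    // `proof=0` asks the node to omit the `proof` field from the response.
    let body = reqwest::blocking::get(url)?.text()?;
    // Expected shape: {"data": "0x..."}, where `data` is a hex-serialized Clarity value.
    println!("{}", body);
    Ok(())
}
```

The same `?tip=` handling applies to `/v2/headers`: to page past one reward cycle of headers, feed the oldest returned header's `parent_block_id` back in as the next call's `?tip=` argument.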
diff --git a/src/blockstack_cli.rs b/src/blockstack_cli.rs index a2b65bfbd1..ac35ce79e5 100644 --- a/src/blockstack_cli.rs +++ b/src/blockstack_cli.rs @@ -43,6 +43,7 @@ use blockstack_lib::codec::{Error as CodecError, StacksMessageCodec}; use blockstack_lib::core::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use blockstack_lib::net::Error as NetError; use blockstack_lib::types::chainstate::StacksAddress; +use blockstack_lib::types::chainstate::StacksBlockHeader; use blockstack_lib::util::{ hash::hex_bytes, hash::to_hex, log, retry::LogReader, strings::StacksString, }; @@ -65,6 +66,7 @@ This CLI has these methods: token-transfer used to generate and sign a transfer transaction addresses used to get both Bitcoin and Stacks addresses from a private key decode-tx used to decode a hex-encoded transaction into a human-readable representation + decode-header used to decode a hex-encoded Stacks header into a human-readable representation decode-block used to decode a hex-encoded Stacks block into a human-readable representation decode-microblock used to decode a hex-encoded Stacks microblock into a human-readable representation @@ -145,19 +147,25 @@ const DECODE_TRANSACTION_USAGE: &str = "blockstack-cli (options) decode-tx [transaction-hex-or-stdin] The decode-tx command decodes a serialized Stacks transaction and prints it to stdout as JSON. -The transaction, if given, must be a hex string. Alternatively, you may pass - instead, and the +The transaction, if given, must be a hex string. Alternatively, you may pass `-` instead, and the raw binary transaction will be read from stdin."; +const DECODE_HEADER_USAGE: &str = "blockstack-cli (options) decode-header [block-path-or-stdin] + +The decode-header command decodes a serialized Stacks header and prints it to stdout as JSON. +The header, if given, must be a hex string. Alternatively, you may pass `-` instead, and the +raw binary header will be read from stdin."; + const DECODE_BLOCK_USAGE: &str = "blockstack-cli (options) decode-block [block-path-or-stdin] The decode-block command decodes a serialized Stacks block and prints it to stdout as JSON. -The block, if given, must be a hex string. Alternatively, you may pass - instead, and the +The block, if given, must be a hex string. Alternatively, you may pass `-` instead, and the raw binary block will be read from stdin."; const DECODE_MICROBLOCK_USAGE: &str = "blockstack-cli (options) decode-microblock [microblock-path-or-stdin] The decode-microblock command decodes a serialized Stacks microblock and prints it to stdout as JSON. -The microblock, if given, must be a hex string. Alternatively, you may pass - instead, and the +The microblock, if given, must be a hex string. Alternatively, you may pass `-` instead, and the raw binary microblock will be read from stdin. N.B. 
Stacks microblocks are not stored as files in the Stacks chainstate -- they are stored in @@ -660,6 +668,45 @@ fn decode_transaction(args: &[String], _version: TransactionVersion) -> Result Result { + if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 { + return Err(CliError::Message(format!( + "Usage: {}\n", + DECODE_HEADER_USAGE + ))); + } + let header_data = if args[0] == "-" { + // read from stdin + let mut header_str = Vec::new(); + io::stdin() + .read_to_end(&mut header_str) + .expect("Failed to read header from stdin"); + header_str + } else { + // given as a command-line arg + hex_bytes(&args[0].clone()).expect("Failed to decode header: must be a hex string") + }; + + let mut cursor = io::Cursor::new(&header_data); + let mut debug_cursor = LogReader::from_reader(&mut cursor); + + match StacksBlockHeader::consensus_deserialize(&mut debug_cursor) { + Ok(header) => { + Ok(serde_json::to_string(&header).expect("Failed to serialize header to JSON")) + } + Err(e) => { + let mut ret = String::new(); + ret.push_str(&format!("Failed to decode header: {:?}\n", &e)); + ret.push_str("Bytes consumed:\n"); + for buf in debug_cursor.log().iter() { + ret.push_str(&format!(" {}", to_hex(buf))); + } + ret.push_str("\n"); + Ok(ret) + } + } +} + fn decode_block(args: &[String], _version: TransactionVersion) -> Result { if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 { return Err(CliError::Message(format!( @@ -774,6 +821,7 @@ fn main_handler(mut argv: Vec) -> Result { "generate-sk" => generate_secret_key(args, tx_version), "addresses" => get_addresses(args, tx_version), "decode-tx" => decode_transaction(args, tx_version), + "decode-header" => decode_header(args, tx_version), "decode-block" => decode_block(args, tx_version), "decode-microblock" => decode_microblock(args, tx_version), _ => Err(CliError::Usage), @@ -1110,4 +1158,15 @@ mod test { let result = main_handler(to_string_vec(&block_args)).unwrap(); eprintln!("result:\n{}", result); } + + #[test] + fn simple_decode_header() { + let header_args = [ + "decode-header", + "24000000000000000100000000000000019275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a2154900325010cc49a050c23e6ffb0581afebbb27f41e65a5ecfd68548982f824f7a33ed32849b7524eceec0a9f29d9d624314059d56fefd55bca56944f3fe2d003488d4a00c92575d68c6f6dd659046585f5d5209e65829a3a673c04692f5e3dc2802020202020202020202020202020202020202020202020202020202020202023ad2cf6dfced0536fc850eb86827df634877c035", + ]; + + let result = main_handler(to_string_vec(&header_args)).unwrap(); + eprintln!("result:\n{}", result); + } } diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index 56e79aae96..8f68901134 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -370,6 +370,14 @@ impl BitcoinIndexer { true, false, )?; + if let Some(last_block) = last_block.as_ref() { + // do we need to do anything? + let cur_height = spv_client.get_headers_height()?; + if *last_block <= cur_height { + debug!("SPV client has all headers up to {}", cur_height); + return Ok(cur_height); + } + } spv_client .run(self) .and_then(|_r| Ok(spv_client.end_block_height.unwrap())) diff --git a/src/burnchains/burnchain.rs b/src/burnchains/burnchain.rs index ae4e84227d..98aaacd5cc 100644 --- a/src/burnchains/burnchain.rs +++ b/src/burnchains/burnchain.rs @@ -618,6 +618,7 @@ impl Burnchain { db_path } + /// Connect to the burnchain databases. 
They may or may not already exist. pub fn connect_db( &self, indexer: &I, @@ -651,7 +652,7 @@ impl Burnchain { Ok((sortitiondb, burnchaindb)) } - /// Open the burn database. It must already exist. + /// Open the burn databases. They must already exist. pub fn open_db(&self, readwrite: bool) -> Result<(SortitionDB, BurnchainDB), burnchain_error> { let db_path = self.get_db_path(); let burnchain_db_path = self.get_burnchaindb_path(); @@ -1145,6 +1146,37 @@ impl Burnchain { Ok((block_snapshot, state_transition_opt)) } + /// Get the highest burnchain block processed, if we have processed any. + /// Return Some(..) if we have processed at least one processed burnchain block; return None + /// otherwise. + pub fn get_highest_burnchain_block( + &self, + ) -> Result, burnchain_error> { + let burndb = match self.open_db(true) { + Ok((_sortdb, burndb)) => burndb, + Err(burnchain_error::DBError(db_error::NoDBError)) => { + // databases not yet initialized, so no blocks processed + return Ok(None); + } + Err(e) => { + return Err(e); + } + }; + + let burn_chain_tip = match burndb.get_canonical_chain_tip() { + Ok(tip) => tip, + Err(burnchain_error::MissingParentBlock) => { + // database is empty + return Ok(None); + } + Err(e) => { + return Err(e); + } + }; + + Ok(Some(burn_chain_tip)) + } + /// Top-level burnchain sync. /// Returns the burnchain block header for the new burnchain tip, which will be _at least_ as /// high as target_block_height_opt (if given), or whatever is currently at the tip of the @@ -1245,6 +1277,9 @@ impl Burnchain { start_block + max_blocks ); end_block = start_block + max_blocks; + + // make sure we resume at this height next time + indexer.drop_headers(end_block.saturating_sub(1))?; } } diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index f312968524..fb840e9fa0 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -132,6 +132,12 @@ CREATE TABLE burnchain_db_block_ops ( CREATE TABLE db_config(version TEXT NOT NULL);"; +const BURNCHAIN_DB_INDEXES: &'static [&'static str] = &[ + "CREATE INDEX IF NOT EXISTS index_burnchain_db_block_headers_height_hash ON burnchain_db_block_headers(block_height DESC, block_hash ASC);", + "CREATE INDEX IF NOT EXISTS index_burnchain_db_block_hash ON burnchain_db_block_ops(block_hash);", + "CREATE INDEX IF NOT EXISTS index_burnchain_db_txid ON burnchain_db_block_ops(txid);", +]; + impl<'a> BurnchainDBTransaction<'a> { fn store_burnchain_db_entry( &self, @@ -177,6 +183,15 @@ impl<'a> BurnchainDBTransaction<'a> { } impl BurnchainDB { + fn add_indexes(&mut self) -> Result<(), BurnchainError> { + let db_tx = self.tx_begin()?; + for index in BURNCHAIN_DB_INDEXES.iter() { + db_tx.sql_tx.execute_batch(index)?; + } + db_tx.commit()?; + Ok(()) + } + pub fn connect( path: &str, first_block_height: u64, @@ -233,6 +248,9 @@ impl BurnchainDB { db_tx.commit()?; } + if readwrite { + db.add_indexes()?; + } Ok(db) } @@ -243,7 +261,12 @@ impl BurnchainDB { OpenFlags::SQLITE_OPEN_READ_ONLY }; let conn = sqlite_open(path, open_flags, true)?; - Ok(BurnchainDB { conn }) + let mut db = BurnchainDB { conn }; + + if readwrite { + db.add_indexes()?; + } + Ok(db) } fn tx_begin<'a>(&'a mut self) -> Result, BurnchainError> { @@ -254,7 +277,7 @@ impl BurnchainDB { pub fn get_canonical_chain_tip(&self) -> Result { let qry = "SELECT * FROM burnchain_db_block_headers ORDER BY block_height DESC, block_hash ASC LIMIT 1"; let opt = query_row(&self.conn, qry, NO_PARAMS)?; - Ok(opt.expect("CORRUPTION: No canonical burnchain tip")) + 
opt.ok_or(BurnchainError::MissingParentBlock) } pub fn get_burnchain_block( diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 9e66e2dd80..a8c9a08007 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -483,12 +483,6 @@ const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ PRIMARY KEY(sortition_id) );"#, - "CREATE INDEX snapshots_block_hashes ON snapshots(block_height,index_root,winning_stacks_block_hash);", - "CREATE INDEX snapshots_block_stacks_hashes ON snapshots(num_sortitions,index_root,winning_stacks_block_hash);", - "CREATE INDEX snapshots_block_heights ON snapshots(burn_header_hash,block_height);", - "CREATE INDEX snapshots_block_winning_hash ON snapshots(winning_stacks_block_hash);", - "CREATE INDEX block_arrivals ON snapshots(arrival_index,burn_header_hash);", - "CREATE INDEX arrival_indexes ON snapshots(arrival_index);", r#" CREATE TABLE snapshot_transition_ops( sortition_id TEXT PRIMARY KEY, @@ -602,7 +596,6 @@ const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ block_height INTEGER NOT NULL, PRIMARY KEY(consensus_hash, stacks_block_hash) );"#, - "CREATE INDEX canonical_stacks_blocks ON canonical_accepted_stacks_blocks(tip_consensus_hash,stacks_block_hash);", "CREATE TABLE db_config(version TEXT PRIMARY KEY);", ]; @@ -616,6 +609,26 @@ const SORTITION_DB_SCHEMA_2: &'static [&'static str] = &[r#" PRIMARY KEY(start_block_height,epoch_id) );"#]; +const SORTITION_DB_INDEXES: &'static [&'static str] = &[ + "CREATE INDEX IF NOT EXISTS snapshots_block_hashes ON snapshots(block_height,index_root,winning_stacks_block_hash);", + "CREATE INDEX IF NOT EXISTS snapshots_block_stacks_hashes ON snapshots(num_sortitions,index_root,winning_stacks_block_hash);", + "CREATE INDEX IF NOT EXISTS snapshots_block_heights ON snapshots(burn_header_hash,block_height);", + "CREATE INDEX IF NOT EXISTS snapshots_block_winning_hash ON snapshots(winning_stacks_block_hash);", + "CREATE INDEX IF NOT EXISTS snapshots_canonical_chain_tip ON snapshots(pox_valid,block_height DESC,burn_header_hash ASC);", + "CREATE INDEX IF NOT EXISTS block_arrivals ON snapshots(arrival_index,burn_header_hash);", + "CREATE INDEX IF NOT EXISTS arrival_indexes ON snapshots(arrival_index);", + "CREATE INDEX IF NOT EXISTS index_leader_keys_sortition_id_block_height_vtxindex ON leader_keys(sortition_id,block_height,vtxindex);", + "CREATE INDEX IF NOT EXISTS index_block_commits_sortition_id_vtxindex ON block_commits(sortition_id,vtxindex);", + "CREATE INDEX IF NOT EXISTS index_block_commits_sortition_id_block_height_vtxindex ON block_commits(sortition_id,block_height,vtxindex);", + "CREATE INDEX IF NOT EXISTS index_user_burn_support_txid ON user_burn_support(txid);", + "CREATE INDEX IF NOT EXISTS index_user_burn_support_sortition_id_vtxindex ON user_burn_support(sortition_id,vtxindex);", + "CREATE INDEX IF NOT EXISTS index_user_burn_support_sortition_id_hash_160_key_vtxindex_key_block_ptr_vtxindex ON user_burn_support(sortition_id,block_header_hash_160,key_vtxindex,key_block_ptr,vtxindex ASC);", + "CREATE INDEX IF NOT EXISTS index_stack_stx_burn_header_hash ON stack_stx(burn_header_hash);", + "CREATE INDEX IF NOT EXISTS index_transfer_stx_burn_header_hash ON transfer_stx(burn_header_hash);", + "CREATE INDEX IF NOT EXISTS index_missed_commits_intended_sortition_id ON missed_commits(intended_sortition_id);", + "CREATE INDEX IF NOT EXISTS canonical_stacks_blocks ON canonical_accepted_stacks_blocks(tip_consensus_hash,stacks_block_hash);" +]; + pub 
struct SortitionDB { pub readwrite: bool, pub marf: MARF, @@ -1573,8 +1586,8 @@ impl<'a> SortitionHandleConn<'a> { let winning_block_hash160 = Hash160::from_sha256(snapshot.winning_stacks_block_hash.as_bytes()); - let qry = "SELECT * FROM user_burn_support - WHERE sortition_id = ?1 AND block_header_hash_160 = ?2 AND key_vtxindex = ?3 AND key_block_ptr = ?4 + let qry = "SELECT * FROM user_burn_support \ + WHERE sortition_id = ?1 AND block_header_hash_160 = ?2 AND key_vtxindex = ?3 AND key_block_ptr = ?4 \ ORDER BY vtxindex ASC"; let args: [&dyn ToSql; 4] = [ &snapshot.sortition_id, @@ -1586,7 +1599,7 @@ impl<'a> SortitionHandleConn<'a> { let mut winning_user_burns: Vec = query_rows(self, qry, &args)?; // were there multiple miners with the same VRF key and block header hash? (i.e., are these user burns shared?) - let qry = "SELECT COUNT(*) FROM block_commits + let qry = "SELECT COUNT(*) FROM block_commits \ WHERE sortition_id = ?1 AND block_header_hash = ?2 AND key_vtxindex = ?3 AND key_block_ptr = ?4"; let args: [&dyn ToSql; 4] = [ &snapshot.sortition_id, @@ -2113,6 +2126,9 @@ impl SortitionDB { } db.check_schema_version_and_update(epochs)?; + if readwrite { + db.add_indexes()?; + } Ok(db) } @@ -2296,6 +2312,8 @@ impl SortitionDB { )?; db_tx.commit()?; + + self.add_indexes()?; Ok(()) } @@ -2494,6 +2512,15 @@ impl SortitionDB { Err(e) => panic!("Error obtaining the version of the sortition DB: {:?}", e), } } + + fn add_indexes(&mut self) -> Result<(), db_error> { + let tx = self.tx_begin()?; + for row_text in SORTITION_DB_INDEXES { + tx.execute_batch(row_text)?; + } + tx.commit()?; + Ok(()) + } } impl<'a> SortitionDBConn<'a> { diff --git a/src/chainstate/burn/operations/leader_key_register.rs b/src/chainstate/burn/operations/leader_key_register.rs index 2ba44ba8f4..afb54f1ce5 100644 --- a/src/chainstate/burn/operations/leader_key_register.rs +++ b/src/chainstate/burn/operations/leader_key_register.rs @@ -45,7 +45,7 @@ use util::vrf::{VRFPrivateKey, VRFPublicKey, VRF}; use crate::types::chainstate::BlockHeaderHash; use crate::types::chainstate::BurnchainHeaderHash; -struct ParsedData { +pub struct ParsedData { pub consensus_hash: ConsensusHash, pub public_key: VRFPublicKey, pub memo: Vec, diff --git a/src/chainstate/burn/operations/mod.rs b/src/chainstate/burn/operations/mod.rs index 1317ff5b47..bb1f2de68d 100644 --- a/src/chainstate/burn/operations/mod.rs +++ b/src/chainstate/burn/operations/mod.rs @@ -245,7 +245,7 @@ pub struct LeaderKeyRegisterOp { pub consensus_hash: ConsensusHash, // consensus hash at time of issuance pub public_key: VRFPublicKey, // EdDSA public key pub memo: Vec, // extra bytes in the op-return - pub address: StacksAddress, // hash of public key(s) that will send the leader block commit + pub address: StacksAddress, // NOTE: no longer used for anything consensus-critical, but identifies the change address output // common to all transactions pub txid: Txid, // transaction ID @@ -254,6 +254,7 @@ pub struct LeaderKeyRegisterOp { pub burn_header_hash: BurnchainHeaderHash, // hash of burn chain block } +/// NOTE: this struct is currently not used #[derive(Debug, PartialEq, Clone, Eq, Serialize, Deserialize)] pub struct UserBurnSupportOp { pub address: StacksAddress, @@ -378,7 +379,6 @@ impl fmt::Display for BlockstackOperationType { BlockstackOperationType::LeaderKeyRegister(ref op) => write!(f, "{:?}", op), BlockstackOperationType::PreStx(ref op) => write!(f, "{:?}", op), BlockstackOperationType::StackStx(ref op) => write!(f, "{:?}", op), - 
BlockstackOperationType::LeaderBlockCommit(ref op) => write!(f, "{:?}", op), BlockstackOperationType::UserBurnSupport(ref op) => write!(f, "{:?}", op), BlockstackOperationType::TransferStx(ref op) => write!(f, "{:?}", op), diff --git a/src/chainstate/burn/operations/stack_stx.rs b/src/chainstate/burn/operations/stack_stx.rs index 580bf127ee..69356778d8 100644 --- a/src/chainstate/burn/operations/stack_stx.rs +++ b/src/chainstate/burn/operations/stack_stx.rs @@ -279,6 +279,18 @@ impl StackStxOp { } } +impl StacksMessageCodec for PreStxOp { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &(Opcodes::PreStx as u8))?; + Ok(()) + } + + fn consensus_deserialize(_fd: &mut R) -> Result { + // Op deserialized through burchain indexer + unimplemented!(); + } +} + impl StacksMessageCodec for StackStxOp { /* Wire format: diff --git a/src/chainstate/burn/operations/transfer_stx.rs b/src/chainstate/burn/operations/transfer_stx.rs index 99441c69ce..d5ffe6eac5 100644 --- a/src/chainstate/burn/operations/transfer_stx.rs +++ b/src/chainstate/burn/operations/transfer_stx.rs @@ -193,18 +193,19 @@ impl TransferStxOp { impl StacksMessageCodec for TransferStxOp { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - write_next(fd, &(Opcodes::TransferStx as u8))?; - fd.write_all(&self.transfered_ustx.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; if self.memo.len() > 61 { return Err(codec_error::ArrayTooLong); } - write_next(fd, &self.memo)?; + write_next(fd, &(Opcodes::TransferStx as u8))?; + fd.write_all(&self.transfered_ustx.to_be_bytes()) + .map_err(|e| codec_error::WriteError(e))?; + fd.write_all(&self.memo) + .map_err(|e| codec_error::WriteError(e))?; Ok(()) } fn consensus_deserialize(_fd: &mut R) -> Result { - // Op deserialized through burchain indexer + // Op deserialized through burnchain indexer unimplemented!(); } } diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 3b42df2e1b..47599fd474 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -42,10 +42,7 @@ use chainstate::stacks::{ Error as ChainstateError, StacksBlock, TransactionPayload, }; use core::StacksEpoch; -use monitoring::{ - increment_contract_calls_processed, increment_stx_blocks_processed_counter, - update_stacks_tip_height, -}; +use monitoring::{increment_contract_calls_processed, increment_stx_blocks_processed_counter}; use net::atlas::{AtlasConfig, AttachmentInstance}; use util::db::Error as DBError; use vm::{ @@ -698,8 +695,6 @@ impl< let sortdb_handle = self.sortition_db.tx_handle_begin(canonical_sortition_tip)?; let mut processed_blocks = self.chain_state_db.process_blocks(sortdb_handle, 1)?; - let stacks_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortition_db.conn())?; - update_stacks_tip_height(stacks_tip.canonical_stacks_tip_height as i64); while let Some(block_result) = processed_blocks.pop() { if let (Some(block_receipt), _) = block_result { diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 5f8bff657a..af0530e6e9 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -465,7 +465,12 @@ fn make_genesis_block_with_recipients( .unwrap(); let iconn = sort_db.index_conn(); - let mut epoch_tx = builder.epoch_begin(state, &iconn).unwrap().0; + let mut miner_epoch_info = builder.pre_epoch_begin(state, &iconn).unwrap(); + let mut epoch_tx = builder + .epoch_begin(&iconn, &mut miner_epoch_info) + 
.unwrap() + .0; + builder.try_mine_tx(&mut epoch_tx, &coinbase_op).unwrap(); let block = builder.mine_anchored_block(&mut epoch_tx); @@ -675,7 +680,12 @@ fn make_stacks_block_with_input( next_hash160(), ) .unwrap(); - let mut epoch_tx = builder.epoch_begin(state, &iconn).unwrap().0; + let mut miner_epoch_info = builder.pre_epoch_begin(state, &iconn).unwrap(); + let mut epoch_tx = builder + .epoch_begin(&iconn, &mut miner_epoch_info) + .unwrap() + .0; + builder.try_mine_tx(&mut epoch_tx, &coinbase_op).unwrap(); let block = builder.mine_anchored_block(&mut epoch_tx); diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index 68b0d0eb04..0a45e14896 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -32,6 +32,7 @@ use rusqlite::DatabaseName; use rusqlite::{Error as sqlite_error, OptionalExtension}; use crate::codec::MAX_MESSAGE_LEN; +use crate::codec::{read_next, write_next}; use chainstate::burn::db::sortdb::*; use chainstate::burn::operations::*; use chainstate::burn::BlockSnapshot; @@ -46,11 +47,14 @@ use chainstate::stacks::{ C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use clarity_vm::clarity::{ClarityBlockConnection, ClarityConnection, ClarityInstance}; +use core::mempool::MemPoolDB; use core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use core::*; use cost_estimates::EstimatorError; use net::BlocksInvData; use net::Error as net_error; +use net::ExtendedStacksHeader; +use net::MemPoolSyncData; use util::db::u64_to_sql; use util::db::Error as db_error; use util::db::{ @@ -78,6 +82,7 @@ use crate::types::chainstate::{ StacksAddress, StacksBlockHeader, StacksBlockId, StacksMicroblockHeader, }; use crate::{types, util}; +use monitoring::set_last_execution_cost_observed; use types::chainstate::BurnchainHeaderHash; #[derive(Debug, Clone, PartialEq)] @@ -152,6 +157,19 @@ pub enum MemPoolRejection { Other(String), } +pub struct SetupBlockResult<'a> { + pub clarity_tx: ClarityTx<'a>, + pub tx_receipts: Vec, + pub microblock_execution_cost: ExecutionCost, + pub microblock_fees: u128, + pub microblock_burns: u128, + pub microblock_txs_receipts: Vec, + pub matured_miner_rewards_opt: + Option<(MinerReward, Vec, MinerReward, MinerRewardInfo)>, + pub evaluated_epoch: StacksEpochId, + pub applied_epoch_transition: bool, +} + impl MemPoolRejection { pub fn into_json(self, txid: &Txid) -> serde_json::Value { use self::MemPoolRejection::*; @@ -372,28 +390,64 @@ impl StagingMicroblock { } } -impl BlockStreamData { - pub fn new_block(index_block_hash: StacksBlockId) -> BlockStreamData { - BlockStreamData { +impl MicroblockStreamData { + fn stream_count(&mut self, fd: &mut W, count: u64) -> Result { + let mut num_written = 0; + while self.num_items_ptr < self.num_items_buf.len() && num_written < count { + // stream length prefix + test_debug!( + "Length prefix: try to send {:?} (ptr={})", + &self.num_items_buf[self.num_items_ptr..], + self.num_items_ptr + ); + let num_sent = match fd.write(&self.num_items_buf[self.num_items_ptr..]) { + Ok(0) => { + // done (disconnected) + test_debug!("Length prefix: wrote 0 bytes",); + return Ok(num_written); + } + Ok(n) => { + self.num_items_ptr += n; + n as u64 + } + Err(e) => { + if e.kind() == io::ErrorKind::Interrupted { + // EINTR; try again + continue; + } else if e.kind() == io::ErrorKind::WouldBlock + || (cfg!(windows) && e.kind() == io::ErrorKind::TimedOut) + { + // blocked + return Ok(num_written); + } else { + return Err(Error::WriteError(e)); + } + } + }; + 
num_written += num_sent; + test_debug!( + "Length prefix: sent {} bytes ({} total)", + num_sent, + num_written + ); + } + Ok(num_written) + } +} + +impl StreamCursor { + pub fn new_block(index_block_hash: StacksBlockId) -> StreamCursor { + StreamCursor::Block(BlockStreamData { index_block_hash: index_block_hash, - rowid: None, offset: 0, total_bytes: 0, - - is_microblock: false, - microblock_hash: BlockHeaderHash([0u8; 32]), - parent_index_block_hash: StacksBlockId([0u8; 32]), - seq: 0, - unconfirmed: false, - num_mblocks_buf: [0u8; 4], - num_mblocks_ptr: 0, - } + }) } pub fn new_microblock_confirmed( chainstate: &StacksChainState, tail_index_microblock_hash: StacksBlockId, - ) -> Result { + ) -> Result { // look up parent let mblock_info = StacksChainState::load_staging_microblock_info_indexed( &chainstate.db(), @@ -408,29 +462,27 @@ impl BlockStreamData { // need to send out the consensus_serialize()'ed array length before sending microblocks. // this is exactly what seq tells us, though. - let num_mblocks_buf = ((mblock_info.sequence as u32) + 1).to_be_bytes(); + let num_items_buf = ((mblock_info.sequence as u32) + 1).to_be_bytes(); - Ok(BlockStreamData { + Ok(StreamCursor::Microblocks(MicroblockStreamData { index_block_hash: StacksBlockId([0u8; 32]), rowid: None, offset: 0, total_bytes: 0, - - is_microblock: true, microblock_hash: mblock_info.microblock_hash, parent_index_block_hash: parent_index_block_hash, seq: mblock_info.sequence, unconfirmed: false, - num_mblocks_buf: num_mblocks_buf, - num_mblocks_ptr: 0, - }) + num_items_buf: num_items_buf, + num_items_ptr: 0, + })) } pub fn new_microblock_unconfirmed( chainstate: &StacksChainState, anchored_index_block_hash: StacksBlockId, seq: u16, - ) -> Result { + ) -> Result { let mblock_info = StacksChainState::load_next_descendant_microblock( &chainstate.db(), &anchored_index_block_hash, @@ -438,89 +490,209 @@ impl BlockStreamData { )? .ok_or(Error::NoSuchBlockError)?; - Ok(BlockStreamData { + Ok(StreamCursor::Microblocks(MicroblockStreamData { index_block_hash: anchored_index_block_hash.clone(), rowid: None, offset: 0, total_bytes: 0, - - is_microblock: true, microblock_hash: mblock_info.block_hash(), parent_index_block_hash: anchored_index_block_hash, seq: seq, unconfirmed: true, - num_mblocks_buf: [0u8; 4], - num_mblocks_ptr: 4, // stops us from trying to send a length prefix + num_items_buf: [0u8; 4], + num_items_ptr: 4, // stops us from trying to send a length prefix + })) + } + + pub fn new_headers( + chainstate: &StacksChainState, + tip: &StacksBlockId, + num_headers_requested: u32, + ) -> Result { + let header_info = StacksChainState::load_staging_block_info(chainstate.db(), tip)? 
+ .ok_or(Error::NoSuchBlockError)?; + + let num_headers = if header_info.height < (num_headers_requested as u64) { + header_info.height as u32 + } else { + num_headers_requested + }; + + test_debug!("Request for {} headers from {}", num_headers, tip); + + Ok(StreamCursor::Headers(HeaderStreamData { + index_block_hash: tip.clone(), + offset: 0, + total_bytes: 0, + num_headers: num_headers, + header_bytes: None, + end_of_stream: false, + corked: false, + })) + } + + pub fn new_tx_stream( + tx_query: MemPoolSyncData, + max_txs: u64, + height: u64, + page_id_opt: Option, + ) -> StreamCursor { + let last_randomized_txid = page_id_opt.unwrap_or_else(|| { + let random_bytes = rand::thread_rng().gen::<[u8; 32]>(); + Txid(random_bytes) + }); + + StreamCursor::MempoolTxs(TxStreamData { + tx_query, + last_randomized_txid: last_randomized_txid, + tx_buf: vec![], + tx_buf_ptr: 0, + num_txs: 0, + max_txs: max_txs, + height: height, + corked: false, }) } + fn stream_one_byte(fd: &mut W, b: u8) -> Result { + loop { + match fd.write(&[b]) { + Ok(0) => { + // done (disconnected) + return Ok(0); + } + Ok(n) => { + return Ok(n as u64); + } + Err(e) => { + if e.kind() == io::ErrorKind::Interrupted { + // EINTR; try again + continue; + } else if e.kind() == io::ErrorKind::WouldBlock + || (cfg!(windows) && e.kind() == io::ErrorKind::TimedOut) + { + // blocked + return Ok(0); + } else { + return Err(Error::WriteError(e)); + } + } + } + } + } + + pub fn get_offset(&self) -> u64 { + match self { + StreamCursor::Block(ref stream) => stream.offset(), + StreamCursor::Microblocks(ref stream) => stream.offset(), + StreamCursor::Headers(ref stream) => stream.offset(), + // no-op for mempool txs + StreamCursor::MempoolTxs(..) => 0, + } + } + + pub fn add_more_bytes(&mut self, nw: u64) { + match self { + StreamCursor::Block(ref mut stream) => stream.add_bytes(nw), + StreamCursor::Microblocks(ref mut stream) => stream.add_bytes(nw), + StreamCursor::Headers(ref mut stream) => stream.add_bytes(nw), + // no-op fo mempool txs + StreamCursor::MempoolTxs(..) => (), + } + } + pub fn stream_to( &mut self, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, fd: &mut W, count: u64, ) -> Result { - if self.is_microblock { - let mut num_written = 0; - if !self.unconfirmed { - // Confirmed microblocks are represented as a consensus-encoded vector of - // microblocks, in reverse sequence order. 
- // Write 4-byte length prefix first - while self.num_mblocks_ptr < self.num_mblocks_buf.len() { - // stream length prefix - test_debug!( - "Confirmed microblock stream for {}: try to send length prefix {:?} (ptr={})", - &self.microblock_hash, - &self.num_mblocks_buf[self.num_mblocks_ptr..], - self.num_mblocks_ptr - ); - let num_sent = match fd.write(&self.num_mblocks_buf[self.num_mblocks_ptr..]) { - Ok(0) => { - // done (disconnected) - test_debug!( - "Confirmed microblock stream for {}: wrote 0 bytes", - &self.microblock_hash - ); - return Ok(num_written); - } - Ok(n) => { - self.num_mblocks_ptr += n; - n as u64 - } - Err(e) => { - if e.kind() == io::ErrorKind::Interrupted { - // EINTR; try again - continue; - } else if e.kind() == io::ErrorKind::WouldBlock - || (cfg!(windows) && e.kind() == io::ErrorKind::TimedOut) - { - // blocked - return Ok(num_written); - } else { - return Err(Error::WriteError(e)); - } + match self { + StreamCursor::Microblocks(ref mut stream) => { + let mut num_written = 0; + if !stream.unconfirmed { + // Confirmed microblocks are represented as a consensus-encoded vector of + // microblocks, in reverse sequence order. + // Write 4-byte length prefix first + num_written += stream.stream_count(fd, count)?; + StacksChainState::stream_microblocks_confirmed(&chainstate, fd, stream, count) + .and_then(|bytes_sent| Ok(bytes_sent + num_written)) + } else { + StacksChainState::stream_microblocks_unconfirmed(&chainstate, fd, stream, count) + .and_then(|bytes_sent| Ok(bytes_sent + num_written)) + } + } + StreamCursor::MempoolTxs(ref mut tx_stream) => mempool.stream_txs(fd, tx_stream, count), + StreamCursor::Headers(ref mut stream) => { + let mut num_written = 0; + if stream.total_bytes == 0 { + test_debug!("Opening header stream"); + let byte_written = StreamCursor::stream_one_byte(fd, '[' as u8)?; + num_written += byte_written; + stream.total_bytes += byte_written; + } + if stream.total_bytes > 0 { + let mut sent = chainstate.stream_headers(fd, stream, count)?; + + if stream.end_of_stream && !stream.corked { + // end of stream; cork it + test_debug!("Corking header stream"); + let byte_written = StreamCursor::stream_one_byte(fd, ']' as u8)?; + if byte_written > 0 { + sent += byte_written; + stream.total_bytes += byte_written; + stream.corked = true; } - }; - num_written += num_sent; - test_debug!( - "Confirmed microblock stream for {}: sent {} bytes ({} total)", - &self.microblock_hash, - num_sent, - num_written - ); + } + num_written += sent; } - StacksChainState::stream_microblocks_confirmed(&chainstate, fd, self, count) - .and_then(|bytes_sent| Ok(bytes_sent + num_written)) - } else { - StacksChainState::stream_microblocks_unconfirmed(&chainstate, fd, self, count) - .and_then(|bytes_sent| Ok(bytes_sent + num_written)) + Ok(num_written) } - } else { - chainstate.stream_block(fd, self, count) + StreamCursor::Block(ref mut stream) => chainstate.stream_block(fd, stream, count), } } } +impl Streamer for StreamCursor { + fn offset(&self) -> u64 { + self.get_offset() + } + fn add_bytes(&mut self, nw: u64) { + self.add_more_bytes(nw) + } +} + +impl Streamer for HeaderStreamData { + fn offset(&self) -> u64 { + self.offset + } + fn add_bytes(&mut self, nw: u64) { + self.offset += nw; + self.total_bytes += nw; + } +} + +impl Streamer for BlockStreamData { + fn offset(&self) -> u64 { + self.offset + } + fn add_bytes(&mut self, nw: u64) { + self.offset += nw; + self.total_bytes += nw; + } +} + +impl Streamer for MicroblockStreamData { + fn offset(&self) -> u64 { + self.offset + } + 
fn add_bytes(&mut self, nw: u64) { + self.offset += nw; + self.total_bytes += nw; + } +} + impl StacksChainState { fn get_index_block_pathbuf(blocks_dir: &str, index_block_hash: &StacksBlockId) -> PathBuf { let block_hash_bytes = index_block_hash.as_bytes(); @@ -581,7 +753,7 @@ impl StacksChainState { } pub fn atomic_file_store( - path: &String, + path: &str, delete_on_error: bool, mut writer: F, ) -> Result<(), Error> @@ -624,14 +796,14 @@ impl StacksChainState { Ok(()) } - pub fn atomic_file_write(path: &String, bytes: &Vec) -> Result<(), Error> { + pub fn atomic_file_write(path: &str, bytes: &Vec) -> Result<(), Error> { StacksChainState::atomic_file_store(path, false, |ref mut fd| { fd.write_all(bytes) .map_err(|e| Error::DBError(db_error::IOError(e))) }) } - pub fn get_file_size(path: &String) -> Result { + pub fn get_file_size(path: &str) -> Result { let sz = match fs::metadata(path) { Ok(md) => md.len(), Err(e) => { @@ -646,7 +818,7 @@ impl StacksChainState { Ok(sz) } - pub fn consensus_load(path: &String) -> Result { + pub fn consensus_load(path: &str) -> Result { let mut fd = fs::OpenOptions::new() .read(true) .write(false) @@ -666,7 +838,7 @@ impl StacksChainState { /// Do we have a stored a block in the chunk store? pub fn has_block_indexed( - blocks_dir: &String, + blocks_dir: &str, index_block_hash: &StacksBlockId, ) -> Result { let block_path = StacksChainState::get_index_block_path(blocks_dir, index_block_hash)?; @@ -685,7 +857,7 @@ impl StacksChainState { /// Have we processed and stored a particular block? pub fn has_stored_block( blocks_db: &DBConn, - blocks_dir: &String, + blocks_dir: &str, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result { @@ -709,7 +881,7 @@ impl StacksChainState { /// Store a block to the chunk store, named by its hash pub fn store_block( - blocks_dir: &String, + blocks_dir: &str, consensus_hash: &ConsensusHash, block: &StacksBlock, ) -> Result<(), Error> { @@ -730,7 +902,7 @@ impl StacksChainState { /// Store an empty block to the chunk store, named by its hash. #[cfg(test)] fn store_empty_block( - blocks_path: &String, + blocks_path: &str, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result<(), Error> { @@ -787,7 +959,7 @@ impl StacksChainState { /// Free up all state for an invalid block fn free_block_state( - blocks_path: &String, + blocks_path: &str, consensus_hash: &ConsensusHash, block_header: &StacksBlockHeader, ) -> () { @@ -818,7 +990,7 @@ impl StacksChainState { #[cfg(test)] pub fn list_microblocks( blocks_conn: &DBConn, - blocks_dir: &String, + blocks_dir: &str, ) -> Result)>, Error> { let mut blocks = StacksChainState::list_blocks(blocks_conn)?; let mut ret = vec![]; @@ -845,7 +1017,7 @@ impl StacksChainState { /// Returns Ok(none) if this block was found, but is known to be invalid /// Returns Err(...) on not found or I/O error pub fn load_block_bytes( - blocks_dir: &String, + blocks_dir: &str, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result>, Error> { @@ -883,7 +1055,7 @@ impl StacksChainState { /// Returns Ok(None) if this block was found, but is known to be invalid /// Returns Err(...) 
on not found or I/O error pub fn load_block( - blocks_dir: &String, + blocks_dir: &str, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result, Error> { @@ -898,24 +1070,40 @@ impl StacksChainState { Ok(Some(block)) } + fn inner_load_block_header(block_path: &str) -> Result, Error> { + let sz = StacksChainState::get_file_size(block_path)?; + if sz == 0 { + debug!("Zero-sized block {}", &block_path); + return Ok(None); + } + + let block_header: StacksBlockHeader = StacksChainState::consensus_load(block_path)?; + Ok(Some(block_header)) + } + /// Load up an anchored block header from the chunk store. /// Returns Ok(Some(blockheader)) if found. /// Returns Ok(None) if this block was found, but is known to be invalid /// Returns Err(...) on not found or I/O error pub fn load_block_header( - blocks_dir: &String, + blocks_dir: &str, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result, Error> { let block_path = StacksChainState::get_block_path(blocks_dir, consensus_hash, block_hash)?; - let sz = StacksChainState::get_file_size(&block_path)?; - if sz == 0 { - debug!("Zero-sized block {}", &block_hash); - return Ok(None); - } + StacksChainState::inner_load_block_header(&block_path) + } - let block_header: StacksBlockHeader = StacksChainState::consensus_load(&block_path)?; - Ok(Some(block_header)) + /// Load up an anchored block header from the chunk store, given the index block hash + /// Returns Ok(Some(blockheader)) if found. + /// Returns Ok(None) if this block was found, but is known to be invalid + /// Returns Err(...) on not found or I/O error + pub fn load_block_header_indexed( + blocks_dir: &str, + index_block_hash: &StacksBlockId, + ) -> Result, Error> { + let block_path = StacksChainState::get_index_block_path(blocks_dir, index_block_hash)?; + StacksChainState::inner_load_block_header(&block_path) } /// Closure for defaulting to an empty microblock stream if a microblock stream file is not found @@ -933,7 +1121,7 @@ impl StacksChainState { /// Query should be structured to return rows of BLOBs fn load_block_data_blobs

( conn: &DBConn, - sql_query: &String, + sql_query: &str, sql_args: P, ) -> Result>, Error> where @@ -1015,7 +1203,7 @@ impl StacksChainState { /// Load up a preprocessed (queued) but still unprocessed block. pub fn load_staging_block( block_conn: &DBConn, - blocks_path: &String, + blocks_path: &str, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result, Error> { @@ -1068,7 +1256,7 @@ impl StacksChainState { #[cfg(test)] fn load_staging_block_data( block_conn: &DBConn, - blocks_path: &String, + blocks_path: &str, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result, Error> { @@ -1128,7 +1316,7 @@ impl StacksChainState { /// Load up a block's microblock public key hash, staging or not fn load_block_pubkey_hash( block_conn: &DBConn, - block_path: &String, + block_path: &str, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result, Error> { @@ -1503,7 +1691,7 @@ impl StacksChainState { /// Doesn't matter if it's staging or not. pub fn load_parent_block_header( sort_ic: &SortitionDBConn, - blocks_path: &String, + blocks_path: &str, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, ) -> Result, Error> { @@ -1547,7 +1735,7 @@ impl StacksChainState { /// chain. fn store_staging_block<'a>( tx: &mut DBTx<'a>, - blocks_path: &String, + blocks_path: &str, consensus_hash: &ConsensusHash, block: &StacksBlock, parent_consensus_hash: &ConsensusHash, @@ -1947,6 +2135,7 @@ impl StacksChainState { let block_bench_start = get_epoch_time_ms(); let mut parent_microblock_hash = None; + // TODO: just do a stat? cache this? match StacksChainState::load_block_header( &self.blocks_path, &consensus_hash, @@ -1974,6 +2163,7 @@ impl StacksChainState { let mblock_bench_begin = get_epoch_time_ms(); if let Some(parent_microblock) = parent_microblock_hash { + // TODO: can we cache this? if self.has_processed_microblocks_at_tail( &index_block_hash, &parent_microblock, @@ -2055,7 +2245,7 @@ impl StacksChainState { /// The blocks database will eventually delete all orphaned data. 
fn delete_orphaned_epoch_data<'a>( tx: &mut DBTx<'a>, - blocks_path: &String, + blocks_path: &str, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, ) -> Result<(), Error> { @@ -2117,7 +2307,7 @@ impl StacksChainState { fn set_block_processed<'a, 'b>( tx: &mut DBTx<'a>, mut sort_tx_opt: Option<&mut SortitionHandleTx<'b>>, - blocks_path: &String, + blocks_path: &str, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, accept: bool, @@ -2248,7 +2438,7 @@ impl StacksChainState { #[cfg(test)] fn set_block_orphaned<'a>( tx: &mut DBTx<'a>, - blocks_path: &String, + blocks_path: &str, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, ) -> Result<(), Error> { @@ -2590,29 +2780,6 @@ impl StacksChainState { ) } - /// Given an index microblock hash, get the microblock hash and its anchored block and - /// consensus hash - pub fn get_microblock_parent_header_hashes( - blocks_conn: &DBConn, - index_microblock_hash: &StacksBlockId, - ) -> Result, Error> { - let sql = format!("SELECT consensus_hash,anchored_block_hash,microblock_hash FROM staging_microblocks WHERE index_microblock_hash = ?1"); - let args = [index_microblock_hash as &dyn ToSql]; - - blocks_conn - .query_row(&sql, &args, |row| { - let consensus_hash = ConsensusHash::from_column(row, "consensus_hash") - .expect("Expected consensus_hash - database corrupted"); - let anchored_block_hash = BlockHeaderHash::from_column(row, "anchored_block_hash") - .expect("Expected anchored_block_hash - database corrupted"); - let microblock_hash = BlockHeaderHash::from_column(row, "microblock_hash") - .expect("Expected microblock_hash - database corrupted"); - Ok((consensus_hash, anchored_block_hash, microblock_hash)) - }) - .optional() - .map_err(|e| Error::DBError(db_error::SqliteError(e))) - } - /// Get the sqlite rowid for a staging microblock, given the hash of the microblock. /// Returns None if no such microblock. fn stream_microblock_get_rowid( @@ -2646,33 +2813,223 @@ impl StacksChainState { Ok(microblock_info) } - /// Stream data from one Read to one Write - fn stream_data( + /// Write header data to the fd + fn write_stream_data( fd: &mut W, - stream: &mut BlockStreamData, + stream: &mut S, input: &mut R, count: u64, ) -> Result { - input - .seek(SeekFrom::Start(stream.offset)) - .map_err(Error::ReadError)?; - let mut buf = vec![0u8; count as usize]; let nr = input.read(&mut buf).map_err(Error::ReadError)?; fd.write_all(&buf[0..nr]).map_err(Error::WriteError)?; - stream.offset += nr as u64; - stream.total_bytes += nr as u64; + stream.add_bytes(nr as u64); Ok(nr as u64) } + /// Stream header data from one Read to one Write + fn stream_data( + fd: &mut W, + stream: &mut S, + input: &mut R, + count: u64, + ) -> Result { + input + .seek(SeekFrom::Start(stream.offset())) + .map_err(Error::ReadError)?; + + StacksChainState::write_stream_data(fd, stream, input, count) + } + + /// Stream a single header's data from disk + /// If this method returns 0, it's because we're EOF on the header and should begin the next. + fn stream_one_header( + blocks_conn: &DBConn, + block_path: &str, + fd: &mut W, + stream: &mut HeaderStreamData, + count: u64, + ) -> Result { + if stream.header_bytes.is_none() && stream.num_headers > 0 { + let header = + StacksChainState::load_block_header_indexed(block_path, &stream.index_block_hash)? + .ok_or(Error::NoSuchBlockError)?; + + let header_info = + StacksChainState::load_staging_block_info(blocks_conn, &stream.index_block_hash)? 
+ .ok_or(Error::NoSuchBlockError)?; + + let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( + &header_info.parent_consensus_hash, + &header_info.parent_anchored_block_hash, + ); + + let mut header_bytes = vec![]; + let extended_header = ExtendedStacksHeader { + consensus_hash: header_info.consensus_hash, + header: header, + parent_block_id: parent_index_block_hash, + }; + + serde_json::to_writer(&mut header_bytes, &extended_header).map_err(|e| { + Error::NetError(net_error::SerializeError(format!( + "Failed to send as JSON: {:?}", + &e + ))) + })?; + + if stream.num_headers > 1 { + header_bytes.push(',' as u8); + } + + test_debug!( + "header_bytes: {}", + String::from_utf8(header_bytes.clone()).unwrap() + ); + + stream.header_bytes = Some(header_bytes); + stream.offset = 0; + } + + if stream.header_bytes.is_some() { + let header_bytes = stream + .header_bytes + .take() + .expect("Do not have header bytes and did not set them"); + let res = (|| { + if stream.offset >= (header_bytes.len() as u64) { + // EOF + return Ok(0); + } + + let num_bytes = StacksChainState::write_stream_data( + fd, + stream, + &mut &header_bytes[(stream.offset as usize)..], + count, + )?; + test_debug!( + "Stream header hash={} offset={} total_bytes={}, num_bytes={} num_headers={}", + &stream.index_block_hash, + stream.offset, + stream.total_bytes, + num_bytes, + stream.num_headers + ); + Ok(num_bytes) + })(); + stream.header_bytes = Some(header_bytes); + res + } else { + Ok(0) + } + } + + /// Stream multiple headers from disk, moving in reverse order from the chain tip back. + /// Returns total number of bytes written (will be equal to the number of bytes read). + /// Returns 0 if we run out of headers + fn stream_headers( + &self, + fd: &mut W, + stream: &mut HeaderStreamData, + count: u64, + ) -> Result { + let mut to_write = count; + while to_write > 0 { + let nw = match StacksChainState::stream_one_header( + &self.db(), + &self.blocks_path, + fd, + stream, + to_write, + ) { + Ok(nw) => nw, + Err(Error::DBError(db_error::NotFoundError)) => { + // out of headers + debug!( + "No more header to stream after {}", + &stream.index_block_hash + ); + stream.header_bytes = None; + stream.end_of_stream = true; + break; + } + Err(e) => { + return Err(e); + } + }; + + if nw == 0 { + if stream.num_headers == 0 { + // out of headers + debug!( + "No more header to stream after {}", + &stream.index_block_hash + ); + stream.header_bytes = None; + stream.end_of_stream = true; + break; + } + + // EOF on header; move to the next one (its parent) + let header_info = match StacksChainState::load_staging_block_info( + &self.db(), + &stream.index_block_hash, + )? 
{ + Some(x) => x, + None => { + // out of headers + debug!( + "Out of headers to stream after block {}", + &stream.index_block_hash + ); + stream.header_bytes = None; + stream.end_of_stream = true; + break; + } + }; + + let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( + &header_info.parent_consensus_hash, + &header_info.parent_anchored_block_hash, + ); + + stream.index_block_hash = parent_index_block_hash; + stream.num_headers = stream + .num_headers + .checked_sub(1) + .expect("BUG: streamed more headers than called for"); + + stream.header_bytes = None; + } else { + to_write = to_write + .checked_sub(nw) + .expect("BUG: wrote more data than called for"); + } + + debug!( + "Streaming header={}: to_write={}, nw={}", + &stream.index_block_hash, to_write, nw + ); + } + debug!( + "Streamed headers ({} remaining): {} - {} = {}", + stream.num_headers, + count, + to_write, + count - to_write + ); + Ok(count - to_write) + } + /// Stream a single microblock's data from the staging database. /// If this method returns 0, it's because we're EOF on the blob. fn stream_one_microblock( blocks_conn: &DBConn, fd: &mut W, - stream: &mut BlockStreamData, + stream: &mut MicroblockStreamData, count: u64, ) -> Result { let rowid = match stream.rowid { @@ -2730,7 +3087,7 @@ impl StacksChainState { fn stream_microblocks_confirmed( chainstate: &StacksChainState, fd: &mut W, - stream: &mut BlockStreamData, + stream: &mut MicroblockStreamData, count: u64, ) -> Result { let mut to_write = count; @@ -2795,7 +3152,7 @@ impl StacksChainState { /// Stream block data from the chunk store. fn stream_data_from_chunk_store( - blocks_path: &String, + blocks_path: &str, fd: &mut W, stream: &mut BlockStreamData, count: u64, @@ -2842,7 +3199,7 @@ impl StacksChainState { pub fn stream_microblocks_unconfirmed( chainstate: &StacksChainState, fd: &mut W, - stream: &mut BlockStreamData, + stream: &mut MicroblockStreamData, count: u64, ) -> Result { let mut to_write = count; @@ -3647,7 +4004,7 @@ impl StacksChainState { /// Returns true if an orphan block was processed fn process_next_orphaned_staging_block<'a>( blocks_tx: &mut DBTx<'a>, - blocks_path: &String, + blocks_path: &str, ) -> Result { test_debug!("Find next orphaned block"); @@ -3744,7 +4101,7 @@ impl StacksChainState { /// Returns None if not. 
fn find_next_staging_block<'a>( blocks_tx: &mut StacksDBTx<'a>, - blocks_path: &String, + blocks_path: &str, sort_tx: &mut SortitionHandleTx, ) -> Result, StagingBlock)>, Error> { test_debug!("Find next staging block"); @@ -3927,12 +4284,13 @@ impl StacksChainState { let mut receipts = vec![]; for microblock in microblocks.iter() { debug!("Process microblock {}", µblock.block_hash()); - for tx in microblock.txs.iter() { + for (tx_index, tx) in microblock.txs.iter().enumerate() { let (tx_fee, mut tx_receipt) = StacksChainState::process_transaction(clarity_tx, tx, false) .map_err(|e| (e, microblock.block_hash()))?; tx_receipt.microblock_header = Some(microblock.header.clone()); + tx_receipt.tx_index = tx_index as u32; fees = fees.checked_add(tx_fee as u128).expect("Fee overflow"); burns = burns .checked_add(tx_receipt.stx_burned as u128) @@ -4049,6 +4407,7 @@ impl StacksChainState { contract_analysis: None, execution_cost, microblock_header: None, + tx_index: 0, }; all_receipts.push(receipt); @@ -4103,6 +4462,7 @@ impl StacksChainState { contract_analysis: None, execution_cost: ExecutionCost::zero(), microblock_header: None, + tx_index: 0, }), Err(e) => { info!("TransferStx burn op processing error."; @@ -4124,18 +4484,21 @@ impl StacksChainState { fn process_block_transactions( clarity_tx: &mut ClarityTx, block: &StacksBlock, + mut tx_index: u32, ) -> Result<(u128, u128, Vec), Error> { let mut fees = 0u128; let mut burns = 0u128; let mut receipts = vec![]; for tx in block.txs.iter() { - let (tx_fee, tx_receipt) = + let (tx_fee, mut tx_receipt) = StacksChainState::process_transaction(clarity_tx, tx, false)?; fees = fees.checked_add(tx_fee as u128).expect("Fee overflow"); + tx_receipt.tx_index = tx_index; burns = burns .checked_add(tx_receipt.stx_burned as u128) .expect("Burns overflow"); receipts.push(tx_receipt); + tx_index += 1; } Ok((fees, burns, receipts)) } @@ -4282,6 +4645,238 @@ impl StacksChainState { Ok(parent_miner) } + /// Called in both follower and miner block assembly paths. + /// Returns clarity_tx, list of receipts, microblock execution cost, + /// microblock fees, microblock burns, list of microblock tx receipts, + /// miner rewards tuples, the stacks epoch id, and a boolean that + /// represents whether the epoch transition has been applied. + pub fn setup_block<'a>( + chainstate_tx: &'a mut ChainstateTx, + clarity_instance: &'a mut ClarityInstance, + burn_dbconn: &'a dyn BurnStateDB, + conn: &Connection, + chain_tip: &StacksHeaderInfo, + burn_tip: BurnchainHeaderHash, + burn_tip_height: u32, + parent_consensus_hash: ConsensusHash, + parent_header_hash: BlockHeaderHash, + parent_microblocks: &Vec, + mainnet: bool, + miner_id_opt: Option, + ) -> Result, Error> { + let parent_index_hash = + StacksBlockHeader::make_index_block_hash(&parent_consensus_hash, &parent_header_hash); + + // find matured miner rewards, so we can grant them within the Clarity DB tx. + let (latest_matured_miners, matured_miner_parent) = { + let latest_miners = StacksChainState::get_scheduled_block_rewards( + chainstate_tx.deref_mut(), + chain_tip, + )?; + let parent_miner = StacksChainState::get_parent_matured_miner( + chainstate_tx.deref_mut(), + mainnet, + &latest_miners, + )?; + (latest_miners, parent_miner) + }; + + let stacking_burn_ops = SortitionDB::get_stack_stx_ops(conn, &burn_tip)?; + let transfer_burn_ops = SortitionDB::get_transfer_stx_ops(conn, &burn_tip)?; + + // load the execution cost of the parent block if the executor is the follower. 
+ // otherwise, if the executor is the miner, only load the parent cost if the parent + // microblock stream is non-empty. + let parent_block_cost = if miner_id_opt.is_none() || !parent_microblocks.is_empty() { + let cost = StacksChainState::get_stacks_block_anchored_cost( + &chainstate_tx.deref().deref(), + &parent_index_hash, + )? + .ok_or_else(|| { + Error::InvalidStacksBlock(format!( + "Failed to load parent block cost. parent_stacks_block_id = {}", + &parent_index_hash + )) + })?; + + debug!( + "Parent block {}/{} cost {:?}", + &parent_consensus_hash, &parent_header_hash, &cost + ); + cost + } else { + ExecutionCost::zero() + }; + + let mut clarity_tx = StacksChainState::chainstate_block_begin( + chainstate_tx, + clarity_instance, + burn_dbconn, + &parent_consensus_hash, + &parent_header_hash, + &MINER_BLOCK_CONSENSUS_HASH, + &MINER_BLOCK_HEADER_HASH, + ); + + let evaluated_epoch = clarity_tx.get_epoch(); + clarity_tx.reset_cost(parent_block_cost.clone()); + + let matured_miner_rewards_opt = match StacksChainState::find_mature_miner_rewards( + &mut clarity_tx, + &chain_tip, + latest_matured_miners, + matured_miner_parent, + ) { + Ok(miner_rewards_opt) => miner_rewards_opt, + Err(e) => { + if let Some(_) = miner_id_opt { + return Err(e); + } else { + let msg = format!("Failed to load miner rewards: {:?}", &e); + warn!("{}", &msg); + + clarity_tx.rollback_block(); + return Err(Error::InvalidStacksBlock(msg)); + } + } + }; + + if let Some(miner_id) = miner_id_opt { + debug!( + "Miner {}: Apply {} parent microblocks", + miner_id, + parent_microblocks.len() + ); + } + + let t1 = get_epoch_time_ms(); + + // process microblock stream. + // If we go over-budget, then we can't process this block either (which is by design) + let (microblock_fees, microblock_burns, microblock_txs_receipts) = + match StacksChainState::process_microblocks_transactions( + &mut clarity_tx, + &parent_microblocks, + ) { + Ok((fees, burns, events)) => (fees, burns, events), + Err((e, mblock_header_hash)) => { + let msg = format!( + "Invalid Stacks microblocks {},{} (offender {}): {:?}", + parent_consensus_hash, parent_header_hash, mblock_header_hash, &e + ); + warn!("{}", &msg); + + if miner_id_opt.is_none() { + clarity_tx.rollback_block(); + } + return Err(Error::InvalidStacksMicroblock(msg, mblock_header_hash)); + } + }; + + let t2 = get_epoch_time_ms(); + + if let Some(miner_id) = miner_id_opt { + debug!( + "Miner {}: Finished applying {} parent microblocks in {}ms\n", + miner_id, + parent_microblocks.len(), + t2.saturating_sub(t1) + ); + } + // find microblock cost + let mut microblock_execution_cost = clarity_tx.cost_so_far(); + microblock_execution_cost + .sub(&parent_block_cost) + .expect("BUG: block_cost + microblock_cost < block_cost"); + + // if we get here, then we need to reset the block-cost back to 0 since this begins the + // epoch defined by this miner. + clarity_tx.reset_cost(ExecutionCost::zero()); + + // is this stacks block the first of a new epoch? 
+ let (applied_epoch_transition, mut tx_receipts) = + StacksChainState::process_epoch_transition(&mut clarity_tx, burn_tip_height)?; + + // process stacking & transfer operations from bitcoin ops + tx_receipts.extend(StacksChainState::process_stacking_ops( + &mut clarity_tx, + stacking_burn_ops, + )); + tx_receipts.extend(StacksChainState::process_transfer_ops( + &mut clarity_tx, + transfer_burn_ops, + )); + + Ok(SetupBlockResult { + clarity_tx, + tx_receipts, + microblock_execution_cost, + microblock_fees, + microblock_burns, + microblock_txs_receipts, + matured_miner_rewards_opt, + evaluated_epoch, + applied_epoch_transition, + }) + } + + /// This function is called in both `append_block` in blocks.rs (follower) and + /// `mine_anchored_block` in miner.rs. + /// Processes matured miner rewards, alters liquid supply of ustx, processes + /// stx lock events, and marks the microblock public key as used + /// Returns stx lockup events. + pub fn finish_block( + clarity_tx: &mut ClarityTx, + miner_payouts: Option<(MinerReward, Vec, MinerReward)>, + block_height: u32, + mblock_pubkey_hash: Hash160, + ) -> Result, Error> { + // add miner payments + if let Some((ref miner_reward, ref user_rewards, ref parent_reward)) = + miner_payouts.as_ref() + { + // grant in order by miner, then users + let matured_ustx = StacksChainState::process_matured_miner_rewards( + clarity_tx, + miner_reward, + user_rewards, + parent_reward, + )?; + + clarity_tx.increment_ustx_liquid_supply(matured_ustx); + } + + // process unlocks + let (new_unlocked_ustx, lockup_events) = StacksChainState::process_stx_unlocks(clarity_tx)?; + + clarity_tx.increment_ustx_liquid_supply(new_unlocked_ustx); + + // mark microblock public key as used + match StacksChainState::insert_microblock_pubkey_hash( + clarity_tx, + block_height, + &mblock_pubkey_hash, + ) { + Ok(_) => { + debug!( + "Added microblock public key {} at height {}", + &mblock_pubkey_hash, block_height + ); + } + Err(e) => { + let msg = format!( + "Failed to insert microblock pubkey hash {} at height {}: {:?}", + &mblock_pubkey_hash, block_height, &e + ); + warn!("{}", &msg); + + return Err(Error::InvalidStacksBlock(msg)); + } + } + + Ok(lockup_events) + } + /// Process the next pre-processed staging block. /// We've already processed parent_chain_tip. chain_tip refers to a block we have _not_ /// processed yet. @@ -4315,7 +4910,6 @@ impl StacksChainState { let mainnet = chainstate_tx.get_config().mainnet; let next_block_height = block.header.total_work.work; - let applied_epoch_transition; // NEW in 2.05 // if the parent marked an epoch transition -- i.e. its children necessarily run in @@ -4341,155 +4935,118 @@ impl StacksChainState { } } - // find matured miner rewards, so we can grant them within the Clarity DB tx. 
-        let latest_matured_miners = StacksChainState::get_scheduled_block_rewards(
-            chainstate_tx.deref_mut(),
-            &parent_chain_tip,
-        )?;
+        let (parent_consensus_hash, parent_block_hash) = if block.is_first_mined() {
+            // has to be the sentinel hashes if this block has no parent
+            (
+                FIRST_BURNCHAIN_CONSENSUS_HASH.clone(),
+                FIRST_STACKS_BLOCK_HASH.clone(),
+            )
+        } else {
+            (
+                parent_chain_tip.consensus_hash.clone(),
+                parent_chain_tip.anchored_header.block_hash(),
+            )
+        };
 
-        let matured_miner_parent = StacksChainState::get_parent_matured_miner(
-            chainstate_tx.deref_mut(),
+        let (last_microblock_hash, last_microblock_seq) = if microblocks.len() > 0 {
+            let _first_mblock_hash = microblocks[0].block_hash();
+            let num_mblocks = microblocks.len();
+            let last_microblock_hash = microblocks[num_mblocks - 1].block_hash();
+            let last_microblock_seq = microblocks[num_mblocks - 1].header.sequence;
+
+            debug!(
+                "\n\nAppend {} microblocks {}/{}-{} off of {}/{}\n",
+                num_mblocks,
+                chain_tip_consensus_hash,
+                _first_mblock_hash,
+                last_microblock_hash,
+                parent_consensus_hash,
+                parent_block_hash
+            );
+            (last_microblock_hash, last_microblock_seq)
+        } else {
+            (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0)
+        };
+
+        if last_microblock_hash != block.header.parent_microblock
+            || last_microblock_seq != block.header.parent_microblock_sequence
+        {
+            // the pre-processing step should prevent this from being reached
+            panic!("BUG: received discontiguous headers for processing: {} (seq={}) does not connect to {} (microblock parent is {} (seq {}))",
+                   last_microblock_hash, last_microblock_seq, block.block_hash(), block.header.parent_microblock, block.header.parent_microblock_sequence);
+        }
+
+        // get the burnchain block that precedes this block's sortition
+        let parent_burn_hash = SortitionDB::get_block_snapshot_consensus(
+            &burn_dbconn.tx(),
+            &chain_tip_consensus_hash,
+        )?
+ .expect("BUG: Failed to load snapshot for block snapshot during Stacks block processing") + .parent_burn_header_hash; + + let SetupBlockResult { + mut clarity_tx, + mut tx_receipts, + microblock_execution_cost, + microblock_fees, + microblock_burns, + microblock_txs_receipts, + matured_miner_rewards_opt, + evaluated_epoch, + applied_epoch_transition, + } = StacksChainState::setup_block( + chainstate_tx, + clarity_instance, + burn_dbconn, + &burn_dbconn.tx(), + &parent_chain_tip, + parent_burn_hash, + chain_tip_burn_header_height, + parent_consensus_hash, + parent_block_hash, + microblocks, mainnet, - &latest_matured_miners, + None, )?; + let block_limit = clarity_tx.block_limit().unwrap_or_else(|| { + warn!("Failed to read transaction block limit"); + ExecutionCost::max_value() + }); + let ( scheduled_miner_reward, - tx_receipts, - microblock_execution_cost, block_execution_cost, matured_rewards, matured_rewards_info, parent_burn_block_hash, parent_burn_block_height, parent_burn_block_timestamp, - evaluated_epoch, ) = { - let (parent_consensus_hash, parent_block_hash) = if block.is_first_mined() { - // has to be the sentinal hashes if this block has no parent - ( - FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), - FIRST_STACKS_BLOCK_HASH.clone(), - ) - } else { - ( - parent_chain_tip.consensus_hash.clone(), - parent_chain_tip.anchored_header.block_hash(), - ) - }; - // get previous burn block stats let (parent_burn_block_hash, parent_burn_block_height, parent_burn_block_timestamp) = if block.is_first_mined() { - (BurnchainHeaderHash([0; 32]), 0, 0) - } else { - match SortitionDB::get_block_snapshot_consensus( - burn_dbconn, - &parent_consensus_hash, - )? { - Some(sn) => ( - sn.burn_header_hash, - sn.block_height as u32, - sn.burn_header_timestamp, - ), - None => { - // shouldn't happen - warn!( - "CORRUPTION: block {}/{} does not correspond to a burn block", - &parent_consensus_hash, &parent_block_hash - ); - (BurnchainHeaderHash([0; 32]), 0, 0) - } - } - }; - - let (last_microblock_hash, last_microblock_seq) = if microblocks.len() > 0 { - let _first_mblock_hash = microblocks[0].block_hash(); - let num_mblocks = microblocks.len(); - let last_microblock_hash = microblocks[num_mblocks - 1].block_hash(); - let last_microblock_seq = microblocks[num_mblocks - 1].header.sequence; - - debug!( - "\n\nAppend {} microblocks {}/{}-{} off of {}/{}\n", - num_mblocks, - chain_tip_consensus_hash, - _first_mblock_hash, - last_microblock_hash, - parent_consensus_hash, - parent_block_hash - ); - (last_microblock_hash, last_microblock_seq) - } else { - (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0) - }; - - if last_microblock_hash != block.header.parent_microblock - || last_microblock_seq != block.header.parent_microblock_sequence - { - // the pre-processing step should prevent this from being reached - panic!("BUG: received discontiguous headers for processing: {} (seq={}) does not connect to {} (microblock parent is {} (seq {}))", - last_microblock_hash, last_microblock_seq, block.block_hash(), block.header.parent_microblock, block.header.parent_microblock_sequence); - } - - // get the burnchain block that precedes this block's sortition - let parent_burn_hash = SortitionDB::get_block_snapshot_consensus( - &burn_dbconn.tx(), - &chain_tip_consensus_hash, - )? 
-            .expect(
-                "BUG: Failed to load snapshot for block snapshot during Stacks block processing",
-            )
-            .parent_burn_header_hash;
-            let stacking_burn_ops =
-                SortitionDB::get_stack_stx_ops(&burn_dbconn.tx(), &parent_burn_hash)?;
-            let transfer_burn_ops =
-                SortitionDB::get_transfer_stx_ops(&burn_dbconn.tx(), &parent_burn_hash)?;
-
-            let parent_block_cost = StacksChainState::get_stacks_block_anchored_cost(
-                &chainstate_tx.deref().deref(),
-                &StacksBlockHeader::make_index_block_hash(
-                    &parent_consensus_hash,
-                    &parent_block_hash,
-                ),
-            )?
-            .expect(&format!(
-                "BUG: no execution cost found for parent block {}/{}",
-                parent_consensus_hash, parent_block_hash
-            ));
-
-            let mut clarity_tx = StacksChainState::chainstate_block_begin(
-                chainstate_tx,
-                clarity_instance,
-                burn_dbconn,
-                &parent_consensus_hash,
-                &parent_block_hash,
-                &MINER_BLOCK_CONSENSUS_HASH,
-                &MINER_BLOCK_HEADER_HASH,
-            );
-
-            let evaluated_epoch = clarity_tx.get_epoch();
-
-            debug!(
-                "Parent block {}/{} cost {:?}",
-                &parent_consensus_hash, &parent_block_hash, &parent_block_cost
-            );
-            clarity_tx.reset_cost(parent_block_cost.clone());
-
-            let matured_miner_rewards_opt = match StacksChainState::find_mature_miner_rewards(
-                &mut clarity_tx,
-                parent_chain_tip,
-                latest_matured_miners,
-                matured_miner_parent,
-            ) {
-                Ok(miner_rewards_opt) => miner_rewards_opt,
-                Err(e) => {
-                    let msg = format!("Failed to load miner rewards: {:?}", &e);
-                    warn!("{}", &msg);
-
-                    clarity_tx.rollback_block();
-                    return Err(Error::InvalidStacksBlock(msg));
-                }
-            };
+                    (BurnchainHeaderHash([0; 32]), 0, 0)
+                } else {
+                    match SortitionDB::get_block_snapshot_consensus(
+                        burn_dbconn,
+                        &parent_consensus_hash,
+                    )? {
+                        Some(sn) => (
+                            sn.burn_header_hash,
+                            sn.block_height as u32,
+                            sn.burn_header_timestamp,
+                        ),
+                        None => {
+                            // shouldn't happen
+                            warn!(
+                                "CORRUPTION: block {}/{} does not correspond to a burn block",
+                                &parent_consensus_hash, &parent_block_hash
+                            );
+                            (BurnchainHeaderHash([0; 32]), 0, 0)
+                        }
+                    }
+                };
 
             // validation check -- is this microblock public key hash new to this fork?  It must
             // be, or this block is invalid.
@@ -4524,42 +5081,6 @@ impl StacksChainState {
                 }
             }
 
-            // process microblock stream.
-            // If we go over-budget, then we can't process this block either (which is by design)
-            let (microblock_fees, microblock_burns, microblock_txs_receipts) =
-                match StacksChainState::process_microblocks_transactions(
-                    &mut clarity_tx,
-                    &microblocks,
-                ) {
-                    Err((e, offending_mblock_header_hash)) => {
-                        let msg = format!(
-                            "Invalid Stacks microblocks {},{} (offender {}): {:?}",
-                            block.header.parent_microblock,
-                            block.header.parent_microblock_sequence,
-                            offending_mblock_header_hash,
-                            &e
-                        );
-                        warn!("{}", &msg);
-
-                        clarity_tx.rollback_block();
-                        return Err(Error::InvalidStacksMicroblock(
-                            msg,
-                            offending_mblock_header_hash,
-                        ));
-                    }
-                    Ok((fees, burns, events)) => (fees, burns, events),
-                };
-
-            // find microblock cost
-            let mut microblock_cost = clarity_tx.cost_so_far();
-            microblock_cost
-                .sub(&parent_block_cost)
-                .expect("BUG: block_cost + microblock_cost < block_cost");
-
-            // if we get here, then we need to reset the block-cost back to 0 since this begins the
-            // epoch defined by this miner.
- clarity_tx.reset_cost(ExecutionCost::zero()); - debug!("Append block"; "block" => %format!("{}/{}", chain_tip_consensus_hash, block.block_hash()), "parent_block" => %format!("{}/{}", parent_consensus_hash, parent_block_hash), @@ -4570,28 +5091,13 @@ impl StacksChainState { "microblock_parent_count" => %microblocks.len(), "evaluated_epoch" => %evaluated_epoch); - // is this stacks block the first of a new epoch? - let (epoch_transition, mut receipts) = StacksChainState::process_epoch_transition( - &mut clarity_tx, - chain_tip_burn_header_height, - )?; - - applied_epoch_transition = epoch_transition; - - // process stacking operations from bitcoin ops - receipts.extend(StacksChainState::process_stacking_ops( - &mut clarity_tx, - stacking_burn_ops, - )); - - receipts.extend(StacksChainState::process_transfer_ops( - &mut clarity_tx, - transfer_burn_ops, - )); - // process anchored block let (block_fees, block_burns, txs_receipts) = - match StacksChainState::process_block_transactions(&mut clarity_tx, &block) { + match StacksChainState::process_block_transactions( + &mut clarity_tx, + &block, + microblock_txs_receipts.len() as u32, + ) { Err(e) => { let msg = format!("Invalid Stacks block {}: {:?}", block.block_hash(), &e); warn!("{}", &msg); @@ -4604,40 +5110,26 @@ impl StacksChainState { } }; - receipts.extend(txs_receipts.into_iter()); + tx_receipts.extend(txs_receipts.into_iter()); let block_cost = clarity_tx.cost_so_far(); - // grant matured miner rewards - let new_liquid_miner_ustx = - if let Some((ref miner_reward, ref user_rewards, ref parent_miner_reward, _)) = - matured_miner_rewards_opt.as_ref() - { - // grant in order by miner, then users - StacksChainState::process_matured_miner_rewards( - &mut clarity_tx, - miner_reward, - user_rewards, - parent_miner_reward, - )? 
-            } else {
-                0
-            };
-
-            clarity_tx.increment_ustx_liquid_supply(new_liquid_miner_ustx);
-
             // obtain reward info for receipt
-            let (matured_rewards, matured_rewards_info) =
+            let (matured_rewards, matured_rewards_info, miner_payouts_opt) =
                 if let Some((miner_reward, mut user_rewards, parent_reward, reward_ptr)) =
                     matured_miner_rewards_opt
                 {
                     let mut ret = vec![];
-                    ret.push(miner_reward);
+                    ret.push(miner_reward.clone());
                     ret.append(&mut user_rewards);
-                    ret.push(parent_reward);
-                    (ret, Some(reward_ptr))
+                    ret.push(parent_reward.clone());
+                    (
+                        ret,
+                        Some(reward_ptr),
+                        Some((miner_reward, user_rewards, parent_reward)),
+                    )
                 } else {
-                    (vec![], None)
+                    (vec![], None, None)
                 };
 
             // total burns
@@ -4645,49 +5137,33 @@ impl StacksChainState {
                 .checked_add(microblock_burns)
                 .expect("Overflow: Too many STX burnt");
 
-            // unlock any uSTX
-            let (new_unlocked_ustx, mut lockup_events) =
-                StacksChainState::process_stx_unlocks(&mut clarity_tx)?;
+            let mut lockup_events = match StacksChainState::finish_block(
+                &mut clarity_tx,
+                miner_payouts_opt,
+                block.header.total_work.work as u32,
+                block.header.microblock_pubkey_hash,
+            ) {
+                Err(Error::InvalidStacksBlock(e)) => {
+                    clarity_tx.rollback_block();
+                    return Err(Error::InvalidStacksBlock(e));
+                }
+                Err(e) => return Err(e),
+                Ok(lockup_events) => lockup_events,
+            };
 
             // if any, append lockups events to the coinbase receipt
             if lockup_events.len() > 0 {
                 // Receipts are appended in order, so the first receipt should be
                 // the one of the coinbase transaction
-                if let Some(receipt) = receipts.get_mut(0) {
+                if let Some(receipt) = tx_receipts.get_mut(0) {
                     if receipt.is_coinbase_tx() {
                         receipt.events.append(&mut lockup_events);
                     }
                 } else {
-                    warn!("Unable to attach lockups events, first block's transaction is not a coinbase transaction")
+                    warn!("Unable to attach lockup events, block's first transaction is not a coinbase transaction")
                 }
             }
 
-            clarity_tx.increment_ustx_liquid_supply(new_unlocked_ustx);
-
-            // record that this microblock public key hash was used at this height
-            match StacksChainState::insert_microblock_pubkey_hash(
-                &mut clarity_tx,
-                block.header.total_work.work as u32,
-                &block.header.microblock_pubkey_hash,
-            ) {
-                Ok(_) => {
-                    debug!(
-                        "Added microblock public key {} at height {}",
-                        &block.header.microblock_pubkey_hash, block.header.total_work.work
-                    );
-                }
-                Err(e) => {
-                    let msg = format!(
-                        "Failed to insert microblock pubkey hash {} at height {}: {:?}",
-                        &block.header.microblock_pubkey_hash, block.header.total_work.work, &e
-                    );
-                    warn!("{}", &msg);
-
-                    clarity_tx.rollback_block();
-                    return Err(Error::InvalidStacksBlock(msg));
-                }
-            };
-
             let root_hash = clarity_tx.get_root_hash();
             if root_hash != block.header.state_index_root {
                 let msg = format!(
@@ -4703,7 +5179,7 @@ impl StacksChainState {
             }
 
             debug!("Reached state root {}", root_hash;
-                   "microblock cost" => %microblock_cost,
+                   "microblock cost" => %microblock_execution_cost,
                    "block cost" => %block_cost);
 
             // good to go!
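The hunks above and below complete a larger refactor: the follower's `append_block` and the
miner's block-assembly path now share the same `setup_block`/`finish_block` pair, so block
validation and block construction cannot drift apart. A condensed sketch of the intended call
sequence (illustrative only; argument lists are abbreviated, `miner_payouts_opt` stands for the
payout tuple extracted from `matured_miner_rewards_opt`, and `tx_index_base` for
`microblock_txs_receipts.len() as u32`):

    // 1. replay the parent microblock stream, load the parent cost, find matured rewards
    let SetupBlockResult {
        mut clarity_tx,
        mut tx_receipts,
        matured_miner_rewards_opt,
        ..
    } = StacksChainState::setup_block(/* ... */ None /* follower: no miner id */)?;

    // 2. apply this anchored block's transactions on top of the microblock stream
    let (block_fees, block_burns, txs_receipts) =
        StacksChainState::process_block_transactions(&mut clarity_tx, &block, tx_index_base)?;

    // 3. grant matured rewards, process STX unlocks, mark the microblock pubkey hash as used
    let lockup_events = StacksChainState::finish_block(
        &mut clarity_tx,
        miner_payouts_opt,
        block_height,
        mblock_pubkey_hash,
    )?;
    // lockup events are then attached to the coinbase receipt before the block is committed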
@@ -4742,19 +5218,16 @@ impl StacksChainState {
             )
             .expect("FATAL: parsed and processed a block without a coinbase");
 
-            receipts.extend(microblock_txs_receipts.into_iter());
+            tx_receipts.extend(microblock_txs_receipts.into_iter());
 
             (
                 scheduled_miner_reward,
-                receipts,
-                microblock_cost,
                 block_cost,
                 matured_rewards,
                 matured_rewards_info,
                 parent_burn_block_hash,
                 parent_burn_block_height,
                 parent_burn_block_timestamp,
-                evaluated_epoch,
             )
         };
 
@@ -4783,6 +5256,8 @@ impl StacksChainState {
 
         chainstate_tx.log_transactions_processed(&new_tip.index_block_hash(), &tx_receipts);
 
+        set_last_execution_cost_observed(&block_execution_cost, &block_limit);
+
         let epoch_receipt = StacksEpochReceipt {
             header: new_tip,
             tx_receipts,
@@ -5303,6 +5778,8 @@ impl StacksChainState {
         Ok(ret)
     }
 
+    /// Is the given address version currently supported?
+    /// NOTE: not consensus-critical; only used for mempool admission
     fn is_valid_address_version(mainnet: bool, version: u8) -> bool {
         if mainnet {
             version == C32_ADDRESS_VERSION_MAINNET_SINGLESIG
@@ -5682,6 +6159,7 @@ pub mod test {
     use chainstate::stacks::*;
     use core::mempool::*;
     use net::test::*;
+    use net::ExtendedStacksHeader;
     use util::db::Error as db_error;
     use util::db::*;
     use util::hash::*;
@@ -5693,6 +6171,8 @@ pub mod test {
 
     use super::*;
 
+    use serde_json;
+
     pub fn make_empty_coinbase_block(mblock_key: &StacksPrivateKey) -> StacksBlock {
         let privk = StacksPrivateKey::from_hex(
             "59e4d5e18351d6027a37920efe53c2f1cbadc50dca7d77169b7291dff936ed6d01",
@@ -8645,54 +9125,125 @@ pub mod test {
         }
     }
 
+    fn stream_one_header_to_vec(
+        blocks_conn: &DBConn,
+        blocks_path: &str,
+        stream: &mut StreamCursor,
+        count: u64,
+    ) -> Result<Vec<u8>, chainstate_error> {
+        if let StreamCursor::Headers(ref mut stream) = stream {
+            let mut bytes = vec![];
+            StacksChainState::stream_one_header(blocks_conn, blocks_path, &mut bytes, stream, count)
+                .map(|nr| {
+                    assert_eq!(bytes.len(), nr as usize);
+
+                    // truncate trailing ',' if it exists
+                    let len = bytes.len();
+                    if len > 0 {
+                        if bytes[len - 1] == ',' as u8 {
+                            let _ = bytes.pop();
+                        }
+                    }
+                    bytes
+                })
+        } else {
+            panic!("not a header stream");
+        }
+    }
+
     fn stream_one_staging_microblock_to_vec(
         blocks_conn: &DBConn,
-        stream: &mut BlockStreamData,
+        stream: &mut StreamCursor,
         count: u64,
     ) -> Result<Vec<u8>, chainstate_error> {
-        let mut bytes = vec![];
-        StacksChainState::stream_one_microblock(blocks_conn, &mut bytes, stream, count).map(|nr| {
-            assert_eq!(bytes.len(), nr as usize);
-            bytes
-        })
+        if let StreamCursor::Microblocks(ref mut stream) = stream {
+            let mut bytes = vec![];
+            StacksChainState::stream_one_microblock(blocks_conn, &mut bytes, stream, count).map(
+                |nr| {
+                    assert_eq!(bytes.len(), nr as usize);
+                    bytes
+                },
+            )
+        } else {
+            panic!("not a microblock stream");
+        }
     }
 
     fn stream_chunk_to_vec(
-        blocks_path: &String,
-        stream: &mut BlockStreamData,
+        blocks_path: &str,
+        stream: &mut StreamCursor,
         count: u64,
     ) -> Result<Vec<u8>, chainstate_error> {
+        if let StreamCursor::Block(ref mut stream) = stream {
+            let mut bytes = vec![];
+            StacksChainState::stream_data_from_chunk_store(blocks_path, &mut bytes, stream, count)
+                .map(|nr| {
+                    assert_eq!(bytes.len(), nr as usize);
+                    bytes
+                })
+        } else {
+            panic!("not a block stream");
+        }
+    }
+
+    fn stream_headers_to_vec(
+        chainstate: &mut StacksChainState,
+        stream: &mut StreamCursor,
+        count: u64,
+    ) -> Result<Vec<u8>, chainstate_error> {
+        let mempool = MemPoolDB::open_test(
+            chainstate.mainnet,
+            chainstate.chain_id,
+            &chainstate.root_path,
+        )
+        .unwrap();
         let mut bytes = vec![];
-        StacksChainState::stream_data_from_chunk_store(blocks_path, &mut bytes, stream, count).map(
-            |nr| {
+        stream
+            .stream_to(&mempool, chainstate, &mut bytes, count)
+            .map(|nr| {
                 assert_eq!(bytes.len(), nr as usize);
                 bytes
-            },
-        )
+            })
     }
 
     fn stream_unconfirmed_microblocks_to_vec(
         chainstate: &mut StacksChainState,
-        stream: &mut BlockStreamData,
+        stream: &mut StreamCursor,
        count: u64,
     ) -> Result<Vec<u8>, chainstate_error> {
+        let mempool = MemPoolDB::open_test(
+            chainstate.mainnet,
+            chainstate.chain_id,
+            &chainstate.root_path,
+        )
+        .unwrap();
         let mut bytes = vec![];
-        stream.stream_to(chainstate, &mut bytes, count).map(|nr| {
-            assert_eq!(bytes.len(), nr as usize);
-            bytes
-        })
+        stream
+            .stream_to(&mempool, chainstate, &mut bytes, count)
+            .map(|nr| {
+                assert_eq!(bytes.len(), nr as usize);
+                bytes
+            })
     }
 
     fn stream_confirmed_microblocks_to_vec(
         chainstate: &mut StacksChainState,
-        stream: &mut BlockStreamData,
+        stream: &mut StreamCursor,
         count: u64,
     ) -> Result<Vec<u8>, chainstate_error> {
+        let mempool = MemPoolDB::open_test(
+            chainstate.mainnet,
+            chainstate.chain_id,
+            &chainstate.root_path,
+        )
+        .unwrap();
         let mut bytes = vec![];
-        stream.stream_to(chainstate, &mut bytes, count).map(|nr| {
-            assert_eq!(bytes.len(), nr as usize);
-            bytes
-        })
+        stream
+            .stream_to(&mempool, chainstate, &mut bytes, count)
+            .map(|nr| {
+                assert_eq!(bytes.len(), nr as usize);
+                bytes
+            })
     }
 
     fn decode_microblock_stream(mblock_bytes: &Vec<u8>) -> Vec<StacksMicroblock> {
@@ -8739,11 +9290,11 @@ pub mod test {
             StacksBlockHeader::make_index_block_hash(&consensus_hash, &block.block_hash());
 
         // can't stream a non-existant block
-        let mut stream = BlockStreamData::new_block(index_block_header.clone());
+        let mut stream = StreamCursor::new_block(index_block_header.clone());
         assert!(stream_chunk_to_vec(&chainstate.blocks_path, &mut stream, 123).is_err());
 
         // stream unmodified
-        let stream_2 = BlockStreamData::new_block(index_block_header.clone());
+        let stream_2 = StreamCursor::new_block(index_block_header.clone());
         assert_eq!(stream, stream_2);
 
         // store block to staging
@@ -8780,7 +9331,7 @@ pub mod test {
         set_block_processed(&mut chainstate, &consensus_hash, &block.block_hash(), true);
 
         // can still stream it
-        let mut stream = BlockStreamData::new_block(index_block_header.clone());
+        let mut stream = StreamCursor::new_block(index_block_header.clone());
 
         // stream from chunk store
         let mut all_block_bytes = vec![];
@@ -8803,6 +9354,259 @@ pub mod test {
         assert_eq!(staging_block, block);
     }
 
+    #[test]
+    fn stacks_db_stream_headers() {
+        let mut chainstate = instantiate_chainstate(false, 0x80000000, "stacks_db_stream_headers");
+        let privk = StacksPrivateKey::from_hex(
+            "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01",
+        )
+        .unwrap();
+
+        let mut blocks: Vec<StacksBlock> = vec![];
+        let mut blocks_index_hashes: Vec<StacksBlockId> = vec![];
+
+        // make a linear stream
+        for i in 0..32 {
+            let mut block = make_empty_coinbase_block(&privk);
+
+            if i == 0 {
+                block.header.total_work.work = 1;
+                block.header.total_work.burn = 1;
+            }
+            if i > 0 {
+                block.header.parent_block = blocks.get(i - 1).unwrap().block_hash();
+                block.header.total_work.work =
+                    blocks.get(i - 1).unwrap().header.total_work.work + 1;
+                block.header.total_work.burn =
+                    blocks.get(i - 1).unwrap().header.total_work.burn + 1;
+            }
+
+            let consensus_hash = ConsensusHash([((i + 1) as u8); 20]);
+            let parent_consensus_hash = ConsensusHash([(i as u8); 20]);
+
+            store_staging_block(
+                &mut chainstate,
+                &consensus_hash,
+                &block,
+                &parent_consensus_hash,
+                i as u64,
+                i as u64,
+            );
+
+            blocks_index_hashes.push(StacksBlockHeader::make_index_block_hash(
+                &consensus_hash,
+                &block.block_hash(),
+            ));
+            blocks.push(block);
+        }
+
+        let mut blocks_fork = blocks[0..16].to_vec();
+        let mut blocks_fork_index_hashes = blocks_index_hashes[0..16].to_vec();
+
+        // make a stream that branches off
+        for i in 16..32 {
+            let mut block = make_empty_coinbase_block(&privk);
+
+            if i == 16 {
+                block.header.parent_block = blocks.get(i - 1).unwrap().block_hash();
+                block.header.total_work.work =
+                    blocks.get(i - 1).unwrap().header.total_work.work + 1;
+                block.header.total_work.burn =
+                    blocks.get(i - 1).unwrap().header.total_work.burn + 2;
+            } else {
+                block.header.parent_block = blocks_fork.get(i - 1).unwrap().block_hash();
+                block.header.total_work.work =
+                    blocks_fork.get(i - 1).unwrap().header.total_work.work + 1;
+                block.header.total_work.burn =
+                    blocks_fork.get(i - 1).unwrap().header.total_work.burn + 2;
+            }
+
+            let consensus_hash = ConsensusHash([((i + 1) as u8) | 0x80; 20]);
+            let parent_consensus_hash = if i == 16 {
+                ConsensusHash([(i as u8); 20])
+            } else {
+                ConsensusHash([(i as u8) | 0x80; 20])
+            };
+
+            store_staging_block(
+                &mut chainstate,
+                &consensus_hash,
+                &block,
+                &parent_consensus_hash,
+                i as u64,
+                i as u64,
+            );
+
+            blocks_fork_index_hashes.push(StacksBlockHeader::make_index_block_hash(
+                &consensus_hash,
+                &block.block_hash(),
+            ));
+            blocks_fork.push(block);
+        }
+
+        // can't stream a non-existent header
+        assert!(StreamCursor::new_headers(&chainstate, &StacksBlockId([0x11; 32]), 1).is_err());
+
+        // stream back individual headers
+        for i in 0..blocks.len() {
+            let mut stream =
+                StreamCursor::new_headers(&chainstate, &blocks_index_hashes[i], 1).unwrap();
+            let mut next_header_bytes = vec![];
+            loop {
+                // torture test
+                let mut next_bytes = stream_one_header_to_vec(
+                    &chainstate.db(),
+                    &chainstate.blocks_path,
+                    &mut stream,
+                    25,
+                )
+                .unwrap();
+                if next_bytes.len() == 0 {
+                    break;
+                }
+                next_header_bytes.append(&mut next_bytes);
+            }
+            test_debug!("Got {} total bytes", next_header_bytes.len());
+            let header: ExtendedStacksHeader =
+                serde_json::from_reader(&mut &next_header_bytes[..]).unwrap();
+
+            assert_eq!(header.consensus_hash, ConsensusHash([(i + 1) as u8; 20]));
+            assert_eq!(header.header, blocks[i].header);
+
+            if i > 0 {
+                assert_eq!(header.parent_block_id, blocks_index_hashes[i - 1]);
+            }
+        }
+
+        // stream back a run of headers
+        let block_expected_headers: Vec<StacksBlockHeader> =
+            blocks.iter().rev().map(|blk| blk.header.clone()).collect();
+
+        let block_expected_index_hashes: Vec<StacksBlockId> = blocks_index_hashes
+            .iter()
+            .rev()
+            .map(|idx| idx.clone())
+            .collect();
+
+        let block_fork_expected_headers: Vec<StacksBlockHeader> = blocks_fork
+            .iter()
+            .rev()
+            .map(|blk| blk.header.clone())
+            .collect();
+
+        let block_fork_expected_index_hashes: Vec<StacksBlockId> = blocks_fork_index_hashes
+            .iter()
+            .rev()
+            .map(|idx| idx.clone())
+            .collect();
+
+        // get them all -- ask for more than there is
+        let mut stream =
+            StreamCursor::new_headers(&chainstate, blocks_index_hashes.last().unwrap(), 4096)
+                .unwrap();
+        let header_bytes =
+            stream_headers_to_vec(&mut chainstate, &mut stream, 1024 * 1024).unwrap();
+
+        eprintln!(
+            "headers: {}",
+            String::from_utf8(header_bytes.clone()).unwrap()
+        );
+        let headers: Vec<ExtendedStacksHeader> =
+            serde_json::from_reader(&mut &header_bytes[..]).unwrap();
+
+        assert_eq!(headers.len(), block_expected_headers.len());
+        for ((i, h), eh) in headers
+            .iter()
+            .enumerate()
+            .zip(block_expected_headers.iter())
+        {
+            assert_eq!(h.header, *eh);
+            assert_eq!(h.consensus_hash, ConsensusHash([(32 - i) as u8; 20]));
+            if i + 1 < block_expected_index_hashes.len() {
+                assert_eq!(h.parent_block_id, block_expected_index_hashes[i + 1]);
+            }
+        }
+
+        let mut stream =
+            StreamCursor::new_headers(&chainstate, blocks_fork_index_hashes.last().unwrap(), 4096)
+                .unwrap();
+        let header_bytes =
+            stream_headers_to_vec(&mut chainstate, &mut stream, 1024 * 1024).unwrap();
+        let fork_headers: Vec<ExtendedStacksHeader> =
+            serde_json::from_reader(&mut &header_bytes[..]).unwrap();
+
+        assert_eq!(fork_headers.len(), block_fork_expected_headers.len());
+        for ((i, h), eh) in fork_headers
+            .iter()
+            .enumerate()
+            .zip(block_fork_expected_headers.iter())
+        {
+            let consensus_hash = if i >= 16 {
+                ConsensusHash([((32 - i) as u8); 20])
+            } else {
+                ConsensusHash([((32 - i) as u8) | 0x80; 20])
+            };
+
+            assert_eq!(h.header, *eh);
+            assert_eq!(h.consensus_hash, consensus_hash);
+            if i + 1 < block_fork_expected_index_hashes.len() {
+                assert_eq!(h.parent_block_id, block_fork_expected_index_hashes[i + 1]);
+            }
+        }
+
+        assert_eq!(fork_headers[16..32], headers[16..32]);
+
+        // ask for only a few
+        let mut stream =
+            StreamCursor::new_headers(&chainstate, blocks_index_hashes.last().unwrap(), 10)
+                .unwrap();
+        let mut header_bytes = vec![];
+        loop {
+            // torture test
+            let mut next_bytes = stream_headers_to_vec(&mut chainstate, &mut stream, 17).unwrap();
+            if next_bytes.len() == 0 {
+                break;
+            }
+            header_bytes.append(&mut next_bytes);
+        }
+
+        eprintln!(
+            "header bytes: {}",
+            String::from_utf8(header_bytes.clone()).unwrap()
+        );
+
+        let headers: Vec<ExtendedStacksHeader> =
+            serde_json::from_reader(&mut &header_bytes[..]).unwrap();
+
+        assert_eq!(headers.len(), 10);
+        for (i, hdr) in headers.iter().enumerate() {
+            assert_eq!(hdr.header, block_expected_headers[i]);
+            assert_eq!(hdr.parent_block_id, block_expected_index_hashes[i + 1]);
+        }
+
+        // ask for only a few
+        let mut stream =
+            StreamCursor::new_headers(&chainstate, blocks_fork_index_hashes.last().unwrap(), 10)
+                .unwrap();
+        let mut header_bytes = vec![];
+        loop {
+            // torture test
+            let mut next_bytes = stream_headers_to_vec(&mut chainstate, &mut stream, 17).unwrap();
+            if next_bytes.len() == 0 {
+                break;
+            }
+            header_bytes.append(&mut next_bytes);
+        }
+        let headers: Vec<ExtendedStacksHeader> =
+            serde_json::from_reader(&mut &header_bytes[..]).unwrap();
+
+        assert_eq!(headers.len(), 10);
+        for (i, hdr) in headers.iter().enumerate() {
+            assert_eq!(hdr.header, block_fork_expected_headers[i]);
+            assert_eq!(hdr.parent_block_id, block_fork_expected_index_hashes[i + 1]);
+        }
+    }
+
     #[test]
     fn stacks_db_stream_staging_microblocks() {
         let mut chainstate =
@@ -8822,15 +9626,19 @@ pub mod test {
             StacksBlockHeader::make_index_block_hash(&consensus_hash, &block.block_hash());
 
         // can't stream a non-existant microblock
-        let mut stream = BlockStreamData::new_block(index_block_header.clone());
-        assert!(StacksChainState::stream_one_microblock(
-            &chainstate.db(),
-            &mut vec![],
-            &mut stream,
-            123
-        )
-        .is_err());
-        assert!(stream.rowid.is_none());
+        if let Err(super::Error::NoSuchBlockError) =
+            StreamCursor::new_microblock_confirmed(&chainstate, index_block_header.clone())
+        {
+        } else {
+            panic!("Opened nonexistent microblock");
+        }
+
+        if let Err(super::Error::NoSuchBlockError) =
+            StreamCursor::new_microblock_unconfirmed(&chainstate, index_block_header.clone(), 0)
+        {
+        } else {
+            panic!("Opened nonexistent microblock");
+        }
 
         // store microblocks to staging and stream them back
         for (i, mblock) in mblocks.iter().enumerate() {
@@ -8845,7 +9653,7 @@ pub mod test {
             let mut staging_mblocks = vec![];
             for j in 0..(i + 1) {
                let mut next_mblock_bytes = vec![];
-                let mut stream = BlockStreamData::new_microblock_unconfirmed(
+                let mut stream = StreamCursor::new_microblock_unconfirmed(
                     &chainstate,
                     index_block_header.clone(),
                     j as u16,
@@ -8883,7 +9691,7 @@ pub mod test {
             for k in 0..(i + 1) {
                 test_debug!("start at seq {}", k);
                 let mut staging_mblock_bytes = vec![];
-                let mut stream = BlockStreamData::new_microblock_unconfirmed(
+                let mut stream = StreamCursor::new_microblock_unconfirmed(
                     &chainstate,
                     index_block_header.clone(),
                     k as u16,
@@ -8995,7 +9803,7 @@ pub mod test {
             // verify that we can stream everything
            let microblock_index_header =
                StacksBlockHeader::make_index_block_hash(&consensus_hash, &mblocks[i].block_hash());
-            let mut stream = BlockStreamData::new_microblock_confirmed(
+            let mut stream = StreamCursor::new_microblock_confirmed(
                 &chainstate,
                 microblock_index_header.clone(),
             )
diff --git a/src/chainstate/stacks/db/mod.rs b/src/chainstate/stacks/db/mod.rs
index bc4e6258eb..23579a52f9 100644
--- a/src/chainstate/stacks/db/mod.rs
+++ b/src/chainstate/stacks/db/mod.rs
@@ -58,6 +58,7 @@ use clarity_vm::clarity::{
 use core::*;
 use net::atlas::BNS_CHARS_REGEX;
 use net::Error as net_error;
+use net::MemPoolSyncData;
 use util::db::Error as db_error;
 use util::db::{
     query_count, query_row, tx_begin_immediate, tx_busy_handler, DBConn, DBTx, FromColumn, FromRow,
@@ -473,22 +474,90 @@ impl<'a> DerefMut for ChainstateTx<'a> {
     }
 }
 
-/// Opaque structure for streaming block and microblock data from disk
+/// Interface for streaming data
+pub trait Streamer {
+    fn offset(&self) -> u64;
+    fn add_bytes(&mut self, nw: u64);
+}
+
+/// Opaque structure for streaming block, microblock, and header data from disk
+#[derive(Debug, PartialEq, Clone)]
+pub enum StreamCursor {
+    Block(BlockStreamData),
+    Microblocks(MicroblockStreamData),
+    Headers(HeaderStreamData),
+    MempoolTxs(TxStreamData),
+}
+
 #[derive(Debug, PartialEq, Clone)]
 pub struct BlockStreamData {
-    index_block_hash: StacksBlockId, // index block hash of the block to download
-    rowid: Option<i64>,              // used when reading a blob out of staging
-    offset: u64, // offset into whatever is being read (the blob, or the file in the chunk store)
-    total_bytes: u64, // total number of bytes read.
+    /// index block hash of the block to download
+    index_block_hash: StacksBlockId,
+    /// offset into whatever is being read (the blob, or the file in the chunk store)
+    offset: u64,
+    /// total number of bytes read.
+    total_bytes: u64,
+}
 
-    // used only for microblocks
-    is_microblock: bool,
+#[derive(Debug, PartialEq, Clone)]
+pub struct MicroblockStreamData {
+    /// index block hash of the block to download
+    index_block_hash: StacksBlockId,
+    /// microblock blob row id
+    rowid: Option<i64>,
+    /// offset into whatever is being read (the blob, or the file in the chunk store)
+    offset: u64,
+    /// total number of bytes read.
+    total_bytes: u64,
+
+    /// length prefix
+    num_items_buf: [u8; 4],
+    num_items_ptr: usize,
+
+    /// microblock pointer
     microblock_hash: BlockHeaderHash,
     parent_index_block_hash: StacksBlockId,
-    seq: u16, // only used for unconfirmed microblocks
+
+    /// unconfirmed state
+    seq: u16,
     unconfirmed: bool,
-    num_mblocks_buf: [u8; 4],
-    num_mblocks_ptr: usize,
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct HeaderStreamData {
+    /// index block hash of the block to download
+    index_block_hash: StacksBlockId,
+    /// offset into whatever is being read (the blob, or the file in the chunk store)
+    offset: u64,
+    /// total number of bytes read.
+    total_bytes: u64,
+    /// number of headers requested
+    num_headers: u32,
+
+    /// header buffer data
+    header_bytes: Option<Vec<u8>>,
+    end_of_stream: bool,
+    corked: bool,
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct TxStreamData {
+    /// Mempool sync data requested
+    pub tx_query: MemPoolSyncData,
+    /// last txid loaded
+    pub last_randomized_txid: Txid,
+    /// serialized transaction buffer that's being sent
+    pub tx_buf: Vec<u8>,
+    pub tx_buf_ptr: usize,
+    /// number of transactions visited in the DB so far
+    pub num_txs: u64,
+    /// maximum we can visit in the query
+    pub max_txs: u64,
+    /// height of the chain at time of query
+    pub height: u64,
+    /// Are we done sending transactions, and are now in the process of sending the trailing page
+    /// ID?
+    pub corked: bool,
 }
 
 pub const CHAINSTATE_VERSION: &'static str = "2";
@@ -528,9 +597,6 @@ const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[
     PRIMARY KEY(consensus_hash,block_hash)
     );"#,
-    "CREATE INDEX index_block_hash_to_primary_key ON block_headers(index_block_hash,consensus_hash,block_hash);",
-    "CREATE INDEX block_headers_hash_index ON block_headers(block_hash,block_height);",
-    "CREATE INDEX block_index_hash_index ON block_headers(index_block_hash,consensus_hash,block_hash);",
     r#"
     -- scheduled payments
     -- no designated primary key since there can be duplicate entries
@@ -582,7 +648,6 @@ const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[
     orphaned INT NOT NULL,
     PRIMARY KEY(anchored_block_hash,consensus_hash,microblock_hash)
     );"#,
-    "CREATE INDEX staging_microblocks_index_hash ON staging_microblocks(index_block_hash);",
     r#"
     -- Staging microblocks data
     CREATE TABLE staging_microblocks_data(block_hash TEXT NOT NULL,
@@ -617,11 +682,6 @@ const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[
     processed_time INT NOT NULL,       -- when this block was processed
     PRIMARY KEY(anchored_block_hash,consensus_hash)
     );"#,
-    "CREATE INDEX processed_stacks_blocks ON staging_blocks(processed,anchored_block_hash,consensus_hash);",
-    "CREATE INDEX orphaned_stacks_blocks ON staging_blocks(orphaned,anchored_block_hash,consensus_hash);",
-    "CREATE INDEX parent_blocks ON staging_blocks(parent_anchored_block_hash);",
-    "CREATE INDEX parent_consensus_hashes ON staging_blocks(parent_consensus_hash);",
-    "CREATE INDEX index_block_hashes ON staging_blocks(index_block_hash);",
     r#"
    -- users who burned in support of a block
    CREATE TABLE staging_user_burn_support(anchored_block_hash TEXT NOT NULL,
@@ -639,8 +699,6 @@ const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[
    result TEXT NOT NULL,
    UNIQUE (txid,index_block_hash)
    );"#,
-    "CREATE INDEX txid_tx_index ON transactions(txid);",
-    "CREATE INDEX index_block_hash_tx_index ON transactions(index_block_hash);",
 ];
 
 const CHAINSTATE_SCHEMA_2: &'static [&'static str] = &[
@@ -655,6 +713,29 @@ const CHAINSTATE_SCHEMA_2: &'static [&'static str] = &[
     "#,
 ];
 
+const CHAINSTATE_INDEXES: &'static [&'static str] = &[
+    "CREATE INDEX IF NOT EXISTS index_block_hash_to_primary_key ON block_headers(index_block_hash,consensus_hash,block_hash);",
+    "CREATE INDEX IF NOT EXISTS block_headers_hash_index ON block_headers(block_hash,block_height);",
+    "CREATE INDEX IF NOT EXISTS block_index_hash_index ON block_headers(index_block_hash,consensus_hash,block_hash);",
+    "CREATE INDEX IF NOT EXISTS block_headers_burn_header_height ON block_headers(burn_header_height);",
+    "CREATE INDEX IF NOT EXISTS index_payments_block_hash_consensus_hash_vtxindex ON payments(block_hash,consensus_hash,vtxindex ASC);",
+    "CREATE INDEX IF 
NOT EXISTS index_payments_index_block_hash_vtxindex ON payments(index_block_hash,vtxindex ASC);", + "CREATE INDEX IF NOT EXISTS staging_microblocks_processed ON staging_microblocks(processed);", + "CREATE INDEX IF NOT EXISTS staging_microblocks_orphaned ON staging_microblocks(orphaned);", + "CREATE INDEX IF NOT EXISTS staging_microblocks_index_hash ON staging_microblocks(index_block_hash);", + "CREATE INDEX IF NOT EXISTS staging_microblocks_index_hash_processed ON staging_microblocks(index_block_hash,processed);", + "CREATE INDEX IF NOT EXISTS staging_microblocks_index_hash_orphaned ON staging_microblocks(index_block_hash,orphaned);", + "CREATE INDEX IF NOT EXISTS processed_stacks_blocks ON staging_blocks(processed,anchored_block_hash,consensus_hash);", + "CREATE INDEX IF NOT EXISTS orphaned_stacks_blocks ON staging_blocks(orphaned,anchored_block_hash,consensus_hash);", + "CREATE INDEX IF NOT EXISTS parent_blocks ON staging_blocks(parent_anchored_block_hash);", + "CREATE INDEX IF NOT EXISTS parent_consensus_hashes ON staging_blocks(parent_consensus_hash);", + "CREATE INDEX IF NOT EXISTS index_block_hashes ON staging_blocks(index_block_hash);", + "CREATE INDEX IF NOT EXISTS height_stacks_blocks ON staging_blocks(height);", + "CREATE INDEX IF NOT EXISTS index_staging_user_burn_support ON staging_user_burn_support(anchored_block_hash,consensus_hash);", + "CREATE INDEX IF NOT EXISTS txid_tx_index ON transactions(txid);", + "CREATE INDEX IF NOT EXISTS index_block_hash_tx_index ON transactions(index_block_hash);", +]; + #[cfg(test)] pub const MINER_REWARD_MATURITY: u64 = 2; // small for testing purposes @@ -776,6 +857,8 @@ impl StacksChainState { if migrate { StacksChainState::apply_schema_migrations(&tx, mainnet, chain_id)?; } + + StacksChainState::add_indexes(&tx)?; } dbtx.instantiate_index()?; @@ -854,6 +937,13 @@ impl StacksChainState { Ok(()) } + fn add_indexes<'a>(tx: &DBTx<'a>) -> Result<(), Error> { + for cmd in CHAINSTATE_INDEXES { + tx.execute_batch(cmd)?; + } + Ok(()) + } + fn open_db( mainnet: bool, chain_id: u32, @@ -868,6 +958,7 @@ impl StacksChainState { let mut marf = StacksChainState::open_index(index_path)?; let tx = marf.storage_tx()?; StacksChainState::apply_schema_migrations(&tx, mainnet, chain_id)?; + StacksChainState::add_indexes(&tx)?; tx.commit()?; Ok(marf) } @@ -887,6 +978,7 @@ impl StacksChainState { } else { let mut marf = StacksChainState::open_index(index_path)?; let tx = marf.storage_tx()?; + StacksChainState::add_indexes(&tx)?; tx.commit()?; Ok(marf) } diff --git a/src/chainstate/stacks/db/transactions.rs b/src/chainstate/stacks/db/transactions.rs index 79c2d79b00..b0ae2482b9 100644 --- a/src/chainstate/stacks/db/transactions.rs +++ b/src/chainstate/stacks/db/transactions.rs @@ -83,6 +83,7 @@ impl StacksTransactionReceipt { transaction: tx.into(), execution_cost: cost, microblock_header: None, + tx_index: 0, } } @@ -102,6 +103,7 @@ impl StacksTransactionReceipt { contract_analysis: None, execution_cost: cost, microblock_header: None, + tx_index: 0, } } @@ -121,6 +123,7 @@ impl StacksTransactionReceipt { contract_analysis: None, execution_cost: cost, microblock_header: None, + tx_index: 0, } } @@ -140,6 +143,7 @@ impl StacksTransactionReceipt { contract_analysis: Some(analysis), execution_cost: cost, microblock_header: None, + tx_index: 0, } } @@ -159,6 +163,7 @@ impl StacksTransactionReceipt { contract_analysis: Some(analysis), execution_cost: cost, microblock_header: None, + tx_index: 0, } } @@ -172,6 +177,7 @@ impl StacksTransactionReceipt { contract_analysis: 
None,
             execution_cost: ExecutionCost::zero(),
             microblock_header: None,
+            tx_index: 0,
         }
     }
 
@@ -188,6 +194,7 @@ impl StacksTransactionReceipt {
             contract_analysis: None,
             execution_cost: analysis_cost,
             microblock_header: None,
+            tx_index: 0,
         }
     }
 
@@ -205,6 +212,7 @@ impl StacksTransactionReceipt {
             contract_analysis: None,
             execution_cost: cost,
             microblock_header: None,
+            tx_index: 0,
         }
     }
 
diff --git a/src/chainstate/stacks/db/unconfirmed.rs b/src/chainstate/stacks/db/unconfirmed.rs
index ff0d0756fa..329dbd682c 100644
--- a/src/chainstate/stacks/db/unconfirmed.rs
+++ b/src/chainstate/stacks/db/unconfirmed.rs
@@ -82,6 +82,7 @@ pub struct UnconfirmedState {
     readonly: bool,
     dirty: bool,
     num_mblocks_added: u64,
+    have_state: bool,
 
     // fault injection for testing
     pub disable_cost_check: bool,
@@ -113,6 +114,7 @@ impl UnconfirmedState {
             readonly: false,
             dirty: false,
             num_mblocks_added: 0,
+            have_state: false,
 
             disable_cost_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_COST_CHECK),
             disable_bytes_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_BYTES_CHECK),
@@ -145,6 +147,7 @@ impl UnconfirmedState {
             readonly: true,
             dirty: false,
             num_mblocks_added: 0,
+            have_state: false,
 
             disable_cost_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_COST_CHECK),
             disable_bytes_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_BYTES_CHECK),
@@ -195,9 +198,14 @@ impl UnconfirmedState {
         let mut new_cost = ExecutionCost::zero();
         let mut new_bytes = 0;
         let mut num_new_mblocks = 0;
+        let mut have_state = self.have_state;
 
         if mblocks.len() > 0 {
             let cur_cost = self.cost_so_far.clone();
+
+            // NOTE: we *must* commit the clarity_tx now that it's begun.
+            // Otherwise, microblock miners can leave the MARF in a partially-initialized state,
+            // leading to a node crash.
             let mut clarity_tx = StacksChainState::chainstate_begin_unconfirmed(
                 db_config,
                 chainstate.db(),
@@ -206,6 +214,9 @@ impl UnconfirmedState {
                 &self.confirmed_chain_tip,
             );
 
+            // we must roll this back later
+            have_state = true;
+
             clarity_tx.reset_cost(cur_cost);
 
             for mblock in mblocks.into_iter() {
@@ -225,7 +236,7 @@ impl UnconfirmedState {
                 let mblock_header = mblock.header.clone();
 
                 debug!(
-                    "Apply microblock {} ({}) to unconfirmed state",
+                    "Try to apply microblock {} ({}) to unconfirmed state",
                     &mblock_hash, mblock.header.sequence
                 );
 
@@ -235,12 +246,10 @@ impl UnconfirmedState {
                     &vec![mblock.clone()],
                 ) {
                     Ok(x) => x,
-                    Err((Error::InvalidStacksMicroblock(msg, _), hdr)) => {
-                        warn!("Invalid stacks microblock {}: {}", hdr, msg);
-                        continue;
-                    }
                     Err((e, _)) => {
-                        return Err(e);
+                        // absorb the error
+                        warn!("Encountered invalid stacks microblock: {}", &e);
+                        break;
                     }
                 };
 
@@ -277,6 +286,7 @@ impl UnconfirmedState {
         self.cost_so_far = new_cost;
         self.bytes_so_far += new_bytes;
         self.num_mblocks_added += num_new_mblocks;
+        self.have_state = have_state;
 
         // apply injected faults
         if self.disable_cost_check {
@@ -383,12 +393,41 @@ impl UnconfirmedState {
             0
         }
     }
+
+    /// Try returning the unconfirmed chain tip. Only return the tip if the underlying MARF trie
+    /// exists, otherwise return None.
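+    /// (Returning `Ok(None)` rather than an error lets callers fall back to the confirmed
+    /// chain tip when no unconfirmed state has been built yet.)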
+    pub fn get_unconfirmed_state_if_exists(&mut self) -> Result<Option<StacksBlockId>, String> {
+        if self.is_readable() {
+            let trie_exists = match self
+                .clarity_inst
+                .trie_exists_for_block(&self.unconfirmed_chain_tip)
+            {
+                Ok(res) => res,
+                Err(e) => {
+                    let err_str = format!(
+                        "Failed to load Stacks chain tip; error checking underlying trie: {}",
+                        e
+                    );
+                    warn!("{}", err_str);
+                    return Err(err_str);
+                }
+            };
+
+            if trie_exists {
+                Ok(Some(self.unconfirmed_chain_tip))
+            } else {
+                Ok(None)
+            }
+        } else {
+            Ok(None)
+        }
+    }
 }
 
 impl StacksChainState {
     /// Clear the current unconfirmed state
     fn drop_unconfirmed_state(&mut self, mut unconfirmed: UnconfirmedState) {
-        if !unconfirmed.has_data() {
+        if !unconfirmed.have_state {
             debug!(
                 "Dropping empty unconfirmed state off of {} ({})",
                 &unconfirmed.confirmed_chain_tip, &unconfirmed.unconfirmed_chain_tip
@@ -555,6 +594,8 @@ mod test {
     use chainstate::stacks::C32_ADDRESS_VERSION_TESTNET_SINGLESIG;
     use chainstate::stacks::*;
     use core::mempool::*;
+    use core::*;
+    use net::relay::*;
     use net::test::*;
 
     use super::*;
@@ -1028,4 +1069,278 @@ mod test {
             }
         }
     }
+
+    #[test]
+    fn test_unconfirmed_refresh_invalid_microblock() {
+        let privk = StacksPrivateKey::new();
+        let addr = StacksAddress::from_public_keys(
+            C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
+            &AddressHashMode::SerializeP2PKH,
+            1,
+            &vec![StacksPublicKey::from_private(&privk)],
+        )
+        .unwrap();
+
+        let initial_balance = 1000000000;
+        let mut peer_config =
+            TestPeerConfig::new("test_unconfirmed_refresh_invalid_microblock", 7004, 7005);
+        peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)];
+        peer_config.epochs = Some(vec![StacksEpoch {
+            epoch_id: StacksEpochId::Epoch20,
+            start_height: 0,
+            end_height: (i64::MAX) as u64,
+            block_limit: BLOCK_LIMIT_MAINNET_20,
+            network_epoch: PEER_VERSION_EPOCH_2_0,
+        }]);
+
+        let mut peer = TestPeer::new(peer_config);
+        let chainstate_path = peer.chainstate_path.clone();
+
+        let num_blocks = 5;
+        let num_microblocks = 3;
+        let first_stacks_block_height = {
+            let tip =
+                SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
+                    .unwrap();
+            tip.block_height
+        };
+
+        let mut last_block: Option<StacksBlock> = None;
+        let mut next_nonce = 0;
+        let recv_addr =
+            StacksAddress::from_string("ST1H1B54MY50RMBRRKS7GV2ZWG79RZ1RQ1ETW4E01").unwrap();
+        let mut recv_balance = 0;
+
+        for tenure_id in 0..num_blocks {
+            let microblock_privkey = StacksPrivateKey::new();
+            let microblock_pubkeyhash =
+                Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey));
+
+            // send transactions to the mempool
+            let tip =
+                SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
+                    .unwrap();
+
+            assert_eq!(
+                tip.block_height,
+                first_stacks_block_height + (tenure_id as u64)
+            );
+            if let Some(block) = last_block {
+                assert_eq!(tip.winning_stacks_block_hash, block.block_hash());
+            }
+
+            let mut anchor_size = 0;
+            let mut anchor_cost = ExecutionCost::zero();
+
+            let (burn_ops, stacks_block, _) = peer.make_tenure(
+                |ref mut miner,
+                 ref mut sortdb,
+                 ref mut chainstate,
+                 vrf_proof,
+                 ref parent_opt,
+                 _| {
+                    let parent_tip = match parent_opt {
+                        None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(),
+                        Some(block) => {
+                            let ic = sortdb.index_conn();
+                            let snapshot =
+                                SortitionDB::get_block_snapshot_for_winning_stacks_block(
+                                    &ic,
+                                    &tip.sortition_id,
+                                    &block.block_hash(),
+                                )
+                                .unwrap()
+                                .unwrap(); // succeeds because we don't fork
+                            StacksChainState::get_anchored_block_header_info(
+                                chainstate.db(),
+                                &snapshot.consensus_hash,
+                                &snapshot.winning_stacks_block_hash,
+                            )
+                            .unwrap()
+                            .unwrap()
+                        }
+                    };
+
+                    let block_builder = StacksBlockBuilder::make_regtest_block_builder(
+                        &parent_tip,
+                        vrf_proof,
+                        tip.total_burn,
+                        microblock_pubkeyhash,
+                    )
+                    .unwrap();
+
+                    let anchored_tx = {
+                        let tx = {
+                            let auth = TransactionAuth::Standard(
+                                TransactionSpendingCondition::new_singlesig_p2pkh(
+                                    StacksPublicKey::from_private(&privk),
+                                )
+                                .unwrap(),
+                            );
+                            let mut tx_stx_transfer = StacksTransaction::new(
+                                TransactionVersion::Testnet,
+                                auth.clone(),
+                                TransactionPayload::TokenTransfer(
+                                    recv_addr.clone().into(),
+                                    1,
+                                    TokenTransferMemo([0u8; 34]),
+                                ),
+                            );
+
+                            tx_stx_transfer.chain_id = 0x80000000;
+                            tx_stx_transfer.post_condition_mode =
+                                TransactionPostConditionMode::Allow;
+                            tx_stx_transfer.set_tx_fee(0);
+                            tx_stx_transfer.set_origin_nonce(next_nonce);
+                            next_nonce += 1;
+                            tx_stx_transfer
+                        };
+
+                        let mut signer = StacksTransactionSigner::new(&tx);
+                        signer.sign_origin(&privk).unwrap();
+
+                        let signed_tx = signer.get_tx().unwrap();
+                        signed_tx
+                    };
+                    // this will be accepted
+                    recv_balance += 1;
+
+                    let coinbase_tx = make_coinbase(miner, tenure_id);
+                    let (anchored_block, anchored_block_size, anchored_block_cost) =
+                        StacksBlockBuilder::make_anchored_block_from_txs(
+                            block_builder,
+                            chainstate,
+                            &sortdb.index_conn(),
+                            vec![coinbase_tx, anchored_tx],
+                        )
+                        .unwrap();
+
+                    anchor_size = anchored_block_size;
+                    anchor_cost = anchored_block_cost;
+                    (anchored_block, vec![])
+                },
+            );
+
+            last_block = Some(stacks_block.clone());
+            let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone());
+            peer.process_stacks_epoch_at_tip(&stacks_block, &vec![]);
+
+            let canonical_tip = StacksBlockHeader::make_index_block_hash(
+                &consensus_hash,
+                &stacks_block.block_hash(),
+            );
+
+            let mut sortdb = peer.sortdb.take().unwrap();
+            let mut inner_node = peer.stacks_node.take().unwrap();
+
+            for i in 0..num_microblocks {
+                Relayer::refresh_unconfirmed(&mut inner_node.chainstate, &mut sortdb);
+
+                let microblock = {
+                    let sort_iconn = sortdb.index_conn();
+                    let mut microblock_builder = StacksMicroblockBuilder::resume_unconfirmed(
+                        &mut inner_node.chainstate,
+                        &sort_iconn,
+                        &anchor_cost,
+                        BlockBuilderSettings::max_value(),
+                    )
+                    .unwrap();
+
+                    // make a valid and then an invalid microblock
+                    let mut signed_txs = vec![];
+                    let tx = {
+                        let auth = TransactionAuth::Standard(
+                            TransactionSpendingCondition::new_singlesig_p2pkh(
+                                StacksPublicKey::from_private(&privk),
+                            )
+                            .unwrap(),
+                        );
+                        let mut tx_stx_transfer = StacksTransaction::new(
+                            TransactionVersion::Testnet,
+                            auth.clone(),
+                            TransactionPayload::TokenTransfer(
+                                recv_addr.clone().into(),
+                                1,
+                                TokenTransferMemo([0u8; 34]),
+                            ),
+                        );
+
+                        tx_stx_transfer.chain_id = 0x80000000;
+                        tx_stx_transfer.post_condition_mode = TransactionPostConditionMode::Allow;
+                        tx_stx_transfer.set_tx_fee(0);
+
+                        if tenure_id % 2 == 0 {
+                            // stream has an intermittent bad microblock
+                            if i > 0 {
+                                tx_stx_transfer.set_origin_nonce(next_nonce + i + 1000);
+                                // bad nonce
+                            } else {
+                                tx_stx_transfer.set_origin_nonce(next_nonce);
+                                next_nonce += 1;
+                                recv_balance += 1;
+                            }
+                        } else {
+                            // stream starts with a bad microblock
+                            if i == 0 {
+                                tx_stx_transfer.set_origin_nonce(next_nonce + i + 1000);
+                                // bad nonce
+                            } else {
+                                tx_stx_transfer.set_origin_nonce(next_nonce);
+                            }
+                        }
+
+                        let mut signer = StacksTransactionSigner::new(&tx_stx_transfer);
+                        signer.sign_origin(&privk).unwrap();
+
+                        let signed_tx = signer.get_tx().unwrap();
+                        signed_tx
+                    };
+
+                    signed_txs.push(tx);
+
+                    let microblock = microblock_builder
+                        .make_next_microblock(signed_txs, &microblock_privkey, vec![], None)
+                        .unwrap();
+                    microblock
+                };
+
+                inner_node
+                    .chainstate
+                    .preprocess_streamed_microblock(
+                        &consensus_hash,
+                        &stacks_block.block_hash(),
+                        &microblock,
+                    )
+                    .unwrap();
+            }
+
+            peer.sortdb = Some(sortdb);
+            peer.stacks_node = Some(inner_node);
+        }
+
+        let (consensus_hash, canonical_block) =
+            SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap();
+        let canonical_tip =
+            StacksBlockHeader::make_index_block_hash(&consensus_hash, &canonical_block);
+
+        // process microblock stream to generate unconfirmed state
+        let sortdb = peer.sortdb.take().unwrap();
+        peer.chainstate()
+            .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone())
+            .unwrap();
+
+        let db_recv_balance = peer
+            .chainstate()
+            .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| {
+                clarity_tx.with_clarity_db_readonly(|clarity_db| {
+                    clarity_db.get_account_stx_balance(&recv_addr.into())
+                })
+            })
+            .unwrap()
+            .unwrap();
+        peer.sortdb = Some(sortdb);
+
+        // all valid txs were processed
+        assert_eq!(db_recv_balance.amount_unlocked, recv_balance);
+    }
 }
diff --git a/src/chainstate/stacks/events.rs b/src/chainstate/stacks/events.rs
index 26d3768e91..aed89a5688 100644
--- a/src/chainstate/stacks/events.rs
+++ b/src/chainstate/stacks/events.rs
@@ -61,6 +61,7 @@ pub struct StacksTransactionReceipt {
     pub contract_analysis: Option<ContractAnalysis>,
     pub execution_cost: ExecutionCost,
     pub microblock_header: Option<StacksMicroblockHeader>,
+    pub tx_index: u32,
 }
 
 #[derive(Debug, Clone, PartialEq)]
diff --git a/src/chainstate/stacks/index/proofs.rs b/src/chainstate/stacks/index/proofs.rs
index d5686e8dcb..e6fdaeb293 100644
--- a/src/chainstate/stacks/index/proofs.rs
+++ b/src/chainstate/stacks/index/proofs.rs
@@ -289,6 +289,17 @@ impl<T: MarfTrieId> StacksMessageCodec for TrieMerkleProofType<T> {
     }
 }
 
+impl<T: MarfTrieId> StacksMessageCodec for TrieMerkleProof<T> {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), codec_error> {
+        self.0.consensus_serialize(fd)
+    }
+
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<TrieMerkleProof<T>, codec_error> {
+        let proof_parts: Vec<TrieMerkleProofType<T>> = read_next(fd)?;
+        Ok(TrieMerkleProof(proof_parts))
+    }
+}
+
 impl<T: MarfTrieId> TrieMerkleProof<T> {
     pub fn to_hex(&self) -> String {
         let mut marf_proof = vec![];
diff --git a/src/chainstate/stacks/miner.rs b/src/chainstate/stacks/miner.rs
index 4be02ec472..bbb41b0b10 100644
--- a/src/chainstate/stacks/miner.rs
+++ b/src/chainstate/stacks/miner.rs
@@ -25,20 +25,22 @@ use crate::cost_estimates::CostEstimator;
 use crate::types::StacksPublicKeyBuffer;
 use burnchains::PrivateKey;
 use burnchains::PublicKey;
-use chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn};
+use chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleTx};
 use chainstate::burn::operations::*;
 use chainstate::burn::*;
 use chainstate::stacks::db::unconfirmed::UnconfirmedState;
 use chainstate::stacks::db::{
-    blocks::MemPoolRejection, ClarityTx, StacksChainState, MINER_REWARD_MATURITY,
+    blocks::MemPoolRejection, ChainstateTx, ClarityTx, MinerRewardInfo, StacksChainState,
+    MINER_REWARD_MATURITY,
 };
-use chainstate::stacks::events::StacksTransactionReceipt;
+use chainstate::stacks::events::{StacksTransactionEvent, StacksTransactionReceipt};
 use chainstate::stacks::Error;
 use chainstate::stacks::*;
-use clarity_vm::clarity::ClarityConnection;
+use clarity_vm::clarity::{ClarityConnection, ClarityInstance};
 use core::mempool::*;
 use core::*;
 use net::Error as net_error;
+use serde::Deserialize;
 use util::get_epoch_time_ms;
 use util::hash::MerkleTree;
 use util::hash::Sha512Trunc256Sum;
@@ -51,6 +53,7 @@ use crate::types::chainstate::BurnchainHeaderHash;
 use crate::types::chainstate::{BlockHeaderHash, StacksAddress, StacksWorkScore};
 use crate::types::chainstate::{StacksBlockHeader, StacksBlockId, StacksMicroblockHeader};
 use crate::types::proof::TrieHash;
+use chainstate::stacks::db::blocks::SetupBlockResult;
 
 #[derive(Debug, Clone)]
 pub struct BlockBuilderSettings {
@@ -87,13 +90,29 @@ struct MicroblockMinerRuntime {
     disable_cost_check: bool,
 }
 
+/// The value of `BlockLimitFunction` tracks how close the block being built is to its limits.
+/// As the value increases, fewer kinds of transactions can be added to the block.
 #[derive(PartialEq)]
 enum BlockLimitFunction {
+    /// The block size limit has not been hit, and there are no restrictions on what can be added to
+    /// a block.
     NO_LIMIT_HIT,
+    /// The block is nearly full, so no more contract call or contract publish transactions
+    /// will be added to it.
     CONTRACT_LIMIT_HIT,
+    /// We have a completely full block. No new transactions can be added to the block.
     LIMIT_REACHED,
}
 
+pub struct MinerEpochInfo<'a> {
+    pub chainstate_tx: ChainstateTx<'a>,
+    pub clarity_instance: &'a mut ClarityInstance,
+    pub burn_tip: BurnchainHeaderHash,
+    pub burn_tip_height: u32,
+    pub parent_microblocks: Vec<StacksMicroblock>,
+    pub mainnet: bool,
+}
+
 impl From<&UnconfirmedState> for MicroblockMinerRuntime {
     fn from(unconfirmed: &UnconfirmedState) -> MicroblockMinerRuntime {
         let considered = unconfirmed
@@ -114,6 +133,245 @@ impl From<&UnconfirmedState> for MicroblockMinerRuntime {
     }
 }
 
+/// Represents a successful transaction. This transaction should be added to the block.
+#[derive(Debug, Clone, PartialEq)]
+pub struct TransactionSuccess {
+    pub tx: StacksTransaction,
+    /// The fee that was charged to the user for doing this transaction.
+    pub fee: u64,
+    pub receipt: StacksTransactionReceipt,
+}
+
+/// Represents a failed transaction. Something went wrong when processing this transaction.
+#[derive(Debug)]
+pub struct TransactionError {
+    pub tx: StacksTransaction,
+    pub error: Error,
+}
+
+/// Represents a transaction that was skipped, but might succeed later.
+#[derive(Debug)]
+pub struct TransactionSkipped {
+    pub tx: StacksTransaction,
+    /// This error is the reason the transaction was skipped (ex: BlockTooBigError)
+    pub error: Error,
+}
+
+/// Represents an event for a successful transaction. This transaction should be added to the block.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct TransactionSuccessEvent {
+    #[serde(deserialize_with = "hex_deserialize", serialize_with = "hex_serialize")]
+    pub txid: Txid,
+    pub fee: u64,
+    pub execution_cost: ExecutionCost,
+    pub result: Value,
+}
+
+/// Represents an event for a failed transaction. Something went wrong when processing this transaction.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TransactionErrorEvent {
+    #[serde(deserialize_with = "hex_deserialize", serialize_with = "hex_serialize")]
+    pub txid: Txid,
+    pub error: String,
+}
+
+/// Represents an event for a transaction that was skipped, but might succeed later.
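+/// (Like the other `Transaction*Event` structs above, this is serialized as JSON in the
+/// `mined_block`/`mined_microblock` event payloads, with `txid` hex-encoded by the
+/// `hex_serialize`/`hex_deserialize` helpers below.)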
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct TransactionSkippedEvent {
+    #[serde(deserialize_with = "hex_deserialize", serialize_with = "hex_serialize")]
+    pub txid: Txid,
+    pub error: String,
+}
+
+fn hex_serialize<S: serde::Serializer>(txid: &Txid, s: S) -> Result<S::Ok, S::Error> {
+    let inst = txid.to_hex();
+    s.serialize_str(inst.as_str())
+}
+
+fn hex_deserialize<'de, D: serde::Deserializer<'de>>(d: D) -> Result<Txid, D::Error> {
+    let inst_str = String::deserialize(d)?;
+    Txid::from_hex(&inst_str).map_err(serde::de::Error::custom)
+}
+
+/// `TransactionResult` represents the outcome of transaction processing.
+/// We use this enum to involve the compiler in forcing us to always clearly
+/// indicate the outcome of a transaction.
+///
+/// There are currently three outcomes for a transaction:
+/// 1) succeed
+/// 2) fail, may be tried again later
+/// 3) be skipped for now, to be tried again later
+#[derive(Debug)]
+pub enum TransactionResult {
+    /// Transaction has already succeeded.
+    Success(TransactionSuccess),
+    /// Transaction failed when processed.
+    ProcessingError(TransactionError),
+    /// Transaction wasn't ready to be processed, but might succeed later.
+    Skipped(TransactionSkipped),
+}
+
+/// This struct is used to transmit data about transaction results through either the `mined_block`
+/// or `mined_microblock` event.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum TransactionEvent {
+    /// Transaction has already succeeded.
+    Success(TransactionSuccessEvent),
+    /// Transaction failed. It may succeed later depending on the error.
+    ProcessingError(TransactionErrorEvent),
+    /// Transaction wasn't ready to be processed, but might succeed later.
+    /// The bool represents whether mempool propagation should halt or continue
+    Skipped(TransactionSkippedEvent),
+}
+
+impl TransactionResult {
+    /// Logs a queryable message for the case where `txid` has succeeded.
+    pub fn log_transaction_success(tx: &StacksTransaction) {
+        info!("Tx successfully processed.";
+            "event_name" => %"transaction_result",
+            "tx_id" => %tx.txid(),
+            "event_type" => %"success",
+        );
+    }
+
+    /// Logs a queryable message for the case where `txid` has failed
+    /// with error `err`.
+    pub fn log_transaction_error(tx: &StacksTransaction, err: &Error) {
+        info!("Tx processing failed with error";
+            "event_name" => "transaction_result",
+            "reason" => %err,
+            "tx_id" => %tx.txid(),
+            "event_type" => "error",
+        );
+    }
+
+    /// Logs a queryable message for the case where `tx` has been skipped
+    /// for error `err`.
+    pub fn log_transaction_skipped(tx: &StacksTransaction, err: &Error) {
+        info!(
+            "Tx processing skipped";
+            "event_name" => "transaction_result",
+            "tx_id" => %tx.txid(),
+            "event_type" => "skip",
+            "reason" => %err,
+        );
+    }
+
+    /// Creates a `TransactionResult` backed by `TransactionSuccess`.
+    /// This method logs "transaction success" as a side effect.
+    pub fn success(
+        transaction: &StacksTransaction,
+        fee: u64,
+        receipt: StacksTransactionReceipt,
+    ) -> TransactionResult {
+        Self::log_transaction_success(transaction);
+        Self::Success(TransactionSuccess {
+            tx: transaction.clone(),
+            fee: fee,
+            receipt: receipt,
+        })
+    }
+
+    /// Creates a `TransactionResult` backed by `TransactionError`.
+    /// This method logs "transaction error" as a side effect.
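+    /// For example (illustrative only):
+    ///   let result = TransactionResult::error(&tx, some_error);
+    ///   assert!(result.is_err() && !result.is_ok());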
+impl TransactionResult { + /// Logs a queryable message for the case where `tx` has succeeded. + pub fn log_transaction_success(tx: &StacksTransaction) { + info!("Tx successfully processed."; + "event_name" => %"transaction_result", + "tx_id" => %tx.txid(), + "event_type" => %"success", + ); + } + + /// Logs a queryable message for the case where `tx` has failed + /// with error `err`. + pub fn log_transaction_error(tx: &StacksTransaction, err: &Error) { + info!("Tx processing failed with error"; + "event_name" => "transaction_result", + "reason" => %err, + "tx_id" => %tx.txid(), + "event_type" => "error", + ); + } + + /// Logs a queryable message for the case where `tx` has been skipped + /// for error `err`. + pub fn log_transaction_skipped(tx: &StacksTransaction, err: &Error) { + info!( + "Tx processing skipped"; + "event_name" => "transaction_result", + "tx_id" => %tx.txid(), + "event_type" => "skip", + "reason" => %err, + ); + } + + /// Creates a `TransactionResult` backed by `TransactionSuccess`. + /// This method logs "transaction success" as a side effect. + pub fn success( + transaction: &StacksTransaction, + fee: u64, + receipt: StacksTransactionReceipt, + ) -> TransactionResult { + Self::log_transaction_success(transaction); + Self::Success(TransactionSuccess { + tx: transaction.clone(), + fee, + receipt, + }) + } + + /// Creates a `TransactionResult` backed by `TransactionError`. + /// This method logs "transaction error" as a side effect. + pub fn error(transaction: &StacksTransaction, error: Error) -> TransactionResult { + Self::log_transaction_error(transaction, &error); + TransactionResult::ProcessingError(TransactionError { + tx: transaction.clone(), + error, + }) + } + + /// Creates a `TransactionResult` backed by `TransactionSkipped`. + /// This method logs "transaction skipped" as a side effect. + /// Takes a reason (String) and wraps it in the default error type for + /// skipped transactions, `Error::StacksTransactionSkipped`. + pub fn skipped(transaction: &StacksTransaction, reason: String) -> TransactionResult { + let error = Error::StacksTransactionSkipped(reason); + Self::log_transaction_skipped(transaction, &error); + TransactionResult::Skipped(TransactionSkipped { + tx: transaction.clone(), + error, + }) + } + + /// Creates a `TransactionResult` backed by `TransactionSkipped`. + /// This method logs "transaction skipped" as a side effect. + pub fn skipped_due_to_error( + transaction: &StacksTransaction, + error: Error, + ) -> TransactionResult { + Self::log_transaction_skipped(transaction, &error); + TransactionResult::Skipped(TransactionSkipped { + tx: transaction.clone(), + error, + }) + } + + pub fn convert_to_event(&self) -> TransactionEvent { + match &self { + TransactionResult::Success(TransactionSuccess { tx, fee, receipt }) => { + TransactionEvent::Success(TransactionSuccessEvent { + txid: tx.txid(), + fee: *fee, + execution_cost: receipt.execution_cost.clone(), + result: receipt.result.clone(), + }) + } + TransactionResult::ProcessingError(TransactionError { tx, error }) => { + TransactionEvent::ProcessingError(TransactionErrorEvent { + txid: tx.txid(), + error: error.to_string(), + }) + } + TransactionResult::Skipped(TransactionSkipped { tx, error }) => { + TransactionEvent::Skipped(TransactionSkippedEvent { + txid: tx.txid(), + error: error.to_string(), + }) + } + } + } + + /// Returns true iff this enum is backed by `TransactionSuccess`. + pub fn is_ok(&self) -> bool { + match &self { + TransactionResult::Success(_) => true, + _ => false, + } + } + + /// Returns a TransactionSuccess result as a pair of 1) fee and 2) receipt. + /// Panics otherwise. + pub fn unwrap(self) -> (u64, StacksTransactionReceipt) { + match self { + TransactionResult::Success(TransactionSuccess { + tx: _, + fee, + receipt, + }) => (fee, receipt), + _ => panic!("Tried to `unwrap` a non-success result."), + } + } + + /// Returns true iff this enum is backed by `TransactionError`. + pub fn is_err(&self) -> bool { + match &self { + TransactionResult::ProcessingError(_) => true, + _ => false, + } + } + + /// Returns the `Error` from a `ProcessingError` result. + /// Panics otherwise. + pub fn unwrap_err(self) -> Error { + match self { + TransactionResult::ProcessingError(TransactionError { tx: _, error }) => error, + _ => panic!("Tried to `unwrap_err` a non-error result."), + } + } +}
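How a caller is expected to branch on these results, mirroring what the anchored-block and microblock loops later in this diff do. A sketch only; the logging macros stand in for real handling, and the types are the ones defined above:

```rust
// Convert the result to an event for the dispatcher, then branch on the outcome.
fn handle(result: TransactionResult, total_fees: &mut u64) -> TransactionEvent {
    let event = result.convert_to_event();
    match result {
        TransactionResult::Success(TransactionSuccess { fee, .. }) => {
            *total_fees += fee; // the tx goes into the block
        }
        TransactionResult::Skipped(TransactionSkipped { error, .. }) => {
            debug!("Skipping tx: {}", error); // stays in the mempool; may apply later
        }
        TransactionResult::ProcessingError(TransactionError { tx, error }) => {
            warn!("Failed to apply tx {}: {}", tx.txid(), error); // rejected
        }
    }
    event
}
```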
/// /// Independent structure for building microblocks: /// StacksBlockBuilder cannot be used, since microblocks should only be broadcasted @@ -275,10 +533,14 @@ impl<'a> StacksMicroblockBuilder<'a> { }) } - fn make_next_microblock( + /// Produce the next microblock in the stream, unconditionally, from the given txs. + /// No validity checking will be done. + pub fn make_next_microblock( &mut self, txs: Vec<StacksTransaction>, miner_key: &Secp256k1PrivateKey, + tx_events: Vec<TransactionEvent>, + event_dispatcher: Option<&dyn MemPoolEventDispatcher>, ) -> Result<StacksMicroblock, Error> { let miner_pubkey_hash = Hash160::from_node_public_key(&StacksPublicKey::from_private(miner_key)); @@ -309,6 +571,15 @@ impl<'a> StacksMicroblockBuilder<'a> { txs: txs, }; + if let Some(dispatcher) = event_dispatcher { + dispatcher.mined_microblock_event( + &microblock, + tx_events, + self.anchor_block_consensus_hash, + self.anchor_block, + ) + } + info!( "Miner: Created microblock block {} (seq={}) off of {}/{}: {} transaction(s)", microblock.block_hash(), @@ -321,22 +592,44 @@ impl<'a> StacksMicroblockBuilder<'a> { } /// Mine the next transaction into a microblock. - /// Returns Some(StacksTransactionReceipt) or None if the transaction was - /// or was not mined into this microblock. + /// Returns Ok(TransactionResult::Success) if the transaction was mined into this microblock. + /// Returns Ok(TransactionResult::Skipped) if the transaction was not mined, but can be mined later. + /// Returns Ok(TransactionResult::ProcessingError) if the transaction was not mined due to an error. + /// Returns Err(e) if an error occurs during the function. + /// + /// This calls `StacksChainState::process_transaction` and also checks certain pre-conditions + /// and handles errors. + /// + /// # Pre-Checks + /// - skip if the `anchor_mode` rules out microblocks + /// - skip if `tx.txid()` is already in `considered` + /// - skip if adding the block would result in a block size bigger than `MAX_EPOCH_SIZE` + /// + /// # Error Handling + /// - If the error when processing a tx is `CostOverflowError`, reset the cost of the block. fn mine_next_transaction( clarity_tx: &mut ClarityTx<'a>, tx: StacksTransaction, tx_len: u64, considered: &mut HashSet<Txid>, bytes_so_far: u64, - ) -> Result<Option<StacksTransactionReceipt>, Error> { + ) -> Result<TransactionResult, Error> { if tx.anchor_mode != TransactionAnchorMode::OffChainOnly && tx.anchor_mode != TransactionAnchorMode::Any { - return Ok(None); + return Ok(TransactionResult::skipped_due_to_error( + &tx, + Error::InvalidStacksTransaction( + "Invalid transaction anchor mode for streamed data".to_string(), + false, + ), + )); } if considered.contains(&tx.txid()) { - return Ok(None); + return Ok(TransactionResult::skipped( + &tx, + "Already considered.".to_string(), + )); } else { considered.insert(tx.txid()); } @@ -345,29 +638,50 @@ impl<'a> StacksMicroblockBuilder<'a> { "Adding microblock tx {} would exceed epoch data size", &tx.txid() ); - return Ok(None); + return Ok(TransactionResult::skipped_due_to_error( + &tx, + Error::BlockTooBigError, + )); } let quiet = !cfg!(test); match StacksChainState::process_transaction(clarity_tx, &tx, quiet) { - Ok((_, receipt)) => return Ok(Some(receipt)), - Err(e) => match e { - Error::CostOverflowError(cost_before, cost_after, total_budget) => { - // note: this path _does_ not perform the tx block budget % heuristic, - // because this code path is not directly called with a mempool handle. - warn!( - "Transaction {} reached block cost {}; budget was {}", - tx.txid(), - &cost_after, - &total_budget - ); - clarity_tx.reset_cost(cost_before); - } - _ => { - warn!("Error processing TX {}: {}", tx.txid(), e); + Ok((fee, receipt)) => Ok(TransactionResult::success(&tx, fee, receipt)), + Err(e) => { + match &e { + Error::CostOverflowError(cost_before, cost_after, total_budget) => { + // note: this path _does_ not perform the tx block budget % heuristic, + // because this code path is not directly called with a mempool handle.
+ clarity_tx.reset_cost(cost_before.clone()); + if total_budget.proportion_largest_dimension(&cost_before) + < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC + { + warn!( + "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", + tx.txid(), + 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, + &total_budget + ); + return Ok(TransactionResult::error( + &tx, + Error::TransactionTooBigError, + )); + } else { + warn!( + "Transaction {} reached block cost {}; budget was {}", + tx.txid(), + &cost_after, + &total_budget + ); + return Ok(TransactionResult::skipped_due_to_error( + &tx, + Error::BlockTooBigError, + )); + } + } + _ => Ok(TransactionResult::error(&tx, e)), } - }, + } } - return Ok(None); } /// NOTE: this is only used in integration tests. @@ -391,6 +705,7 @@ impl<'a> StacksMicroblockBuilder<'a> { let mut bytes_so_far = self.runtime.bytes_so_far; let mut num_txs = self.runtime.num_mined; + let mut tx_events = Vec::new(); let mut result = Ok(()); for (tx, tx_len) in txs_and_lens.into_iter() { @@ -401,15 +716,20 @@ impl<'a> StacksMicroblockBuilder<'a> { &mut considered, bytes_so_far, ) { - Ok(Some(_)) => { - test_debug!("Include tx {} in microblock", tx.txid()); - bytes_so_far += tx_len; - num_txs += 1; - txs_included.push(tx); - } - Ok(None) => { - test_debug!("Exclude tx {} from microblock", tx.txid()); - continue; + Ok(tx_result) => { + tx_events.push(tx_result.convert_to_event()); + match tx_result { + TransactionResult::Success(..) => { + test_debug!("Include tx {} in microblock", tx.txid()); + bytes_so_far += tx_len; + num_txs += 1; + txs_included.push(tx); + } + TransactionResult::Skipped(..) | TransactionResult::ProcessingError(..) => { + test_debug!("Exclude tx {} from microblock", tx.txid()); + continue; + } + } } Err(e) => { result = Err(e); @@ -434,9 +754,6 @@ impl<'a> StacksMicroblockBuilder<'a> { self.runtime.num_mined = num_txs; match result { - Err(Error::BlockTooBigError) => { - info!("Block size budget reached with microblocks"); - } Err(e) => { warn!("Error producing microblock: {}", e); return Err(e); @@ -444,13 +761,14 @@ impl<'a> StacksMicroblockBuilder<'a> { _ => {} } - return self.make_next_microblock(txs_included, miner_key); + return self.make_next_microblock(txs_included, miner_key, tx_events, None); } pub fn mine_next_microblock( &mut self, mem_pool: &mut MemPoolDB, miner_key: &Secp256k1PrivateKey, + event_dispatcher: &dyn MemPoolEventDispatcher, ) -> Result { let mut txs_included = vec![]; let mempool_settings = self.settings.mempool_settings.clone(); @@ -469,6 +787,7 @@ impl<'a> StacksMicroblockBuilder<'a> { let mut bytes_so_far = self.runtime.bytes_so_far; let mut num_txs = self.runtime.num_mined; let mut num_selected = 0; + let mut tx_events = Vec::new(); let deadline = get_epoch_time_ms() + (self.settings.max_miner_time_ms as u128); mem_pool.reset_last_known_nonces()?; @@ -509,34 +828,43 @@ impl<'a> StacksMicroblockBuilder<'a> { &mut considered, bytes_so_far, ) { - Ok(Some(receipt)) => { - bytes_so_far += mempool_tx.metadata.len; - - if update_estimator { - if let Err(e) = estimator.notify_event( - &mempool_tx.tx.payload, - &receipt.execution_cost, - &block_limit, - &stacks_epoch_id, - ) { - warn!("Error updating estimator"; + Ok(tx_result) => { + tx_events.push(tx_result.convert_to_event()); + match tx_result { + TransactionResult::Success(TransactionSuccess { + receipt, + .. 
+ }) => { + bytes_so_far += mempool_tx.metadata.len; + + if update_estimator { + if let Err(e) = estimator.notify_event( + &mempool_tx.tx.payload, + &receipt.execution_cost, + &block_limit, + &stacks_epoch_id, + ) { + warn!("Error updating estimator"; "txid" => %mempool_tx.metadata.txid, "error" => ?e); + } + } + + debug!( + "Include tx {} ({}) in microblock", + mempool_tx.tx.txid(), + mempool_tx.tx.payload.name() + ); + txs_included.push(mempool_tx.tx.clone()); + num_txs += 1; + num_added += 1; + num_selected += 1; + Ok(true) } + TransactionResult::Skipped(..) + | TransactionResult::ProcessingError(..) => Ok(true), // keep iterating } - - debug!( - "Include tx {} ({}) in microblock", - mempool_tx.tx.txid(), - mempool_tx.tx.payload.name() - ); - txs_included.push(mempool_tx.tx.clone()); - num_txs += 1; - num_added += 1; - num_selected += 1; - Ok(true) } - Ok(None) => Ok(true), // keep iterating Err(e) => Err(e), } }, @@ -583,7 +911,12 @@ impl<'a> StacksMicroblockBuilder<'a> { } } - return self.make_next_microblock(txs_included, miner_key); + return self.make_next_microblock( + txs_included, + miner_key, + tx_events, + Some(event_dispatcher), + ); } pub fn get_bytes_so_far(&self) -> u64 { @@ -636,7 +969,6 @@ impl StacksBlockBuilder { StacksBlockBuilder { chain_tip: parent_chain_tip.clone(), - header: header, txs: vec![], micro_txs: vec![], total_anchored_fees: 0, @@ -644,6 +976,9 @@ impl StacksBlockBuilder { total_streamed_fees: 0, bytes_so_far: bytes_so_far, anchored_done: false, + parent_consensus_hash: parent_chain_tip.consensus_hash.clone(), + parent_header_hash: header.parent_block.clone(), + header: header, parent_microblock_hash: parent_chain_tip .microblock_tail .as_ref() @@ -772,15 +1107,18 @@ impl StacksBlockBuilder { } /// Append a transaction if doing so won't exceed the epoch data size. - /// Errors out if we exceed budget, or the transaction is invalid. + /// Errors out if we fail to mine the tx (exceed budget, or the transaction is invalid). pub fn try_mine_tx( &mut self, clarity_tx: &mut ClarityTx, tx: &StacksTransaction, - ) -> Result<(), Error> { + ) -> Result { let tx_len = tx.tx_len(); - self.try_mine_tx_with_len(clarity_tx, tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT) - .map(|_| ()) + match self.try_mine_tx_with_len(clarity_tx, tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT) { + TransactionResult::Success(s) => Ok(TransactionResult::Success(s)), + TransactionResult::Skipped(TransactionSkipped { error, .. }) + | TransactionResult::ProcessingError(TransactionError { error, .. }) => Err(error), + } } /// Append a transaction if doing so won't exceed the epoch data size. 
@@ -791,9 +1129,9 @@ impl StacksBlockBuilder { tx: &StacksTransaction, tx_len: u64, limit_behavior: &BlockLimitFunction, - ) -> Result { + ) -> TransactionResult { if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { - return Err(Error::BlockTooBigError); + return TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError); } match limit_behavior { @@ -803,43 +1141,61 @@ impl StacksBlockBuilder { // once we've hit the runtime limit once, allow boot code contract calls, but do not try to eval // other contract calls if !cc.address.is_boot_code_addr() { - return Err(Error::StacksTransactionSkipped); + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); } } TransactionPayload::SmartContract(_) => { - return Err(Error::StacksTransactionSkipped) + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); } _ => {} } } - BlockLimitFunction::LIMIT_REACHED => return Err(Error::StacksTransactionSkipped), + BlockLimitFunction::LIMIT_REACHED => { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::LIMIT_REACHED".to_string(), + ) + } BlockLimitFunction::NO_LIMIT_HIT => {} }; let quiet = !cfg!(test); - let receipt = if !self.anchored_done { + let result = if !self.anchored_done { // building up the anchored blocks if tx.anchor_mode != TransactionAnchorMode::OnChainOnly && tx.anchor_mode != TransactionAnchorMode::Any { - return Err(Error::InvalidStacksTransaction( - "Invalid transaction anchor mode for anchored data".to_string(), - false, - )); + return TransactionResult::skipped_due_to_error( + tx, + Error::InvalidStacksTransaction( + "Invalid transaction anchor mode for anchored data".to_string(), + false, + ), + ); } - let (fee, receipt) = StacksChainState::process_transaction(clarity_tx, tx, quiet) - .map_err(|e| match e { + let (fee, receipt) = match StacksChainState::process_transaction(clarity_tx, tx, quiet) + { + Ok((fee, receipt)) => (fee, receipt), + Err(e) => match e { Error::CostOverflowError(cost_before, cost_after, total_budget) => { clarity_tx.reset_cost(cost_before.clone()); - if total_budget.proportion_largest_dimension(&cost_before) < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC { + if total_budget.proportion_largest_dimension(&cost_before) + < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC + { warn!( "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", tx.txid(), 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, &total_budget ); - Error::TransactionTooBigError + return TransactionResult::error(&tx, Error::TransactionTooBigError); } else { warn!( "Transaction {} reached block cost {}; budget was {}", @@ -847,12 +1203,15 @@ impl StacksBlockBuilder { &cost_after, &total_budget ); - Error::BlockTooBigError + return TransactionResult::skipped_due_to_error( + &tx, + Error::BlockTooBigError, + ); } } - _ => e, - })?; - + _ => return TransactionResult::error(&tx, e), + }, + }; info!("Include tx"; "tx" => %tx.txid(), "payload" => tx.payload.name(), @@ -862,30 +1221,37 @@ impl StacksBlockBuilder { self.txs.push(tx.clone()); self.total_anchored_fees += fee; - receipt + TransactionResult::success(&tx, fee, receipt) } else { // building up the microblocks if tx.anchor_mode != TransactionAnchorMode::OffChainOnly && tx.anchor_mode != TransactionAnchorMode::Any { - return Err(Error::InvalidStacksTransaction( - "Invalid transaction anchor mode for streamed data".to_string(), - false, - )); + return TransactionResult::skipped_due_to_error( + tx, + 
Error::InvalidStacksTransaction( + "Invalid transaction anchor mode for streamed data".to_string(), + false, + ), + ); } - let (fee, receipt) = StacksChainState::process_transaction(clarity_tx, tx, quiet) - .map_err(|e| match e { + let (fee, receipt) = match StacksChainState::process_transaction(clarity_tx, tx, quiet) + { + Ok((fee, receipt)) => (fee, receipt), + Err(e) => match e { Error::CostOverflowError(cost_before, cost_after, total_budget) => { clarity_tx.reset_cost(cost_before.clone()); - if total_budget.proportion_largest_dimension(&cost_before) < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC { + if total_budget.proportion_largest_dimension(&cost_before) + < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC + { warn!( "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", tx.txid(), 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, &total_budget ); - Error::TransactionTooBigError + return TransactionResult::error(&tx, Error::TransactionTooBigError); } else { warn!( "Transaction {} reached block cost {}; budget was {}", @@ -893,12 +1259,15 @@ impl StacksBlockBuilder { &cost_after, &total_budget ); - Error::BlockTooBigError + return TransactionResult::skipped_due_to_error( + &tx, + Error::BlockTooBigError, + ); } } - _ => e, - })?; - + _ => return TransactionResult::error(&tx, e), + }, + }; debug!( "Include tx {} ({}) in microblock", tx.txid(), @@ -909,11 +1278,11 @@ impl StacksBlockBuilder { self.micro_txs.push(tx.clone()); self.total_streamed_fees += fee; - receipt + TransactionResult::success(&tx, fee, receipt) }; self.bytes_so_far += tx_len; - Ok(receipt) + result } /// Append a transaction if doing so won't exceed the epoch data size. @@ -971,40 +1340,7 @@ impl StacksBlockBuilder { Ok(()) } - /// Finish building the anchored block. - /// TODO: expand to deny mining a block whose anchored static checks fail (and allow the caller - /// to disable this, in order to test mining invalid blocks) - pub fn mine_anchored_block(&mut self, clarity_tx: &mut ClarityTx) -> StacksBlock { - assert!(!self.anchored_done); - - // add miner payments - if let Some((ref miner_reward, ref user_rewards, ref parent_reward)) = self.miner_payouts { - // grant in order by miner, then users - let matured_ustx = StacksChainState::process_matured_miner_rewards( - clarity_tx, - miner_reward, - user_rewards, - parent_reward, - ) - .expect("FATAL: failed to process miner rewards"); - - clarity_tx.increment_ustx_liquid_supply(matured_ustx); - } - - // process unlocks - let (new_unlocked_ustx, _) = - StacksChainState::process_stx_unlocks(clarity_tx).expect("FATAL: failed to unlock STX"); - - clarity_tx.increment_ustx_liquid_supply(new_unlocked_ustx); - - // mark microblock public key as used - StacksChainState::insert_microblock_pubkey_hash( - clarity_tx, - self.header.total_work.work as u32, - &self.header.microblock_pubkey_hash, - ) - .expect("FATAL: failed to insert microblock pubkey hash"); - + pub fn finalize_block(&mut self, clarity_tx: &mut ClarityTx) -> StacksBlock { // done! Calculate state root and tx merkle root let txid_vecs = self .txs @@ -1054,6 +1390,22 @@ impl StacksBlockBuilder { block } + /// Finish building the anchored block. 
+ /// TODO: expand to deny mining a block whose anchored static checks fail (and allow the caller + /// to disable this, in order to test mining invalid blocks) + /// Returns: stacks block + pub fn mine_anchored_block(&mut self, clarity_tx: &mut ClarityTx) -> StacksBlock { + assert!(!self.anchored_done); + StacksChainState::finish_block( + clarity_tx, + self.miner_payouts.clone(), + self.header.total_work.work as u32, + self.header.microblock_pubkey_hash, + ) + .expect("FATAL: call to `finish_block` failed"); + self.finalize_block(clarity_tx) + } + /// Cut the next microblock. pub fn mine_next_microblock<'a>(&mut self) -> Result { let txid_vecs = self @@ -1139,34 +1491,18 @@ impl StacksBlockBuilder { } } - /// Begin mining an epoch's transactions. - /// Returns an open ClarityTx for mining the block, as well as the ExecutionCost of any confirmed - /// microblocks. - /// NOTE: even though we don't yet know the block hash, the Clarity VM ensures that a - /// transaction can't query information about the _current_ block (i.e. information that is not - /// yet known). - pub fn epoch_begin<'a>( + /// This function should be called before `epoch_begin`. + /// It loads the parent microblock stream, sets the parent microblock, and returns + /// data necessary for `epoch_begin`. + /// Returns chainstate transaction, clarity instance, burnchain header hash + /// of the burn tip, burn tip height + 1, the parent microblock stream, + /// the parent consensus hash, the parent header hash, and a bool + /// representing whether the network is mainnet or not. + pub fn pre_epoch_begin<'a>( &mut self, chainstate: &'a mut StacksChainState, burn_dbconn: &'a SortitionDBConn, - ) -> Result<(ClarityTx<'a>, ExecutionCost), Error> { - let mainnet = chainstate.config().mainnet; - - // find matured miner rewards, so we can grant them within the Clarity DB tx. - let (latest_matured_miners, matured_miner_parent) = { - let mut tx = chainstate.index_tx_begin()?; - let latest_miners = - StacksChainState::get_scheduled_block_rewards(&mut tx, &self.chain_tip)?; - let parent_miner = - StacksChainState::get_parent_matured_miner(&mut tx, mainnet, &latest_miners)?; - (latest_miners, parent_miner) - }; - - // there's no way the miner can learn either the burn block hash or the stacks block hash, - // so use a sentinel hash value for each that will never occur in practice. - let new_consensus_hash = MINER_BLOCK_CONSENSUS_HASH.clone(); - let new_block_hash = MINER_BLOCK_HEADER_HASH.clone(); - + ) -> Result, Error> { debug!( "Miner epoch begin"; "miner" => %self.miner_id, @@ -1184,10 +1520,10 @@ impl StacksBlockBuilder { ); } - let parent_consensus_hash = self.chain_tip.consensus_hash.clone(); - let parent_header_hash = self.header.parent_block.clone(); - let parent_index_hash = - StacksBlockHeader::make_index_block_hash(&parent_consensus_hash, &parent_header_hash); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &self.parent_consensus_hash, + &self.parent_header_hash, + ); let burn_tip = SortitionDB::get_canonical_chain_tip_bhh(burn_dbconn.conn())?; let burn_tip_height = @@ -1195,24 +1531,24 @@ impl StacksBlockBuilder { let parent_microblocks = if StacksChainState::block_crosses_epoch_boundary( chainstate.db(), - &parent_consensus_hash, - &parent_header_hash, + &self.parent_consensus_hash, + &self.parent_header_hash, )? 
{ - info!("Descendant of {}/{} will NOT confirm any microblocks, since it will cross an epoch boundary", &parent_consensus_hash, &parent_header_hash); + info!("Descendant of {}/{} will NOT confirm any microblocks, since it will cross an epoch boundary", &self.parent_consensus_hash, &self.parent_header_hash); vec![] } else { match self.load_parent_microblocks( chainstate, - &parent_consensus_hash, - &parent_header_hash, + &self.parent_consensus_hash.clone(), + &self.parent_header_hash.clone(), &parent_index_hash, ) { Ok(x) => x, Err(e) => { warn!("Miner failed to load parent microblock, mining without parent microblock tail"; - "parent_block_hash" => %parent_header_hash, + "parent_block_hash" => %self.parent_header_hash, "parent_index_hash" => %parent_index_hash, - "parent_consensus_hash" => %parent_consensus_hash, + "parent_consensus_hash" => %self.parent_consensus_hash, "parent_microblock_hash" => match self.parent_microblock_hash.as_ref() { Some(x) => format!("Some({})", x.to_string()), None => "None".to_string(), @@ -1225,103 +1561,72 @@ impl StacksBlockBuilder { debug!( "Descendant of {}/{} confirms {} microblock(s)", - &parent_consensus_hash, - &parent_header_hash, + &self.parent_consensus_hash, + &self.parent_header_hash, parent_microblocks.len() ); - let stacking_burn_ops = SortitionDB::get_stack_stx_ops(burn_dbconn.conn(), &burn_tip)?; - let transfer_burn_ops = SortitionDB::get_transfer_stx_ops(burn_dbconn.conn(), &burn_tip)?; - - let parent_block_cost_opt = if parent_microblocks.is_empty() { - None - } else { - StacksChainState::get_stacks_block_anchored_cost(chainstate.db(), &parent_index_hash)? - }; - - let mut tx = chainstate.block_begin( - burn_dbconn, - &parent_consensus_hash, - &parent_header_hash, - &new_consensus_hash, - &new_block_hash, - ); - - let matured_miner_rewards_opt = StacksChainState::find_mature_miner_rewards( - &mut tx, - &self.chain_tip, - latest_matured_miners, - matured_miner_parent, - )?; - - self.miner_payouts = - matured_miner_rewards_opt.map(|(miner, users, parent, _)| (miner, users, parent)); - - debug!( - "Miner {}: Apply {} parent microblocks", - self.miner_id, - parent_microblocks.len() - ); - - let t1 = get_epoch_time_ms(); - - let mblock_confirmed_cost = if parent_microblocks.len() == 0 { + if parent_microblocks.len() == 0 { self.set_parent_microblock(&EMPTY_MICROBLOCK_PARENT_HASH, 0); - ExecutionCost::zero() } else { - let parent_block_cost = parent_block_cost_opt.ok_or_else(|| { - Error::InvalidStacksBlock(format!( - "Failed to load parent block cost. 
parent_stacks_block_id = {}", - &parent_index_hash - )) - })?; - - tx.reset_cost(parent_block_cost.clone()); - - match StacksChainState::process_microblocks_transactions(&mut tx, &parent_microblocks) { - Ok((fees, ..)) => { - self.total_confirmed_streamed_fees += fees as u64; - } - Err((e, mblock_header_hash)) => { - let msg = format!( - "Invalid Stacks microblocks {},{} (offender {}): {:?}", - parent_consensus_hash, parent_header_hash, mblock_header_hash, &e - ); - warn!("{}", &msg); - - return Err(Error::InvalidStacksMicroblock(msg, mblock_header_hash)); - } - }; let num_mblocks = parent_microblocks.len(); let last_mblock_hdr = parent_microblocks[num_mblocks - 1].header.clone(); self.set_parent_microblock(&last_mblock_hdr.block_hash(), last_mblock_hdr.sequence); - - let mut microblock_cost = tx.cost_so_far(); - microblock_cost - .sub(&parent_block_cost) - .expect("BUG: block_cost + microblock_cost < block_cost"); - - // if we get here, then we need to reset the block-cost back to 0 because this begins the - // block defined by this miner. - tx.reset_cost(ExecutionCost::zero()); - - microblock_cost }; - let t2 = get_epoch_time_ms(); + let mainnet = chainstate.config().mainnet; - debug!( - "Miner {}: Finished applying {} parent microblocks in {}ms\n", - self.miner_id, - parent_microblocks.len(), - t2.saturating_sub(t1) - ); + let (chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin()?; - StacksChainState::process_epoch_transition(&mut tx, burn_tip_height + 1)?; - StacksChainState::process_stacking_ops(&mut tx, stacking_burn_ops); - StacksChainState::process_transfer_ops(&mut tx, transfer_burn_ops); + Ok(MinerEpochInfo { + chainstate_tx, + clarity_instance, + burn_tip, + burn_tip_height: burn_tip_height + 1, + parent_microblocks, + mainnet, + }) + } - Ok((tx, mblock_confirmed_cost)) + /// Begin mining an epoch's transactions. + /// Returns an open ClarityTx for mining the block, as well as the ExecutionCost of any confirmed + /// microblocks. + /// NOTE: even though we don't yet know the block hash, the Clarity VM ensures that a + /// transaction can't query information about the _current_ block (i.e. information that is not + /// yet known). + /// This function was separated from `pre_epoch_begin` because something "higher" than `epoch_begin` + /// must own `ChainstateTx` and `ClarityInstance`, which are borrowed to construct the + /// returned ClarityTx object. + pub fn epoch_begin<'a, 'b>( + &mut self, + burn_dbconn: &'a SortitionDBConn, + info: &'b mut MinerEpochInfo<'a>, + ) -> Result<(ClarityTx<'b>, ExecutionCost), Error> { + let SetupBlockResult { + clarity_tx, + microblock_execution_cost, + microblock_fees, + matured_miner_rewards_opt, + .. 
+ } = StacksChainState::setup_block( + &mut info.chainstate_tx, + info.clarity_instance, + burn_dbconn, + burn_dbconn.conn(), + &self.chain_tip, + info.burn_tip, + info.burn_tip_height, + self.parent_consensus_hash, + self.parent_header_hash, + &info.parent_microblocks, + info.mainnet, + Some(self.miner_id), + )?; + self.miner_payouts = + matured_miner_rewards_opt.map(|(miner, users, parent, _)| (miner, users, parent)); + self.total_confirmed_streamed_fees += microblock_fees as u64; + + Ok((clarity_tx, microblock_execution_cost)) } /// Finish up mining an epoch's transactions @@ -1360,7 +1665,8 @@ impl StacksBlockBuilder { ) -> Result<(StacksBlock, u64, ExecutionCost), Error> { debug!("Build anchored block from {} transactions", txs.len()); let (mut chainstate, _) = chainstate_handle.reopen()?; - let (mut epoch_tx, _) = builder.epoch_begin(&mut chainstate, burn_dbconn)?; + let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn)?; + let (mut epoch_tx, _) = builder.epoch_begin(burn_dbconn, &mut miner_epoch_info)?; for tx in txs.drain(..) { match builder.try_mine_tx(&mut epoch_tx, &tx) { Ok(_) => { @@ -1533,16 +1839,20 @@ impl StacksBlockBuilder { let ts_start = get_epoch_time_ms(); + let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn)?; let (mut epoch_tx, confirmed_mblock_cost) = - builder.epoch_begin(&mut chainstate, burn_dbconn)?; - + builder.epoch_begin(burn_dbconn, &mut miner_epoch_info)?; let stacks_epoch_id = epoch_tx.get_epoch(); - let block_limit = epoch_tx .block_limit() .expect("Failed to obtain block limit from miner's block connection"); - builder.try_mine_tx(&mut epoch_tx, coinbase_tx)?; + let mut tx_events = Vec::new(); + tx_events.push( + builder + .try_mine_tx(&mut epoch_tx, coinbase_tx)? + .convert_to_event(), + ); mempool.reset_last_known_nonces()?; @@ -1605,18 +1915,21 @@ impl StacksBlockBuilder { considered.insert(txinfo.tx.txid()); num_considered += 1; - match builder.try_mine_tx_with_len( + let tx_result = builder.try_mine_tx_with_len( epoch_tx, &txinfo.tx, txinfo.metadata.len, &block_limit_hit, - ) { - Ok(tx_receipt) => { + ); + tx_events.push(tx_result.convert_to_event()); + + match tx_result { + TransactionResult::Success(TransactionSuccess { receipt, .. }) => { num_txs += 1; if update_estimator { if let Err(e) = estimator.notify_event( &txinfo.tx.payload, - &tx_receipt.execution_cost, + &receipt.execution_cost, &block_limit, &stacks_epoch_id, ) { @@ -1626,40 +1939,55 @@ impl StacksBlockBuilder { } } } - Err(Error::StacksTransactionSkipped) => {} - Err(Error::BlockTooBigError) => { - // done mining -- our execution budget is exceeded. 
- // Make the block from the transactions we did manage to get - debug!("Block budget exceeded on tx {}", &txinfo.tx.txid()); - if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { - debug!("Switch to mining stx-transfers only"); - block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; - } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT - { - debug!("Stop mining anchored block due to limit exceeded"); - block_limit_hit = BlockLimitFunction::LIMIT_REACHED; - return Ok(false); - } - } - Err(Error::TransactionTooBigError) => { - invalidated_txs.push(txinfo.metadata.txid); - if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { - block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; - debug!("Switch to mining stx-transfers only"); - } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT - { - debug!("Stop mining anchored block due to limit exceeded"); - block_limit_hit = BlockLimitFunction::LIMIT_REACHED; - return Ok(false); + TransactionResult::Skipped(TransactionSkipped { error, .. }) + | TransactionResult::ProcessingError(TransactionError { + error, .. + }) => { + match &error { + Error::StacksTransactionSkipped(_) => {} + Error::BlockTooBigError => { + // done mining -- our execution budget is exceeded. + // Make the block from the transactions we did manage to get + debug!("Block budget exceeded on tx {}", &txinfo.tx.txid()); + if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { + debug!("Switch to mining stx-transfers only"); + block_limit_hit = + BlockLimitFunction::CONTRACT_LIMIT_HIT; + } else if block_limit_hit + == BlockLimitFunction::CONTRACT_LIMIT_HIT + { + debug!( + "Stop mining anchored block due to limit exceeded" + ); + block_limit_hit = BlockLimitFunction::LIMIT_REACHED; + return Ok(false); + } + } + Error::TransactionTooBigError => { + invalidated_txs.push(txinfo.metadata.txid); + if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { + block_limit_hit = + BlockLimitFunction::CONTRACT_LIMIT_HIT; + debug!("Switch to mining stx-transfers only"); + } else if block_limit_hit + == BlockLimitFunction::CONTRACT_LIMIT_HIT + { + debug!( + "Stop mining anchored block due to limit exceeded" + ); + block_limit_hit = BlockLimitFunction::LIMIT_REACHED; + return Ok(false); + } + } + Error::InvalidStacksTransaction(_, true) => { + // if we have an invalid transaction that was quietly ignored, don't warn here either + } + e => { + warn!("Failed to apply tx {}: {:?}", &txinfo.tx.txid(), &e); + return Ok(true); + } } } - Err(Error::InvalidStacksTransaction(_, true)) => { - // if we have an invalid transaction that was quietly ignored, don't warn here either - } - Err(e) => { - warn!("Failed to apply tx {}: {:?}", &txinfo.tx.txid(), &e); - return Ok(true); - } } mined_origin_nonces @@ -1717,6 +2045,7 @@ impl StacksBlockBuilder { size, &consumed, &confirmed_mblock_cost, + tx_events, ); } @@ -2812,8 +3141,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = block_builder( @@ -2993,8 +3325,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut 
miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_1_block_builder( @@ -3133,8 +3468,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_1_block_builder( @@ -3178,8 +3516,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_2_block_builder( @@ -3465,8 +3806,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_1_block_builder( @@ -3510,8 +3854,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_2_block_builder( @@ -3720,8 +4067,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_1_block_builder( @@ -3767,8 +4117,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_2_block_builder( @@ -4052,8 +4405,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_1_block_builder( @@ -4094,8 +4450,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_2_block_builder( @@ -4289,8 +4648,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_1_block_builder( @@ -4334,8 +4696,11 @@ pub mod test 
{ ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_2_block_builder( @@ -4589,8 +4954,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_1_block_builder( @@ -4631,8 +4999,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_2_block_builder( @@ -4826,8 +5197,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_1_block_builder( @@ -4871,8 +5245,11 @@ pub mod test { ); let sort_iconn = sortdb.index_conn(); + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; let (stacks_block, microblocks) = miner_2_block_builder( diff --git a/src/chainstate/stacks/mod.rs b/src/chainstate/stacks/mod.rs index 478406dbc2..09c2df073d 100644 --- a/src/chainstate/stacks/mod.rs +++ b/src/chainstate/stacks/mod.rs @@ -144,12 +144,13 @@ pub enum Error { InvalidFee, InvalidStacksBlock(String), InvalidStacksMicroblock(String, BlockHeaderHash), + // The bool is true if the invalid transaction was quietly ignored. InvalidStacksTransaction(String, bool), /// This error indicates that the considered transaction was skipped /// because of the current state of the block assembly algorithm, /// but the transaction otherwise may be valid (e.g., block assembly is /// only considering STX transfers and this tx isn't a transfer). 
- StacksTransactionSkipped, + StacksTransactionSkipped(String), PostConditionFailed(String), NoSuchBlockError, InvalidChainstateDB, @@ -234,8 +235,12 @@ impl fmt::Display for Error { Error::PoxAlreadyLocked => write!(f, "Account has already locked STX for PoX"), Error::PoxInsufficientBalance => write!(f, "Not enough STX to lock"), Error::PoxNoRewardCycle => write!(f, "No such reward cycle"), - Error::StacksTransactionSkipped => { - write!(f, "Stacks transaction skipped during assembly") + Error::StacksTransactionSkipped(ref r) => { + write!( + f, + "Stacks transaction skipped during assembly due to: {}", + r + ) } } } @@ -269,7 +274,7 @@ impl error::Error for Error { Error::PoxAlreadyLocked => None, Error::PoxInsufficientBalance => None, Error::PoxNoRewardCycle => None, - Error::StacksTransactionSkipped => None, + Error::StacksTransactionSkipped(ref _r) => None, } } } @@ -302,7 +307,7 @@ impl Error { Error::PoxAlreadyLocked => "PoxAlreadyLocked", Error::PoxInsufficientBalance => "PoxInsufficientBalance", Error::PoxNoRewardCycle => "PoxNoRewardCycle", - Error::StacksTransactionSkipped => "StacksTransactionSkipped", + Error::StacksTransactionSkipped(ref _r) => "StacksTransactionSkipped", } } @@ -868,6 +873,8 @@ pub struct StacksBlockBuilder { prev_microblock_header: StacksMicroblockHeader, miner_privkey: StacksPrivateKey, miner_payouts: Option<(MinerReward, Vec, MinerReward)>, + parent_consensus_hash: ConsensusHash, + parent_header_hash: BlockHeaderHash, parent_microblock_hash: Option, miner_id: usize, } diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index dd43ef8147..f248262d13 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -68,6 +68,7 @@ use crate::types::chainstate::StacksBlockId; use crate::types::chainstate::StacksMicroblockHeader; use crate::types::proof::TrieHash; use crate::util::boot::{boot_code_acc, boot_code_addr, boot_code_id, boot_code_tx_auth}; +use crate::util::db::Error as db_error; use crate::util::secp256k1::MessageSignature; use types::chainstate::BurnchainHeaderHash; @@ -517,6 +518,11 @@ impl ClarityInstance { }) } + pub fn trie_exists_for_block(&mut self, bhh: &StacksBlockId) -> Result { + let mut datastore = self.datastore.begin_read_only(None); + datastore.trie_exists_for_block(bhh) + } + /// Evaluate program read-only at `at_block`. This will be evaluated in the Stacks epoch that /// was active *during* the evaluation of `at_block` pub fn eval_read_only( diff --git a/src/clarity_vm/database/marf.rs b/src/clarity_vm/database/marf.rs index a778f8ef69..a12b4a4a30 100644 --- a/src/clarity_vm/database/marf.rs +++ b/src/clarity_vm/database/marf.rs @@ -16,6 +16,7 @@ use vm::types::QualifiedContractIdentifier; use crate::types::chainstate::{BlockHeaderHash, StacksBlockHeader}; use crate::types::chainstate::{MARFValue, StacksBlockId}; use crate::types::proof::{ClarityMarfTrieId, TrieHash, TrieMerkleProof}; +use crate::util::db::Error as db_error; /// The MarfedKV struct is used to wrap a MARF data structure and side-storage /// for use as a K/V store for ClarityDB or the AnalysisDB. 
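The new `trie_exists_for_block` plumbing gives callers (e.g., v2 RPC handlers resolving a user-supplied `tip`) a cheap way to validate a block ID before opening a read-only connection at it. A hedged sketch of such a guard; the wrapper function is hypothetical, but `ClarityInstance::trie_exists_for_block` and its `Result<bool, db_error>` signature come from this diff:

```rust
// Hypothetical guard: reject unknown tips up front instead of letting the
// read-only MARF connection fail deeper in the call stack.
fn check_tip(clarity: &mut ClarityInstance, tip: &StacksBlockId) -> Result<(), String> {
    match clarity.trie_exists_for_block(tip) {
        Ok(true) => Ok(()),
        Ok(false) => Err(format!("No trie for block {}", tip)),
        Err(e) => Err(format!("DB error checking tip: {:?}", e)),
    }
}
```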
@@ -267,6 +268,13 @@ impl<'a> ReadOnlyMarfStore<'a> { pub fn as_analysis_db<'b>(&'b mut self) -> AnalysisDatabase<'b> { AnalysisDatabase::new(self) } + + pub fn trie_exists_for_block(&mut self, bhh: &StacksBlockId) -> Result { + self.marf.with_conn(|conn| match conn.has_block(bhh) { + Ok(res) => Ok(res), + Err(e) => Err(db_error::IndexError(e)), + }) + } } impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { diff --git a/src/codec/macros.rs b/src/codec/macros.rs index c280307f85..247e6c6a49 100644 --- a/src/codec/macros.rs +++ b/src/codec/macros.rs @@ -35,3 +35,10 @@ macro_rules! impl_byte_array_message_codec { } }; } + +// macro for determining how big an inv bitvec can be, given its bitlen +macro_rules! BITVEC_LEN { + ($bitvec:expr) => { + (($bitvec) / 8 + if ($bitvec) % 8 > 0 { 1 } else { 0 }) as u32 + }; +} diff --git a/src/core/mempool.rs b/src/core/mempool.rs index 9cf12b7693..1ee0e22fc1 100644 --- a/src/core/mempool.rs +++ b/src/core/mempool.rs @@ -15,8 +15,10 @@ // along with this program. If not, see . use std::cmp; +use std::collections::HashSet; use std::fs; -use std::io::Read; +use std::hash::Hasher; +use std::io::{Read, Write}; use std::ops::Deref; use std::ops::DerefMut; use std::path::{Path, PathBuf}; @@ -32,19 +34,22 @@ use rusqlite::Row; use rusqlite::Transaction; use rusqlite::NO_PARAMS; +use siphasher::sip::SipHasher; // this is SipHash-2-4 + use burnchains::Txid; use chainstate::burn::ConsensusHash; -use chainstate::stacks::TransactionPayload; use chainstate::stacks::{ - db::blocks::MemPoolRejection, db::ClarityTx, db::StacksChainState, index::Error as MarfError, - Error as ChainstateError, StacksTransaction, + db::blocks::MemPoolRejection, db::ClarityTx, db::StacksChainState, db::TxStreamData, + index::Error as MarfError, Error as ChainstateError, StacksTransaction, }; +use chainstate::stacks::{StacksMicroblock, TransactionPayload}; use core::ExecutionCost; use core::StacksEpochId; use core::FIRST_BURNCHAIN_CONSENSUS_HASH; use core::FIRST_STACKS_BLOCK_HASH; use monitoring::increment_stx_mempool_gc; use std::time::Instant; +use util::db::query_int; use util::db::query_row_columns; use util::db::query_rows; use util::db::sqlite_open; @@ -57,12 +62,19 @@ use util::db::{query_row, Error}; use util::db::{sql_pragma, DBConn, DBTx, FromRow}; use util::get_epoch_time_ms; use util::get_epoch_time_secs; +use util::hash::to_hex; +use util::hash::Sha512Trunc256Sum; use vm::types::PrincipalData; +use net::MemPoolSyncData; + +use util::bloom::{BloomCounter, BloomFilter, BloomNodeHasher}; + use clarity_vm::clarity::ClarityConnection; use crate::chainstate::stacks::events::StacksTransactionReceipt; use crate::chainstate::stacks::StacksBlock; +use crate::codec::Error as codec_error; use crate::codec::StacksMessageCodec; use crate::cost_estimates; use crate::cost_estimates::metrics::CostMetric; @@ -73,11 +85,63 @@ use crate::cost_estimates::UnitEstimator; use crate::monitoring; use crate::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockHeader}; use crate::util::db::table_exists; +use chainstate::stacks::miner::TransactionEvent; // maximum number of confirmations a transaction can have before it's garbage-collected pub const MEMPOOL_MAX_TRANSACTION_AGE: u64 = 256; pub const MAXIMUM_MEMPOOL_TX_CHAINING: u64 = 25; +// name of table for storing the counting bloom filter +pub const BLOOM_COUNTER_TABLE: &'static str = "txid_bloom_counter"; + +// bloom filter error rate +pub const BLOOM_COUNTER_ERROR_RATE: f64 = 0.001; + +// expected number of txs in the bloom filter +pub 
const MAX_BLOOM_COUNTER_TXS: u32 = 8192; + +// how far back in time (in Stacks blocks) does the bloom counter maintain tx records? +pub const BLOOM_COUNTER_DEPTH: usize = 2; + +// maximum many tx tags we'll send before sending a bloom filter instead. +// The parameter choice here is due to performance -- calculating a tag set can be slower than just +// loading the bloom filter, even though the bloom filter is larger. +const DEFAULT_MAX_TX_TAGS: u32 = 2048; + +/// A node-specific transaction tag -- the first 8 bytes of siphash(local-seed,txid) +#[derive(Debug, Clone, PartialEq, Hash, Eq)] +pub struct TxTag(pub [u8; 8]); + +impl TxTag { + pub fn from(seed: &[u8], txid: &Txid) -> TxTag { + let mut hasher = SipHasher::new(); + hasher.write(seed); + hasher.write(&txid.0); + + let result_64 = hasher.finish(); + TxTag(result_64.to_be_bytes()) + } +} + +impl std::fmt::Display for TxTag { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(f, "{}", &to_hex(&self.0)) + } +} + +impl StacksMessageCodec for TxTag { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + fd.write_all(&self.0).map_err(codec_error::WriteError)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let mut bytes = [0u8; 8]; + fd.read_exact(&mut bytes).map_err(codec_error::ReadError)?; + Ok(TxTag(bytes)) + } +} + pub struct MemPoolAdmitter { cur_block: BlockHeaderHash, cur_consensus_hash: ConsensusHash, @@ -153,6 +217,14 @@ pub trait MemPoolEventDispatcher { block_size_bytes: u64, consumed: &ExecutionCost, confirmed_microblock_cost: &ExecutionCost, + tx_results: Vec, + ); + fn mined_microblock_event( + &self, + microblock: &StacksMicroblock, + tx_results: Vec, + anchor_block_consensus_hash: ConsensusHash, + anchor_block: BlockHeaderHash, ); } @@ -278,8 +350,7 @@ impl FromRow<(u64, u64)> for (u64, u64) { } } -const MEMPOOL_INITIAL_SCHEMA: &'static [&'static str] = &[ - r#" +const MEMPOOL_INITIAL_SCHEMA: &'static [&'static str] = &[r#" CREATE TABLE mempool( txid TEXT NOT NULL, origin_address TEXT NOT NULL, @@ -297,15 +368,9 @@ const MEMPOOL_INITIAL_SCHEMA: &'static [&'static str] = &[ UNIQUE (origin_address, origin_nonce), UNIQUE (sponsor_address,sponsor_nonce) ); - "#, - "CREATE INDEX by_txid ON mempool(txid);", - "CREATE INDEX by_sponsor ON mempool(sponsor_address, sponsor_nonce);", - "CREATE INDEX by_origin ON mempool(origin_address, origin_nonce);", - "CREATE INDEX by_timestamp ON mempool(accept_time);", - "CREATE INDEX by_chaintip ON mempool(consensus_hash,block_header_hash);", -]; + "#]; -const MEMPOOL_SCHEMA_2: &'static [&'static str] = &[ +const MEMPOOL_SCHEMA_2_COST_ESTIMATOR: &'static [&'static str] = &[ r#" CREATE TABLE fee_estimates( txid TEXT NOT NULL, @@ -320,7 +385,6 @@ const MEMPOOL_SCHEMA_2: &'static [&'static str] = &[ r#" ALTER TABLE mempool ADD COLUMN last_known_sponsor_nonce INTEGER; "#, - "CREATE INDEX fee_by_txid ON fee_estimates(txid);", r#" CREATE TABLE schema_version (version NUMBER, PRIMARY KEY (version)); "#, @@ -329,10 +393,46 @@ const MEMPOOL_SCHEMA_2: &'static [&'static str] = &[ "#, ]; +const MEMPOOL_SCHEMA_3_BLOOM_STATE: &'static [&'static str] = &[ + r#" + CREATE TABLE IF NOT EXISTS removed_txids( + txid TEXT PRIMARY KEY NOT NULL, + FOREIGN KEY(txid) REFERENCES mempool(txid) ON DELETE CASCADE + ); + "#, + r#" + -- mapping between hash(local-seed,txid) and txid, used for randomized but efficient + -- paging when streaming transactions out of the mempool. 
+ CREATE TABLE IF NOT EXISTS randomized_txids( + txid TEXT PRIMARY KEY NOT NULL, + hashed_txid TEXT NOT NULL, + FOREIGN KEY(txid) REFERENCES mempool(txid) ON DELETE CASCADE + ); + "#, + r#" + INSERT INTO schema_version (version) VALUES (3) + "#, +]; + +const MEMPOOL_INDEXES: &'static [&'static str] = &[ + "CREATE INDEX IF NOT EXISTS by_txid ON mempool(txid);", + "CREATE INDEX IF NOT EXISTS by_height ON mempool(height);", + "CREATE INDEX IF NOT EXISTS by_txid_and_height ON mempool(txid,height);", + "CREATE INDEX IF NOT EXISTS by_sponsor ON mempool(sponsor_address, sponsor_nonce);", + "CREATE INDEX IF NOT EXISTS by_origin ON mempool(origin_address, origin_nonce);", + "CREATE INDEX IF NOT EXISTS by_timestamp ON mempool(accept_time);", + "CREATE INDEX IF NOT EXISTS by_chaintip ON mempool(consensus_hash,block_header_hash);", + "CREATE INDEX IF NOT EXISTS fee_by_txid ON fee_estimates(txid);", + "CREATE INDEX IF NOT EXISTS by_ordered_hashed_txid ON randomized_txids(hashed_txid ASC);", + "CREATE INDEX IF NOT EXISTS by_hashed_txid ON randomized_txids(txid,hashed_txid);", +]; + pub struct MemPoolDB { - db: DBConn, + pub db: DBConn, path: String, admitter: MemPoolAdmitter, + bloom_counter: BloomCounter, + max_tx_tags: u32, cost_estimator: Box, metric: Box, } @@ -340,6 +440,7 @@ pub struct MemPoolDB { pub struct MemPoolTx<'a> { tx: DBTx<'a>, admitter: &'a mut MemPoolAdmitter, + bloom_counter: Option<&'a mut BloomCounter>, } impl<'a> Deref for MemPoolTx<'a> { @@ -356,13 +457,149 @@ impl<'a> DerefMut for MemPoolTx<'a> { } impl<'a> MemPoolTx<'a> { - pub fn new(tx: DBTx<'a>, admitter: &'a mut MemPoolAdmitter) -> MemPoolTx<'a> { - MemPoolTx { tx, admitter } + pub fn new( + tx: DBTx<'a>, + admitter: &'a mut MemPoolAdmitter, + bloom_counter: &'a mut BloomCounter, + ) -> MemPoolTx<'a> { + MemPoolTx { + tx, + admitter, + bloom_counter: Some(bloom_counter), + } + } + + pub fn with_bloom_state(tx: &mut MemPoolTx<'a>, f: F) -> R + where + F: FnOnce(&mut DBTx<'a>, &mut BloomCounter) -> R, + { + let mut bc = tx + .bloom_counter + .take() + .expect("BUG: did not replace bloom filter"); + let res = f(&mut tx.tx, &mut bc); + tx.bloom_counter.replace(bc); + res } pub fn commit(self) -> Result<(), db_error> { self.tx.commit().map_err(db_error::SqliteError) } + + /// Remove all txids at the given height from the bloom counter. + /// Used to clear out txids that are now outside the bloom counter's depth. 
+ fn prune_bloom_counter(&mut self, target_height: u64) -> Result<(), MemPoolRejection> { + let sql = "SELECT a.txid FROM mempool AS a LEFT OUTER JOIN removed_txids AS b ON a.txid = b.txid WHERE b.txid IS NULL AND a.height = ?1"; + let args: &[&dyn ToSql] = &[&u64_to_sql(target_height)?]; + let txids: Vec = query_rows(&self.tx, sql, args)?; + let _num_txs = txids.len(); + + test_debug!("Prune bloom counter from height {}", target_height); + + // keep borrow-checker happy + MemPoolTx::with_bloom_state(self, |ref mut dbtx, ref mut bloom_counter| { + for txid in txids.into_iter() { + bloom_counter.remove_raw(dbtx, &txid.0)?; + + let sql = "INSERT OR REPLACE INTO removed_txids (txid) VALUES (?1)"; + let args: &[&dyn ToSql] = &[&txid]; + dbtx.execute(sql, args).map_err(db_error::SqliteError)?; + } + // help the type inference out + let res: Result<(), db_error> = Ok(()); + res + })?; + + test_debug!( + "Pruned bloom filter at height {}: removed {} txs", + target_height, + _num_txs + ); + Ok(()) + } + + /// Add the txid to the bloom counter in the mempool DB, optionally replacing a prior + /// transaction (identified by prior_txid) if the bloom counter is full. + /// If this is the first txid at this block height, then also garbage-collect the bloom counter to remove no-longer-recent transactions. + /// If the bloom counter is saturated -- i.e. it represents more than MAX_BLOOM_COUNTER_TXS + /// transactions -- then pick another transaction to evict from the bloom filter and return its txid. + /// (Note that no transactions are ever removed from the mempool; we just don't prioritize them + /// in the bloom filter). + fn update_bloom_counter( + &mut self, + height: u64, + txid: &Txid, + prior_txid: Option, + ) -> Result, MemPoolRejection> { + // is this the first-ever txid at this height? + let sql = "SELECT 1 FROM mempool WHERE height = ?1"; + let args: &[&dyn ToSql] = &[&u64_to_sql(height)?]; + let present: Option = query_row(&self.tx, sql, args)?; + if present.is_none() && height > (BLOOM_COUNTER_DEPTH as u64) { + // this is the first-ever tx at this height. + // which means, the bloom filter window has advanced. + // which means, we need to remove all the txs that are now out of the window. + self.prune_bloom_counter(height - (BLOOM_COUNTER_DEPTH as u64))?; + } + + MemPoolTx::with_bloom_state(self, |ref mut dbtx, ref mut bloom_counter| { + // remove replaced transaction + if let Some(prior_txid) = prior_txid { + bloom_counter.remove_raw(dbtx, &prior_txid.0)?; + } + + // keep the bloom counter un-saturated -- remove at most one transaction from it to keep + // the error rate at or below the target error rate + let evict_txid = { + let num_recents = MemPoolDB::get_num_recent_txs(&dbtx)?; + if num_recents >= MAX_BLOOM_COUNTER_TXS.into() { + // for now, remove lowest-fee tx in the recent tx set. 
+ // TODO: In the future, do it by lowest fee rate + let sql = "SELECT a.txid FROM mempool AS a LEFT OUTER JOIN removed_txids AS b ON a.txid = b.txid WHERE b.txid IS NULL AND a.height > ?1 ORDER BY a.tx_fee ASC LIMIT 1"; + let args: &[&dyn ToSql] = &[&u64_to_sql( + height.saturating_sub(BLOOM_COUNTER_DEPTH as u64), + )?]; + let evict_txid: Option = query_row(&dbtx, sql, args)?; + if let Some(evict_txid) = evict_txid { + bloom_counter.remove_raw(dbtx, &evict_txid.0)?; + + let sql = "INSERT OR REPLACE INTO removed_txids (txid) VALUES (?1)"; + let args: &[&dyn ToSql] = &[&evict_txid]; + dbtx.execute(sql, args).map_err(db_error::SqliteError)?; + + Some(evict_txid) + } else { + None + } + } else { + None + } + }; + + // finally add the new transaction + bloom_counter.insert_raw(dbtx, &txid.0)?; + Ok(evict_txid) + }) + } + + /// Add the txid to our randomized page order + fn update_mempool_pager(&mut self, txid: &Txid) -> Result<(), MemPoolRejection> { + let mut randomized_buff = self + .bloom_counter + .as_ref() + .expect("BUG: did not instantiate bloom counter in mempool tx") + .get_seed() + .to_vec(); + randomized_buff.extend_from_slice(&txid.0); + let hashed_txid = Txid(Sha512Trunc256Sum::from_data(&randomized_buff).0); + + let sql = "INSERT OR REPLACE INTO randomized_txids (txid,hashed_txid) VALUES (?1,?2)"; + let args: &[&dyn ToSql] = &[txid, &hashed_txid]; + + self.execute(sql, args).map_err(db_error::SqliteError)?; + + Ok(()) + } } impl MemPoolTxInfo { @@ -407,16 +644,98 @@ impl MemPoolTxInfo { impl MemPoolDB { fn instantiate_mempool_db(conn: &mut DBConn) -> Result<(), db_error> { - let tx = tx_begin_immediate(conn)?; + let mut tx = tx_begin_immediate(conn)?; + // create initial mempool tables for cmd in MEMPOOL_INITIAL_SCHEMA { tx.execute_batch(cmd).map_err(db_error::SqliteError)?; } + // apply all migrations + MemPoolDB::apply_schema_migrations(&mut tx)?; + + // add all indexes + MemPoolDB::add_indexes(&mut tx)?; + tx.commit().map_err(db_error::SqliteError)?; Ok(()) } + /// Load the schema version from the database, if it's new enough to have such a version. + /// Returns Some(version) if a version can be loaded; None if not. + fn get_schema_version(conn: &DBConn) -> Result, db_error> { + let is_versioned = table_exists(conn, "schema_version")?; + if !is_versioned { + return Ok(None); + } + + let version = conn + .query_row( + "SELECT MAX(version) FROM schema_version", + rusqlite::NO_PARAMS, + |row| row.get(0), + ) + .optional()?; + + Ok(version) + } + + /// Apply all schema migrations up to the latest schema. 
+ fn apply_schema_migrations(tx: &mut DBTx) -> Result<(), db_error> { + loop { + let version = MemPoolDB::get_schema_version(&tx)?.unwrap_or(1); + match version { + 1 => { + MemPoolDB::instantiate_cost_estimator(tx)?; + } + 2 => { + MemPoolDB::instantiate_bloom_state(tx)?; + } + 3 => { + break; + } + _ => { + panic!("Unknown schema version {}", version); + } + } + } + Ok(()) + } + + /// Add indexes + fn add_indexes(tx: &mut DBTx) -> Result<(), db_error> { + for cmd in MEMPOOL_INDEXES { + tx.execute_batch(cmd).map_err(db_error::SqliteError)?; + } + Ok(()) + } + + /// Instantiate the on-disk counting bloom filter + fn instantiate_bloom_state(tx: &mut DBTx) -> Result<(), db_error> { + let node_hasher = BloomNodeHasher::new_random(); + let _ = BloomCounter::new( + tx, + BLOOM_COUNTER_TABLE, + BLOOM_COUNTER_ERROR_RATE, + MAX_BLOOM_COUNTER_TXS, + node_hasher, + )?; + + for cmd in MEMPOOL_SCHEMA_3_BLOOM_STATE { + tx.execute_batch(cmd).map_err(db_error::SqliteError)?; + } + Ok(()) + } + + /// Instantiate the cost estimator schema + fn instantiate_cost_estimator(tx: &DBTx) -> Result<(), db_error> { + for sql_exec in MEMPOOL_SCHEMA_2_COST_ESTIMATOR { + tx.execute_batch(sql_exec)?; + } + + Ok(()) + } + pub fn db_path(chainstate_root_path: &str) -> Result<String, db_error> { let mut path = PathBuf::from(chainstate_root_path); @@ -478,51 +797,27 @@ impl MemPoolDB { if create_flag { // instantiate! MemPoolDB::instantiate_mempool_db(&mut conn)?; + } else { + let mut tx = tx_begin_immediate(&mut conn)?; + MemPoolDB::apply_schema_migrations(&mut tx)?; + MemPoolDB::add_indexes(&mut tx)?; + tx.commit().map_err(db_error::SqliteError)?; } - let tx = conn.transaction()?; - let version = MemPoolDB::get_schema_version(&tx)?.unwrap_or(1); - - if version < 2 { - MemPoolDB::apply_schema_2(&tx)?; - } - - tx.commit()?; + let bloom_counter = BloomCounter::<BloomNodeHasher>::try_load(&conn, BLOOM_COUNTER_TABLE)? + .ok_or(db_error::Other(format!("Failed to load bloom counter")))?; Ok(MemPoolDB { db: conn, path: db_path, - admitter, + admitter: admitter, + bloom_counter, + max_tx_tags: DEFAULT_MAX_TX_TAGS, cost_estimator, metric, }) } - fn get_schema_version(conn: &DBConn) -> Result<Option<u64>, db_error> { - let is_versioned = table_exists(conn, "schema_version")?; - if !is_versioned { - return Ok(None); - } - - let version = conn - .query_row( - "SELECT MAX(version) FROM schema_version", - rusqlite::NO_PARAMS, - |row| row.get(0), - ) - .optional()?; - - Ok(version) - } - - fn apply_schema_2(tx: &Transaction) -> Result<(), db_error> { - for sql_exec in MEMPOOL_SCHEMA_2 { - tx.execute_batch(sql_exec)?; - } - - Ok(()) - } - pub fn reset_last_known_nonces(&mut self) -> Result<(), db_error> { let sql = "UPDATE mempool SET last_known_origin_nonce = NULL, last_known_sponsor_nonce = NULL"; @@ -714,7 +1009,7 @@ impl MemPoolDB { /// highest-fee-first order. This method is interruptable -- in the `settings` struct, the /// caller may choose how long to spend iterating before this method stops. /// - /// Returns the number of transactions considered on success. + /// `todo` returns a boolean representing whether or not to keep iterating.
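+ ///
+ /// For example, a caller that only wants to count candidates (as the tests in
+ /// this change do) can pass a counting closure and keep returning `Ok(true)`:
+ ///
+ /// ```ignore
+ /// let mut count_txs = 0;
+ /// mempool.iterate_candidates::<_, ChainstateError, _>(
+ ///     clarity_conn,
+ ///     2,
+ ///     mempool_settings.clone(),
+ ///     |_, _available_tx, _| {
+ ///         count_txs += 1;
+ ///         Ok(true) // return Ok(false) to stop iterating early
+ ///     },
+ /// )?;
+ /// ```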
pub fn iterate_candidates<C, E, F>( &mut self, clarity_tx: &mut C, @@ -811,10 +1106,14 @@ impl MemPoolDB { pub fn tx_begin<'a>(&'a mut self) -> Result<MemPoolTx<'a>, db_error> { let tx = tx_begin_immediate(&mut self.db)?; - Ok(MemPoolTx::new(tx, &mut self.admitter)) + Ok(MemPoolTx::new( + tx, + &mut self.admitter, + &mut self.bloom_counter, + )) } - fn db_has_tx(conn: &DBConn, txid: &Txid) -> Result<bool, db_error> { + pub fn db_has_tx(conn: &DBConn, txid: &Txid) -> Result<bool, db_error> { query_row( conn, "SELECT 1 FROM mempool WHERE txid = ?1", @@ -895,7 +1194,7 @@ impl MemPoolDB { /// Get a transaction's metadata, given address and nonce, and whether the address is used as a sponsor or an origin. /// Faster than getting the MemPoolTxInfo, since no deserialization will be needed. /// Used to see if there exists a transaction with this info, so as to implement replace-by-fee - fn get_tx_metadata_by_address( + pub fn get_tx_metadata_by_address( conn: &DBConn, is_origin: bool, addr: &StacksAddress, @@ -959,8 +1258,9 @@ impl MemPoolDB { /// Add a transaction to the mempool. If it already exists, then replace it if the given fee /// is higher than the one that's already there. /// Carry out the mempool admission test before adding. - /// Don't call directly; use submit() - fn try_add_tx( + /// Don't call directly; use submit(). + /// This is `pub` only for testing. + pub fn try_add_tx( tx: &mut MemPoolTx, chainstate: &mut StacksChainState, consensus_hash: &ConsensusHash, @@ -1038,6 +1338,8 @@ impl MemPoolDB { return Err(MemPoolRejection::ConflictingNonceInMempool); } + tx.update_bloom_counter(height, &txid, prior_tx.as_ref().map(|tx| tx.txid.clone()))?; + let sql = "INSERT OR REPLACE INTO mempool ( txid, origin_address, @@ -1071,6 +1373,8 @@ impl MemPoolDB { tx.execute(sql, args) .map_err(|e| MemPoolRejection::DBError(db_error::SqliteError(e)))?; + tx.update_mempool_pager(&txid)?; + // broadcast drop event if a tx is being replaced if let (Some(prior_tx), Some(event_observer)) = (prior_tx, event_observer) { event_observer.mempool_txs_dropped(vec![prior_tx.txid], replace_reason); @@ -1360,1007 +1664,296 @@ impl MemPoolDB { } } } -} -#[cfg(test)] -mod tests { - use address::AddressHashMode; - use burnchains::Address; - use chainstate::burn::ConsensusHash; - use chainstate::stacks::db::test::chainstate_path; - use chainstate::stacks::db::test::instantiate_chainstate; - use chainstate::stacks::db::test::instantiate_chainstate_with_balances; - use chainstate::stacks::test::codec_all_transactions; - use chainstate::stacks::{ - db::blocks::MemPoolRejection, db::StacksChainState, index::MarfTrieId, CoinbasePayload, - Error as ChainstateError, SinglesigHashMode, SinglesigSpendingCondition, StacksPrivateKey, - StacksPublicKey, StacksTransaction, StacksTransactionSigner, TokenTransferMemo, - TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, - TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSmartContract, - TransactionSpendingCondition, TransactionVersion, - }; - use chainstate::stacks::{ - C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - }; - use core::mempool::MemPoolWalkSettings; - use core::FIRST_BURNCHAIN_CONSENSUS_HASH; - use core::FIRST_STACKS_BLOCK_HASH; - use net::Error as NetError; - use util::db::{DBConn, FromRow}; - use util::hash::Hash160; - use util::secp256k1::MessageSignature; - use util::{hash::hex_bytes, hash::to_hex, hash::*, log, secp256k1::*, strings::StacksString}; - use vm::{ - database::HeadersDB, - errors::Error as ClarityError, -
errors::RuntimeErrorType, - tests::TEST_BURN_STATE_DB, - types::{PrincipalData, QualifiedContractIdentifier}, - ClarityName, ContractName, Value, - }; - - use crate::codec::StacksMessageCodec; - use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash}; - use crate::types::chainstate::{ - StacksAddress, StacksBlockHeader, StacksBlockId, StacksMicroblockHeader, StacksWorkScore, - VRFSeed, - }; - use crate::types::proof::TrieHash; - use crate::{ - chainstate::stacks::db::StacksHeaderInfo, util::vrf::VRFProof, vm::costs::ExecutionCost, - }; - - use super::MemPoolDB; - - const FOO_CONTRACT: &'static str = "(define-public (foo) (ok 1)) - (define-public (bar (x uint)) (ok x))"; - const SK_1: &'static str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; - const SK_2: &'static str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; - const SK_3: &'static str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; - - #[test] - fn mempool_db_init() { - let _chainstate = instantiate_chainstate(false, 0x80000000, "mempool_db_init"); - let chainstate_path = chainstate_path("mempool_db_init"); - let _mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); - } - - fn make_block( - chainstate: &mut StacksChainState, - block_consensus: ConsensusHash, - parent: &(ConsensusHash, BlockHeaderHash), - burn_height: u64, - block_height: u64, - ) -> (ConsensusHash, BlockHeaderHash) { - let (mut chainstate_tx, clar_tx) = chainstate.chainstate_tx_begin().unwrap(); - - let anchored_header = StacksBlockHeader { - version: 1, - total_work: StacksWorkScore { - work: block_height, - burn: 1, - }, - proof: VRFProof::empty(), - parent_block: parent.1.clone(), - parent_microblock: BlockHeaderHash([0; 32]), - parent_microblock_sequence: 0, - tx_merkle_root: Sha512Trunc256Sum::empty(), - state_index_root: TrieHash::from_empty_data(), - microblock_pubkey_hash: Hash160([0; 20]), - }; - - let block_hash = anchored_header.block_hash(); - - let c_tx = StacksChainState::chainstate_block_begin( - &chainstate_tx, - clar_tx, - &TEST_BURN_STATE_DB, - &parent.0, - &parent.1, - &block_consensus, - &block_hash, - ); - - let new_tip_info = StacksHeaderInfo { - anchored_header, - microblock_tail: None, - index_root: TrieHash::from_empty_data(), - block_height, - consensus_hash: block_consensus.clone(), - burn_header_hash: BurnchainHeaderHash([0; 32]), - burn_header_height: burn_height as u32, - burn_header_timestamp: 0, - anchored_block_size: 1, - }; - - c_tx.commit_block(); - - let new_index_hash = StacksBlockId::new(&block_consensus, &block_hash); - - chainstate_tx - .put_indexed_begin(&StacksBlockId::new(&parent.0, &parent.1), &new_index_hash) - .unwrap(); - - StacksChainState::insert_stacks_block_header( - &mut chainstate_tx, - &new_index_hash, - &new_tip_info, - &ExecutionCost::zero(), - ) - .unwrap(); - - chainstate_tx.commit().unwrap(); - - (block_consensus, block_hash) + /// Get the bloom filter that represents the set of recent transactions we have + pub fn get_txid_bloom_filter(&self) -> Result<BloomFilter<BloomNodeHasher>, db_error> { + self.bloom_counter.to_bloom_filter(&self.conn()) } - #[test] - fn mempool_walk_over_fork() { - let mut chainstate = instantiate_chainstate_with_balances( - false, - 0x80000000, - "mempool_walk_over_fork", - vec![], - ); - - // genesis -> b_1* -> b_2* - // \-> b_3 -> b_4 - // - // *'d blocks accept transactions, - // try to walk at b_4, we should be able to find - // the transaction at b_1 - - let b_1 = make_block( - &mut chainstate,
ConsensusHash([0x1; 20]), - &( - FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), - FIRST_STACKS_BLOCK_HASH.clone(), - ), - 1, - 1, - ); - let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); - let b_5 = make_block(&mut chainstate, ConsensusHash([0x5; 20]), &b_2, 5, 3); - let b_3 = make_block(&mut chainstate, ConsensusHash([0x3; 20]), &b_1, 3, 2); - let b_4 = make_block(&mut chainstate, ConsensusHash([0x4; 20]), &b_3, 4, 3); - - let chainstate_path = chainstate_path("mempool_walk_over_fork"); - let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); - - let mut all_txs = codec_all_transactions( - &TransactionVersion::Testnet, - 0x80000000, - &TransactionAnchorMode::Any, - &TransactionPostConditionMode::Allow, - ); - - let blocks_to_broadcast_in = [&b_1, &b_2, &b_4]; - let mut txs = [ - all_txs.pop().unwrap(), - all_txs.pop().unwrap(), - all_txs.pop().unwrap(), - ]; - for tx in txs.iter_mut() { - tx.set_tx_fee(123); - } - - for ix in 0..3 { - let mut mempool_tx = mempool.tx_begin().unwrap(); - - let block = &blocks_to_broadcast_in[ix]; - let good_tx = &txs[ix]; - - let origin_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&[ix as u8; 32]), - }; - let sponsor_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&[0x80 | (ix as u8); 32]), - }; - - let txid = good_tx.txid(); - let tx_bytes = good_tx.serialize_to_vec(); - let tx_fee = good_tx.get_tx_fee(); - - let height = 1 + ix as u64; - - let origin_nonce = 0; // (2 * ix + i) as u64; - let sponsor_nonce = 0; // (2 * ix + i) as u64; - - assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &block.0, - &block.1, - txid, - tx_bytes, - tx_fee, - height, - &origin_address, - origin_nonce, - &sponsor_address, - sponsor_nonce, - None, - ) - .unwrap(); - - assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - mempool_tx.commit().unwrap(); + /// Find maximum height represented in the mempool + pub fn get_max_height(conn: &DBConn) -> Result<Option<u64>, db_error> { + let sql = "SELECT 1 FROM mempool WHERE height >= 0"; + let count = query_rows::<i64, _>(conn, sql, NO_PARAMS)?.len(); + if count == 0 { + Ok(None) + } else { + let sql = "SELECT MAX(height) FROM mempool"; + Ok(Some(query_int(conn, sql, NO_PARAMS)? as u64)) } - - // genesis -> b_1* -> b_2* -> b_5 - // \-> b_3 -> b_4 - // - // *'d blocks accept transactions, - // try to walk at b_4, we should be able to find - // the transaction at b_1 - - let mut mempool_settings = MemPoolWalkSettings::default(); - mempool_settings.min_tx_fee = 10; - - chainstate.with_read_only_clarity_tx( - &TEST_BURN_STATE_DB, - &StacksBlockHeader::make_index_block_hash(&b_2.0, &b_2.1), - |clarity_conn| { - let mut count_txs = 0; - mempool - .iterate_candidates::<_, ChainstateError, _>( - clarity_conn, - 2, - mempool_settings.clone(), - |_, available_tx, _| { - count_txs += 1; - Ok(true) - }, - ) - .unwrap(); - assert_eq!( - count_txs, 3, - "Mempool should find three transactions from b_2" - ); - }, - ); - - // Now that the mempool has iterated over those transactions, its view of the - // nonce for the origin address should have changed. Now it should find *no* transactions.
- chainstate.with_read_only_clarity_tx( - &TEST_BURN_STATE_DB, - &StacksBlockHeader::make_index_block_hash(&b_2.0, &b_2.1), - |clarity_conn| { - let mut count_txs = 0; - mempool - .iterate_candidates::<_, ChainstateError, _>( - clarity_conn, - 2, - mempool_settings.clone(), - |_, available_tx, _| { - count_txs += 1; - Ok(true) - }, - ) - .unwrap(); - assert_eq!(count_txs, 0, "Mempool should find no transactions"); - }, - ); - - mempool - .reset_last_known_nonces() - .expect("Should be able to reset nonces"); - - chainstate.with_read_only_clarity_tx( - &TEST_BURN_STATE_DB, - &StacksBlockHeader::make_index_block_hash(&b_5.0, &b_5.1), - |clarity_conn| { - let mut count_txs = 0; - mempool - .iterate_candidates::<_, ChainstateError, _>( - clarity_conn, - 3, - mempool_settings.clone(), - |_, available_tx, _| { - count_txs += 1; - Ok(true) - }, - ) - .unwrap(); - assert_eq!( - count_txs, 3, - "Mempool should find three transactions from b_5" - ); - }, - ); - - mempool - .reset_last_known_nonces() - .expect("Should be able to reset nonces"); - - // The mempool iterator no longer does any consideration of what block accepted - // the transaction, so b_3 should have the same view. - chainstate.with_read_only_clarity_tx( - &TEST_BURN_STATE_DB, - &StacksBlockHeader::make_index_block_hash(&b_3.0, &b_3.1), - |clarity_conn| { - let mut count_txs = 0; - mempool - .iterate_candidates::<_, ChainstateError, _>( - clarity_conn, - 2, - mempool_settings.clone(), - |_, available_tx, _| { - count_txs += 1; - Ok(true) - }, - ) - .unwrap(); - assert_eq!( - count_txs, 3, - "Mempool should find three transactions from b_3" - ); - }, - ); - - mempool - .reset_last_known_nonces() - .expect("Should be able to reset nonces"); - - chainstate.with_read_only_clarity_tx( - &TEST_BURN_STATE_DB, - &StacksBlockHeader::make_index_block_hash(&b_4.0, &b_4.1), - |clarity_conn| { - let mut count_txs = 0; - mempool - .iterate_candidates::<_, ChainstateError, _>( - clarity_conn, - 3, - mempool_settings.clone(), - |_, available_tx, _| { - count_txs += 1; - Ok(true) - }, - ) - .unwrap(); - assert_eq!( - count_txs, 3, - "Mempool should find three transactions from b_4" - ); - }, - ); - - mempool - .reset_last_known_nonces() - .expect("Should be able to reset nonces"); - - // let's test replace-across-fork while we're here. 
- // first try to replace a tx in b_2 in b_1 - should fail because they are in the same fork - let mut mempool_tx = mempool.tx_begin().unwrap(); - let block = &b_1; - let tx = &txs[1]; - let origin_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&[1; 32]), - }; - let sponsor_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&[0x81; 32]), - }; - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let tx_fee = tx.get_tx_fee(); - - let height = 3; - let origin_nonce = 0; - let sponsor_nonce = 0; - - // make sure that we already have the transaction we're testing for replace-across-fork - assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - assert!(MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &block.0, - &block.1, - txid, - tx_bytes, - tx_fee, - height, - &origin_address, - origin_nonce, - &sponsor_address, - sponsor_nonce, - None, - ) - .is_err()); - - assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - mempool_tx.commit().unwrap(); - - // now try replace-across-fork from b_2 to b_4 - // check that the number of transactions at b_2 and b_4 starts at 1 each - assert_eq!( - MemPoolDB::get_num_tx_at_block(&mempool.db, &b_4.0, &b_4.1).unwrap(), - 1 - ); - assert_eq!( - MemPoolDB::get_num_tx_at_block(&mempool.db, &b_2.0, &b_2.1).unwrap(), - 1 - ); - let mut mempool_tx = mempool.tx_begin().unwrap(); - let block = &b_4; - let tx = &txs[1]; - let origin_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&[0; 32]), - }; - let sponsor_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&[1; 32]), - }; - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let tx_fee = tx.get_tx_fee(); - - let height = 3; - let origin_nonce = 1; - let sponsor_nonce = 1; - - // make sure that we already have the transaction we're testing for replace-across-fork - assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &block.0, - &block.1, - txid, - tx_bytes, - tx_fee, - height, - &origin_address, - origin_nonce, - &sponsor_address, - sponsor_nonce, - None, - ) - .unwrap(); - - assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - mempool_tx.commit().unwrap(); - - // after replace-across-fork, tx[1] should have moved from the b_2->b_5 fork to b_4 - assert_eq!( - MemPoolDB::get_num_tx_at_block(&mempool.db, &b_4.0, &b_4.1).unwrap(), - 2 - ); - assert_eq!( - MemPoolDB::get_num_tx_at_block(&mempool.db, &b_2.0, &b_2.1).unwrap(), - 0 - ); } - #[test] - fn mempool_do_not_replace_tx() { - let mut chainstate = instantiate_chainstate_with_balances( - false, - 0x80000000, - "mempool_do_not_replace_tx", - vec![], - ); - - // genesis -> b_1 -> b_2 - // \-> b_3 - // - let b_1 = make_block( - &mut chainstate, - ConsensusHash([0x1; 20]), - &( - FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), - FIRST_STACKS_BLOCK_HASH.clone(), - ), - 1, - 1, - ); - let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); - let b_3 = make_block(&mut chainstate, ConsensusHash([0x3; 20]), &b_1, 1, 1); - - let chainstate_path = chainstate_path("mempool_do_not_replace_tx"); - let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); - - let mut txs = codec_all_transactions( - &TransactionVersion::Testnet, - 0x80000000, - &TransactionAnchorMode::Any, - &TransactionPostConditionMode::Allow, - ); - let mut tx = txs.pop().unwrap(); - - let mut mempool_tx = mempool.tx_begin().unwrap(); - - // do an initial 
insert - let origin_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&[0; 32]), - }; - let sponsor_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&[1; 32]), + /// Get the transaction ID list that represents the set of transactions that are represented in + /// the bloom counter. + pub fn get_bloom_txids(&self) -> Result<Vec<Txid>, db_error> { + let max_height = match MemPoolDB::get_max_height(&self.conn())? { + Some(h) => h, + None => { + // mempool is empty + return Ok(vec![]); + } }; + let min_height = max_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64); + let sql = "SELECT mempool.txid FROM mempool WHERE height > ?1 AND height <= ?2 AND NOT EXISTS (SELECT 1 FROM removed_txids WHERE txid = mempool.txid)"; + let args: &[&dyn ToSql] = &[&u64_to_sql(min_height)?, &u64_to_sql(max_height)?]; + query_rows(&self.conn(), sql, args) + } - - tx.set_tx_fee(123); - - // test insert - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - - let tx_fee = tx.get_tx_fee(); - let height = 100; + /// Get the transaction tag list that represents the set of recent transactions we have. + /// Generate them with our node-local seed so that our txtag list is different from anyone + /// else's, with high probability. + pub fn get_txtags(&self, seed: &[u8]) -> Result<Vec<TxTag>, db_error> { + self.get_bloom_txids().map(|txid_list| { + txid_list + .iter() + .map(|txid| TxTag::from(seed, txid)) + .collect() + }) + } - let origin_nonce = tx.get_origin_nonce(); - let sponsor_nonce = match tx.get_sponsor_nonce() { - Some(n) => n, - None => origin_nonce, + /// How many recent transactions are there -- i.e. within BLOOM_COUNTER_DEPTH block heights of + /// the chain tip? + pub fn get_num_recent_txs(conn: &DBConn) -> Result<u64, db_error> { + let max_height = match MemPoolDB::get_max_height(conn)? { + Some(h) => h, + None => { + // mempool is empty + return Ok(0); + } }; + let min_height = max_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64); + let sql = "SELECT COUNT(txid) FROM mempool WHERE height > ?1 AND height <= ?2"; + let args: &[&dyn ToSql] = &[&u64_to_sql(min_height)?, &u64_to_sql(max_height)?]; + query_int(conn, sql, args).map(|cnt| cnt as u64) + } - assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &b_1.0, - &b_1.1, - txid, - tx_bytes, - tx_fee, - height, - &origin_address, - origin_nonce, - &sponsor_address, - sponsor_nonce, - None, - ) - .unwrap(); - - assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - let prior_txid = txid.clone(); - - // now, let's try inserting again, with a lower fee, but at a different block hash - tx.set_tx_fee(100); - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let tx_fee = tx.get_tx_fee(); - let height = 100; + /// Make a mempool sync request.
+ /// If sufficiently sparse, use a MemPoolSyncData::TxTags variant + /// Otherwise, use a MemPoolSyncData::BloomFilter variant + pub fn make_mempool_sync_data(&self) -> Result<MemPoolSyncData, db_error> { + let num_tags = MemPoolDB::get_num_recent_txs(self.conn())?; + if num_tags < self.max_tx_tags.into() { + let seed = self.bloom_counter.get_seed().clone(); + let tags = self.get_txtags(&seed)?; + Ok(MemPoolSyncData::TxTags(seed, tags)) + } else { + Ok(MemPoolSyncData::BloomFilter(self.get_txid_bloom_filter()?)) + } + } - let err_resp = MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &b_2.0, - &b_2.1, - txid, - tx_bytes, - tx_fee, - height, - &origin_address, - origin_nonce, - &sponsor_address, - sponsor_nonce, - None, - ) - .unwrap_err(); - assert!(match err_resp { - MemPoolRejection::ConflictingNonceInMempool => true, - _ => false, - }); - - assert!(MemPoolDB::db_has_tx(&mempool_tx, &prior_txid).unwrap()); - assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - } - - #[test] - fn mempool_db_load_store_replace_tx() { - let mut chainstate = - instantiate_chainstate(false, 0x80000000, "mempool_db_load_store_replace_tx"); - let chainstate_path = chainstate_path("mempool_db_load_store_replace_tx"); - let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); - - let mut txs = codec_all_transactions( - &TransactionVersion::Testnet, - 0x80000000, - &TransactionAnchorMode::Any, - &TransactionPostConditionMode::Allow, - ); - let num_txs = txs.len() as u64; + /// Get the hashed txid for a txid + pub fn get_randomized_txid(&self, txid: &Txid) -> Result<Option<Txid>, db_error> { + let sql = "SELECT hashed_txid FROM randomized_txids WHERE txid = ?1 LIMIT 1"; + let args: &[&dyn ToSql] = &[txid]; + query_row(&self.conn(), sql, args) + } - let mut mempool_tx = mempool.tx_begin().unwrap(); + /// Get the next batch of transactions from our mempool that are *not* represented in the given + /// MemPoolSyncData. Transactions are ordered lexicographically by randomized_txids.hashed_txid, since this allows us + /// to use the txid as a cursor while ensuring that each node returns txids in a deterministic random order + /// (so if some nodes are configured to return fewer than MAX_BLOOM_COUNTER_TXS transactions, + /// a requesting node will still have a good chance of getting something useful). + /// Also, return the next value to pass for `last_randomized_txid` to load the next page. + /// Also, return the number of rows considered.
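+ ///
+ /// As an illustration, a hypothetical driver (the `mempool`, `remote_sync_data`,
+ /// `tip_height`, and `relay` names below are assumptions, not part of this change)
+ /// could page through everything a peer is missing like this:
+ ///
+ /// ```ignore
+ /// let mut page_cursor = Txid([0u8; 32]);
+ /// loop {
+ ///     let (txs, next_page, _rows_visited) = mempool.find_next_missing_transactions(
+ ///         &remote_sync_data,
+ ///         tip_height,
+ ///         &page_cursor,
+ ///         128, // max_txs
+ ///         128, // max_run
+ ///     )?;
+ ///     relay(txs);
+ ///     match next_page {
+ ///         Some(cursor) => page_cursor = cursor,
+ ///         None => break, // no more rows in the window
+ ///     }
+ /// }
+ /// ```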
+ pub fn find_next_missing_transactions( + &self, + data: &MemPoolSyncData, + height: u64, + last_randomized_txid: &Txid, + max_txs: u64, + max_run: u64, + ) -> Result<(Vec<StacksTransaction>, Option<Txid>, u64), db_error> { + let mut ret = vec![]; + let sql = "SELECT mempool.txid AS txid, mempool.tx AS tx, randomized_txids.hashed_txid AS hashed_txid \ + FROM mempool JOIN randomized_txids \ + ON mempool.txid = randomized_txids.txid \ + WHERE randomized_txids.hashed_txid > ?1 \ + AND mempool.height > ?2 \ + AND NOT EXISTS \ + (SELECT 1 FROM removed_txids WHERE txid = mempool.txid) \ + ORDER BY randomized_txids.hashed_txid ASC LIMIT ?3"; - eprintln!("add all txs"); - for (i, mut tx) in txs.drain(..).enumerate() { - // make sure each address is unique per tx (not the case in codec_all_transactions) - let origin_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&i.to_be_bytes()), - }; - let sponsor_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&(i + 1).to_be_bytes()), - }; + let args: &[&dyn ToSql] = &[ + &last_randomized_txid, + &u64_to_sql(height.saturating_sub(BLOOM_COUNTER_DEPTH as u64))?, + &u64_to_sql(max_run)?, + ]; - tx.set_tx_fee(123); + let mut tags_table = HashSet::new(); + if let MemPoolSyncData::TxTags(_, ref tags) = data { + for tag in tags.iter() { + tags_table.insert(tag.clone()); + } + } - // test insert + let mut stmt = self.conn().prepare(sql)?; + let mut rows = stmt.query(args)?; + let mut num_rows_visited = 0; + let mut next_page = None; + while let Some(row) = rows.next()? { + if num_rows_visited >= max_run { + break; + } - let txid = tx.txid(); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - let expected_tx = tx.clone(); + let txid = Txid::from_column(row, "txid")?; + num_rows_visited += 1; - let tx_fee = tx.get_tx_fee(); - let height = 100; - let origin_nonce = tx.get_origin_nonce(); - let sponsor_nonce = match tx.get_sponsor_nonce() { - Some(n) => n, - None => origin_nonce, - }; - let len = tx_bytes.len() as u64; - - assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &ConsensusHash([0x1; 20]), - &BlockHeaderHash([0x2; 32]), - txid, - tx_bytes, - tx_fee, - height, - &origin_address, - origin_nonce, - &sponsor_address, - sponsor_nonce, - None, - ) - .unwrap(); - - assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - // test retrieval - let tx_info_opt = MemPoolDB::get_tx(&mempool_tx, &txid).unwrap(); - let tx_info = tx_info_opt.unwrap(); - - assert_eq!(tx_info.tx, expected_tx); - assert_eq!(tx_info.metadata.len, len); - assert_eq!(tx_info.metadata.tx_fee, 123); - assert_eq!(tx_info.metadata.origin_address, origin_address); - assert_eq!(tx_info.metadata.origin_nonce, origin_nonce); - assert_eq!(tx_info.metadata.sponsor_address, sponsor_address); - assert_eq!(tx_info.metadata.sponsor_nonce, sponsor_nonce); - assert_eq!(tx_info.metadata.consensus_hash, ConsensusHash([0x1; 20])); - assert_eq!( - tx_info.metadata.block_header_hash, - BlockHeaderHash([0x2; 32]) + let hashed_txid = Txid::from_column(row, "hashed_txid")?; + test_debug!( + "Consider txid {} ({}) at or after {}", + &txid, + &hashed_txid, + last_randomized_txid ); - assert_eq!(tx_info.metadata.block_height, height); - - // test replace-by-fee with a higher fee - let old_txid = txid; + next_page = Some(hashed_txid); - tx.set_tx_fee(124); - assert!(txid != tx.txid()); - - let txid = tx.txid(); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - let
expected_tx = tx.clone(); - let tx_fee = tx.get_tx_fee(); - - assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - let tx_info_before = MemPoolDB::get_tx_metadata_by_address( - &mempool_tx, - true, - &origin_address, - origin_nonce, - ) - .unwrap() - .unwrap(); - assert_eq!(tx_info_before, tx_info.metadata); - - MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &ConsensusHash([0x1; 20]), - &BlockHeaderHash([0x2; 32]), - txid, - tx_bytes, - tx_fee, - height, - &origin_address, - origin_nonce, - &sponsor_address, - sponsor_nonce, - None, - ) - .unwrap(); - - // was replaced - assert!(!MemPoolDB::db_has_tx(&mempool_tx, &old_txid).unwrap()); - assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + let contains = match data { + MemPoolSyncData::BloomFilter(ref bf) => bf.contains_raw(&txid.0), + MemPoolSyncData::TxTags(ref seed, ..) => { + tags_table.contains(&TxTag::from(seed, &txid)) + } + }; + if contains { + // remote peer already has this one + continue; + } - let tx_info_after = MemPoolDB::get_tx_metadata_by_address( - &mempool_tx, - true, - &origin_address, - origin_nonce, - ) - .unwrap() - .unwrap(); - assert!(tx_info_after != tx_info.metadata); - - // test retrieval -- transaction should have been replaced because it has a higher - // estimated fee - let tx_info_opt = MemPoolDB::get_tx(&mempool_tx, &txid).unwrap(); - - let tx_info = tx_info_opt.unwrap(); - assert_eq!(tx_info.metadata, tx_info_after); - - assert_eq!(tx_info.tx, expected_tx); - assert_eq!(tx_info.metadata.len, len); - assert_eq!(tx_info.metadata.tx_fee, 124); - assert_eq!(tx_info.metadata.origin_address, origin_address); - assert_eq!(tx_info.metadata.origin_nonce, origin_nonce); - assert_eq!(tx_info.metadata.sponsor_address, sponsor_address); - assert_eq!(tx_info.metadata.sponsor_nonce, sponsor_nonce); - assert_eq!(tx_info.metadata.consensus_hash, ConsensusHash([0x1; 20])); - assert_eq!( - tx_info.metadata.block_header_hash, - BlockHeaderHash([0x2; 32]) - ); - assert_eq!(tx_info.metadata.block_height, height); - - // test replace-by-fee with a lower fee - let old_txid = txid; - - tx.set_tx_fee(122); - assert!(txid != tx.txid()); - - let txid = tx.txid(); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - let _expected_tx = tx.clone(); - let tx_fee = tx.get_tx_fee(); - - assert!(match MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &ConsensusHash([0x1; 20]), - &BlockHeaderHash([0x2; 32]), - txid, - tx_bytes, - tx_fee, - height, - &origin_address, - origin_nonce, - &sponsor_address, - sponsor_nonce, - None, - ) - .unwrap_err() - { - MemPoolRejection::ConflictingNonceInMempool => true, - _ => false, - }); + let tx_bytes: Vec<u8> = row.get_unwrap("tx"); + let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]) + .map_err(|_e| db_error::ParseError)?; - // was NOT replaced - assert!(MemPoolDB::db_has_tx(&mempool_tx, &old_txid).unwrap()); - assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + test_debug!("Returning txid {}", &txid); + ret.push(tx); + if (ret.len() as u64) >= max_txs { + break; + } } - mempool_tx.commit().unwrap(); - eprintln!("get all txs"); - let txs = MemPoolDB::get_txs_after( - &mempool.db, - &ConsensusHash([0x1; 20]), - &BlockHeaderHash([0x2; 32]), - 0, - num_txs, - ) - .unwrap(); - assert_eq!(txs.len() as u64, num_txs); - - eprintln!("get empty txs"); - let txs = MemPoolDB::get_txs_after( - &mempool.db, - &ConsensusHash([0x1; 20]), - &BlockHeaderHash([0x3; 32]), - 0, - num_txs, - ) - .unwrap(); -
assert_eq!(txs.len(), 0); - - eprintln!("get empty txs"); - let txs = MemPoolDB::get_txs_after( - &mempool.db, - &ConsensusHash([0x2; 20]), - &BlockHeaderHash([0x2; 32]), - 0, - num_txs, - ) - .unwrap(); - assert_eq!(txs.len(), 0); - - eprintln!("garbage-collect"); - let mut mempool_tx = mempool.tx_begin().unwrap(); - MemPoolDB::garbage_collect(&mut mempool_tx, 101, None).unwrap(); - mempool_tx.commit().unwrap(); - - let txs = MemPoolDB::get_txs_after( - &mempool.db, - &ConsensusHash([0x1; 20]), - &BlockHeaderHash([0x2; 32]), - 0, - num_txs, - ) - .unwrap(); - assert_eq!(txs.len(), 0); - } - - #[test] - fn mempool_db_test_rbf() { - let mut chainstate = instantiate_chainstate(false, 0x80000000, "mempool_db_test_rbf"); - let chainstate_path = chainstate_path("mempool_db_test_rbf"); - let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); - - // create initial transaction - let mut mempool_tx = mempool.tx_begin().unwrap(); - let spending_condition = - TransactionSpendingCondition::Singlesig(SinglesigSpendingCondition { - signer: Hash160([0x11; 20]), - hash_mode: SinglesigHashMode::P2PKH, - key_encoding: TransactionPublicKeyEncoding::Uncompressed, - nonce: 123, - tx_fee: 456, - signature: MessageSignature::from_raw(&vec![0xff; 65]), - }); - let stx_address = StacksAddress { - version: 1, - bytes: Hash160([0xff; 20]), - }; - let payload = TransactionPayload::TokenTransfer( - PrincipalData::from(QualifiedContractIdentifier { - issuer: stx_address.into(), - name: "hello-contract-name".into(), - }), - 123, - TokenTransferMemo([0u8; 34]), - ); - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::Standard(spending_condition.clone()), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: Vec::new(), - payload, - }; + /// Stream transaction data. + /// Send back one transaction at a time. + pub fn stream_txs<W: Write>( + &self, + fd: &mut W, + query: &mut TxStreamData, + count: u64, + ) -> Result<u64, ChainstateError> { + let mut num_written = 0; + while num_written < count { + // write out bufferred tx + let start = query.tx_buf_ptr; + let end = cmp::min(query.tx_buf.len(), ((start as u64) + count) as usize); + fd.write_all(&query.tx_buf[start..end]) + .map_err(ChainstateError::WriteError)?; + + let nw = end.saturating_sub(start) as u64; + + query.tx_buf_ptr = end; + num_written += nw; + + if query.tx_buf_ptr >= query.tx_buf.len() { + if query.corked { + // we're done + test_debug!( + "Finished streaming txs; last page was {:?}", + &query.last_randomized_txid + ); + break; + } - let i: usize = 0; - let origin_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&i.to_be_bytes()), - }; - let sponsor_address = StacksAddress { - version: 22, - bytes: Hash160::from_data(&(i + 1).to_be_bytes()), - }; + if query.num_txs >= query.max_txs { + // no more space in this stream + debug!( + "No more space in this query after {:?}.
Corking tx stream.", + &query.last_randomized_txid + ); + + // send the next page ID + query.tx_buf_ptr = 0; + query.tx_buf.clear(); + query.corked = true; + + query + .last_randomized_txid + .consensus_serialize(&mut query.tx_buf) + .map_err(ChainstateError::CodecError)?; + continue; + } - assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &ConsensusHash([0x1; 20]), - &BlockHeaderHash([0x2; 32]), - txid, - tx_bytes, - tx_fee, - height, - &origin_address, - origin_nonce, - &sponsor_address, - sponsor_nonce, - None, - ) - .unwrap(); - assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - // test retrieval of initial transaction - let tx_info_opt = MemPoolDB::get_tx(&mempool_tx, &txid).unwrap(); - let tx_info = tx_info_opt.unwrap(); - - // test replace-by-fee with a higher fee, where the payload is smaller - let old_txid = txid; - let old_tx_fee = tx_fee; - - tx.set_tx_fee(124); - tx.payload = TransactionPayload::TokenTransfer( - stx_address.into(), - 123, - TokenTransferMemo([0u8; 34]), - ); - assert!(txid != tx.txid()); - let txid = tx.txid(); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - let expected_tx = tx.clone(); - let tx_fee = tx.get_tx_fee(); - let second_len = tx_bytes.len() as u64; + // load next + let remaining = query.max_txs.saturating_sub(query.num_txs); + let (next_txs, next_last_randomized_txid_opt, num_rows_visited) = self + .find_next_missing_transactions( + &query.tx_query, + query.height, + &query.last_randomized_txid, + 1, + remaining, + )?; - // these asserts are to ensure we are using the fee directly, not the fee rate - assert!(second_len < first_len); - assert!(second_len * tx_fee < first_len * old_tx_fee); - assert!(tx_fee > old_tx_fee); - assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + debug!( + "Streaming mempool propagation stepped"; + "rows_visited" => num_rows_visited, + "last_rand_txid" => %query.last_randomized_txid, + "num_txs" => query.num_txs, + "max_txs" => query.max_txs + ); - let tx_info_before = - MemPoolDB::get_tx_metadata_by_address(&mempool_tx, true, &origin_address, origin_nonce) - .unwrap() - .unwrap(); - assert_eq!(tx_info_before, tx_info.metadata); + query.num_txs += num_rows_visited; + if next_txs.len() > 0 { + query.tx_buf_ptr = 0; + query.tx_buf.clear(); - MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &ConsensusHash([0x1; 20]), - &BlockHeaderHash([0x2; 32]), - txid, - tx_bytes, - tx_fee, - height, - &origin_address, - origin_nonce, - &sponsor_address, - sponsor_nonce, - None, - ) - .unwrap(); - - // check that the transaction was replaced - assert!(!MemPoolDB::db_has_tx(&mempool_tx, &old_txid).unwrap()); - assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); - - let tx_info_after = - MemPoolDB::get_tx_metadata_by_address(&mempool_tx, true, &origin_address, origin_nonce) - .unwrap() - .unwrap(); - assert!(tx_info_after != tx_info.metadata); - - // test retrieval -- transaction should have been replaced because it has a higher fee - let tx_info_opt = MemPoolDB::get_tx(&mempool_tx, &txid).unwrap(); - let tx_info = tx_info_opt.unwrap(); - assert_eq!(tx_info.metadata, tx_info_after); - assert_eq!(tx_info.metadata.len, second_len); - assert_eq!(tx_info.metadata.tx_fee, 124); + for next_tx in next_txs.iter() { + next_tx + .consensus_serialize(&mut query.tx_buf) + .map_err(ChainstateError::CodecError)?; + } + if let Some(next_last_randomized_txid) = next_last_randomized_txid_opt { + 
query.last_randomized_txid = next_last_randomized_txid; + } else { + test_debug!( + "No more txs after {}", + &next_txs + .last() + .map(|tx| tx.txid()) + .unwrap_or(Txid([0u8; 32])) + ); + break; + } + } else if let Some(next_txid) = next_last_randomized_txid_opt { + test_debug!( + "No rows returned for {}; cork tx stream with next page {}", + &query.last_randomized_txid, + &next_txid + ); + + // no rows found + query.last_randomized_txid = next_txid; + + // send the next page ID + query.tx_buf_ptr = 0; + query.tx_buf.clear(); + query.corked = true; + + query + .last_randomized_txid + .consensus_serialize(&mut query.tx_buf) + .map_err(ChainstateError::CodecError)?; + } else if next_last_randomized_txid_opt.is_none() { + // no more transactions + test_debug!( + "No more txs to send after {:?}; corking stream", + &query.last_randomized_txid + ); + + query.tx_buf_ptr = 0; + query.tx_buf.clear(); + query.corked = true; + } + } + } + Ok(num_written) + } } diff --git a/src/core/mod.rs b/src/core/mod.rs index 33709e6e98..6e187c9ce1 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -30,6 +30,9 @@ pub use self::mempool::MemPoolDB; pub mod mempool; +#[cfg(test)] +pub mod tests; + use std::cmp::Ord; use std::cmp::Ordering; use std::cmp::PartialOrd; diff --git a/src/core/tests/mod.rs b/src/core/tests/mod.rs new file mode 100644 index 0000000000..a8856ab580 --- /dev/null +++ b/src/core/tests/mod.rs @@ -0,0 +1,2020 @@ +// Copyright (C) 2013-2021 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
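+
+// NOTE: these tests previously lived inline in `src/core/mempool.rs` (see the
+// deletions above); they are collected here as a standalone module, together
+// with new coverage for the bloom-counter sync data and tx streaming paths.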
+ +use std::cmp; +use std::collections::HashSet; +use std::io; + +use address::AddressHashMode; +use burnchains::Address; +use burnchains::Txid; +use chainstate::burn::ConsensusHash; +use chainstate::stacks::db::test::chainstate_path; +use chainstate::stacks::db::test::instantiate_chainstate; +use chainstate::stacks::db::test::instantiate_chainstate_with_balances; +use chainstate::stacks::db::StreamCursor; +use chainstate::stacks::test::codec_all_transactions; +use chainstate::stacks::{ + db::blocks::MemPoolRejection, db::StacksChainState, index::MarfTrieId, CoinbasePayload, + Error as ChainstateError, SinglesigHashMode, SinglesigSpendingCondition, StacksPrivateKey, + StacksPublicKey, StacksTransaction, StacksTransactionSigner, TokenTransferMemo, + TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, + TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSmartContract, + TransactionSpendingCondition, TransactionVersion, +}; +use chainstate::stacks::{ + C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, +}; +use core::mempool::MemPoolWalkSettings; +use core::mempool::TxTag; +use core::mempool::{BLOOM_COUNTER_DEPTH, BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS}; +use core::FIRST_BURNCHAIN_CONSENSUS_HASH; +use core::FIRST_STACKS_BLOCK_HASH; +use net::Error as NetError; +use net::HttpResponseType; +use net::MemPoolSyncData; +use util::bloom::test::setup_bloom_counter; +use util::bloom::*; +use util::db::{tx_begin_immediate, DBConn, FromRow}; +use util::get_epoch_time_ms; +use util::hash::Hash160; +use util::secp256k1::MessageSignature; +use util::{hash::hex_bytes, hash::to_hex, hash::*, log, secp256k1::*, strings::StacksString}; +use vm::{ + database::HeadersDB, + errors::Error as ClarityError, + errors::RuntimeErrorType, + tests::TEST_BURN_STATE_DB, + types::{PrincipalData, QualifiedContractIdentifier}, + ClarityName, ContractName, Value, +}; + +use crate::codec::StacksMessageCodec; +use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash}; +use crate::types::chainstate::{ + StacksAddress, StacksBlockHeader, StacksBlockId, StacksMicroblockHeader, StacksWorkScore, + VRFSeed, +}; +use crate::types::proof::TrieHash; +use crate::{ + chainstate::stacks::db::StacksHeaderInfo, util::vrf::VRFProof, vm::costs::ExecutionCost, +}; + +use super::MemPoolDB; + +use rand::prelude::*; +use rand::thread_rng; + +use codec::read_next; +use codec::Error as codec_error; + +const FOO_CONTRACT: &'static str = "(define-public (foo) (ok 1)) + (define-public (bar (x uint)) (ok x))"; +const SK_1: &'static str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; +const SK_2: &'static str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; +const SK_3: &'static str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; + +#[test] +fn mempool_db_init() { + let _chainstate = instantiate_chainstate(false, 0x80000000, "mempool_db_init"); + let chainstate_path = chainstate_path("mempool_db_init"); + let _mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); +} + +fn make_block( + chainstate: &mut StacksChainState, + block_consensus: ConsensusHash, + parent: &(ConsensusHash, BlockHeaderHash), + burn_height: u64, + block_height: u64, +) -> (ConsensusHash, BlockHeaderHash) { + let (mut chainstate_tx, clar_tx) = chainstate.chainstate_tx_begin().unwrap(); + + let anchored_header = StacksBlockHeader { + version: 1, + total_work: StacksWorkScore { + work: 
block_height, + burn: 1, + }, + proof: VRFProof::empty(), + parent_block: parent.1.clone(), + parent_microblock: BlockHeaderHash([0; 32]), + parent_microblock_sequence: 0, + tx_merkle_root: Sha512Trunc256Sum::empty(), + state_index_root: TrieHash::from_empty_data(), + microblock_pubkey_hash: Hash160([0; 20]), + }; + + let block_hash = anchored_header.block_hash(); + + let c_tx = StacksChainState::chainstate_block_begin( + &chainstate_tx, + clar_tx, + &TEST_BURN_STATE_DB, + &parent.0, + &parent.1, + &block_consensus, + &block_hash, + ); + + let new_tip_info = StacksHeaderInfo { + anchored_header, + microblock_tail: None, + index_root: TrieHash::from_empty_data(), + block_height, + consensus_hash: block_consensus.clone(), + burn_header_hash: BurnchainHeaderHash([0; 32]), + burn_header_height: burn_height as u32, + burn_header_timestamp: 0, + anchored_block_size: 1, + }; + + c_tx.commit_block(); + + let new_index_hash = StacksBlockId::new(&block_consensus, &block_hash); + + chainstate_tx + .put_indexed_begin(&StacksBlockId::new(&parent.0, &parent.1), &new_index_hash) + .unwrap(); + + StacksChainState::insert_stacks_block_header( + &mut chainstate_tx, + &new_index_hash, + &new_tip_info, + &ExecutionCost::zero(), + ) + .unwrap(); + + chainstate_tx.commit().unwrap(); + + (block_consensus, block_hash) +} + +#[test] +fn mempool_walk_over_fork() { + let mut chainstate = + instantiate_chainstate_with_balances(false, 0x80000000, "mempool_walk_over_fork", vec![]); + + // genesis -> b_1* -> b_2* + // \-> b_3 -> b_4 + // + // *'d blocks accept transactions, + // try to walk at b_4, we should be able to find + // the transaction at b_1 + + let b_1 = make_block( + &mut chainstate, + ConsensusHash([0x1; 20]), + &( + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ), + 1, + 1, + ); + let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); + let b_5 = make_block(&mut chainstate, ConsensusHash([0x5; 20]), &b_2, 5, 3); + let b_3 = make_block(&mut chainstate, ConsensusHash([0x3; 20]), &b_1, 3, 2); + let b_4 = make_block(&mut chainstate, ConsensusHash([0x4; 20]), &b_3, 4, 3); + + let chainstate_path = chainstate_path("mempool_walk_over_fork"); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let mut all_txs = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::Any, + &TransactionPostConditionMode::Allow, + ); + + let blocks_to_broadcast_in = [&b_1, &b_2, &b_4]; + let mut txs = [ + all_txs.pop().unwrap(), + all_txs.pop().unwrap(), + all_txs.pop().unwrap(), + ]; + for tx in txs.iter_mut() { + tx.set_tx_fee(123); + } + + for ix in 0..3 { + let mut mempool_tx = mempool.tx_begin().unwrap(); + + let block = &blocks_to_broadcast_in[ix]; + let good_tx = &txs[ix]; + + let origin_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&[ix as u8; 32]), + }; + let sponsor_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&[0x80 | (ix as u8); 32]), + }; + + let txid = good_tx.txid(); + let tx_bytes = good_tx.serialize_to_vec(); + let tx_fee = good_tx.get_tx_fee(); + + let height = 1 + ix as u64; + + let origin_nonce = 0; // (2 * ix + i) as u64; + let sponsor_nonce = 0; // (2 * ix + i) as u64; + + assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &block.0, + &block.1, + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + origin_nonce, + &sponsor_address, + sponsor_nonce, + 
None, + ) + .unwrap(); + + assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + mempool_tx.commit().unwrap(); + } + + // genesis -> b_1* -> b_2* -> b_5 + // \-> b_3 -> b_4 + // + // *'d blocks accept transactions, + // try to walk at b_4, we should be able to find + // the transaction at b_1 + + let mut mempool_settings = MemPoolWalkSettings::default(); + mempool_settings.min_tx_fee = 10; + + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b_2.0, &b_2.1), + |clarity_conn| { + let mut count_txs = 0; + mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + 2, + mempool_settings.clone(), + |_, available_tx, _| { + count_txs += 1; + Ok(true) + }, + ) + .unwrap(); + assert_eq!( + count_txs, 3, + "Mempool should find three transactions from b_2" + ); + }, + ); + + // Now that the mempool has iterated over those transactions, its view of the + // nonce for the origin address should have changed. Now it should find *no* transactions. + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b_2.0, &b_2.1), + |clarity_conn| { + let mut count_txs = 0; + mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + 2, + mempool_settings.clone(), + |_, available_tx, _| { + count_txs += 1; + Ok(true) + }, + ) + .unwrap(); + assert_eq!(count_txs, 0, "Mempool should find no transactions"); + }, + ); + + mempool + .reset_last_known_nonces() + .expect("Should be able to reset nonces"); + + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b_5.0, &b_5.1), + |clarity_conn| { + let mut count_txs = 0; + mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + 3, + mempool_settings.clone(), + |_, available_tx, _| { + count_txs += 1; + Ok(true) + }, + ) + .unwrap(); + assert_eq!( + count_txs, 3, + "Mempool should find three transactions from b_5" + ); + }, + ); + + mempool + .reset_last_known_nonces() + .expect("Should be able to reset nonces"); + + // The mempool iterator no longer does any consideration of what block accepted + // the transaction, so b_3 should have the same view. + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b_3.0, &b_3.1), + |clarity_conn| { + let mut count_txs = 0; + mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + 2, + mempool_settings.clone(), + |_, available_tx, _| { + count_txs += 1; + Ok(true) + }, + ) + .unwrap(); + assert_eq!( + count_txs, 3, + "Mempool should find three transactions from b_3" + ); + }, + ); + + mempool + .reset_last_known_nonces() + .expect("Should be able to reset nonces"); + + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b_4.0, &b_4.1), + |clarity_conn| { + let mut count_txs = 0; + mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + 3, + mempool_settings.clone(), + |_, available_tx, _| { + count_txs += 1; + Ok(true) + }, + ) + .unwrap(); + assert_eq!( + count_txs, 3, + "Mempool should find three transactions from b_4" + ); + }, + ); + + mempool + .reset_last_known_nonces() + .expect("Should be able to reset nonces"); + + // let's test replace-across-fork while we're here. 
+ // first try to replace a tx in b_2 in b_1 - should fail because they are in the same fork + let mut mempool_tx = mempool.tx_begin().unwrap(); + let block = &b_1; + let tx = &txs[1]; + let origin_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&[1; 32]), + }; + let sponsor_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&[0x81; 32]), + }; + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let tx_fee = tx.get_tx_fee(); + + let height = 3; + let origin_nonce = 0; + let sponsor_nonce = 0; + + // make sure that we already have the transaction we're testing for replace-across-fork + assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + assert!(MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &block.0, + &block.1, + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + origin_nonce, + &sponsor_address, + sponsor_nonce, + None, + ) + .is_err()); + + assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + mempool_tx.commit().unwrap(); + + // now try replace-across-fork from b_2 to b_4 + // check that the number of transactions at b_2 and b_4 starts at 1 each + assert_eq!( + MemPoolDB::get_num_tx_at_block(&mempool.db, &b_4.0, &b_4.1).unwrap(), + 1 + ); + assert_eq!( + MemPoolDB::get_num_tx_at_block(&mempool.db, &b_2.0, &b_2.1).unwrap(), + 1 + ); + let mut mempool_tx = mempool.tx_begin().unwrap(); + let block = &b_4; + let tx = &txs[1]; + let origin_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&[0; 32]), + }; + let sponsor_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&[1; 32]), + }; + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let tx_fee = tx.get_tx_fee(); + + let height = 3; + let origin_nonce = 1; + let sponsor_nonce = 1; + + // make sure that we already have the transaction we're testing for replace-across-fork + assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &block.0, + &block.1, + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + origin_nonce, + &sponsor_address, + sponsor_nonce, + None, + ) + .unwrap(); + + assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + mempool_tx.commit().unwrap(); + + // after replace-across-fork, tx[1] should have moved from the b_2->b_5 fork to b_4 + assert_eq!( + MemPoolDB::get_num_tx_at_block(&mempool.db, &b_4.0, &b_4.1).unwrap(), + 2 + ); + assert_eq!( + MemPoolDB::get_num_tx_at_block(&mempool.db, &b_2.0, &b_2.1).unwrap(), + 0 + ); +} + +#[test] +fn mempool_do_not_replace_tx() { + let mut chainstate = instantiate_chainstate_with_balances( + false, + 0x80000000, + "mempool_do_not_replace_tx", + vec![], + ); + + // genesis -> b_1 -> b_2 + // \-> b_3 + // + let b_1 = make_block( + &mut chainstate, + ConsensusHash([0x1; 20]), + &( + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ), + 1, + 1, + ); + let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); + let b_3 = make_block(&mut chainstate, ConsensusHash([0x3; 20]), &b_1, 1, 1); + + let chainstate_path = chainstate_path("mempool_do_not_replace_tx"); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let mut txs = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::Any, + &TransactionPostConditionMode::Allow, + ); + let mut tx = txs.pop().unwrap(); + + let mut mempool_tx = mempool.tx_begin().unwrap(); + + // do an initial 
insert + let origin_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&[0; 32]), + }; + let sponsor_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&[1; 32]), + }; + + tx.set_tx_fee(123); + + // test insert + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + + let tx_fee = tx.get_tx_fee(); + let height = 100; + + let origin_nonce = tx.get_origin_nonce(); + let sponsor_nonce = match tx.get_sponsor_nonce() { + Some(n) => n, + None => origin_nonce, + }; + + assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &b_1.0, + &b_1.1, + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + origin_nonce, + &sponsor_address, + sponsor_nonce, + None, + ) + .unwrap(); + + assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + let prior_txid = txid.clone(); + + // now, let's try inserting again, with a lower fee, but at a different block hash + tx.set_tx_fee(100); + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let tx_fee = tx.get_tx_fee(); + let height = 100; + + let err_resp = MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &b_2.0, + &b_2.1, + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + origin_nonce, + &sponsor_address, + sponsor_nonce, + None, + ) + .unwrap_err(); + assert!(match err_resp { + MemPoolRejection::ConflictingNonceInMempool => true, + _ => false, + }); + + assert!(MemPoolDB::db_has_tx(&mempool_tx, &prior_txid).unwrap()); + assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); +} + +#[test] +fn mempool_db_load_store_replace_tx() { + let mut chainstate = + instantiate_chainstate(false, 0x80000000, "mempool_db_load_store_replace_tx"); + let chainstate_path = chainstate_path("mempool_db_load_store_replace_tx"); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let mut txs = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::Any, + &TransactionPostConditionMode::Allow, + ); + let num_txs = txs.len() as u64; + + let mut mempool_tx = mempool.tx_begin().unwrap(); + + eprintln!("add all txs"); + for (i, mut tx) in txs.drain(..).enumerate() { + // make sure each address is unique per tx (not the case in codec_all_transactions) + let origin_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&i.to_be_bytes()), + }; + let sponsor_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&(i + 1).to_be_bytes()), + }; + + tx.set_tx_fee(123); + + // test insert + + let txid = tx.txid(); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + let expected_tx = tx.clone(); + + let tx_fee = tx.get_tx_fee(); + let height = 100; + let origin_nonce = tx.get_origin_nonce(); + let sponsor_nonce = match tx.get_sponsor_nonce() { + Some(n) => n, + None => origin_nonce, + }; + let len = tx_bytes.len() as u64; + + assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &ConsensusHash([0x1; 20]), + &BlockHeaderHash([0x2; 32]), + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + origin_nonce, + &sponsor_address, + sponsor_nonce, + None, + ) + .unwrap(); + + assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + // test retrieval + let tx_info_opt = MemPoolDB::get_tx(&mempool_tx, &txid).unwrap(); + let tx_info = tx_info_opt.unwrap(); + + assert_eq!(tx_info.tx, expected_tx); + 
assert_eq!(tx_info.metadata.len, len); + assert_eq!(tx_info.metadata.tx_fee, 123); + assert_eq!(tx_info.metadata.origin_address, origin_address); + assert_eq!(tx_info.metadata.origin_nonce, origin_nonce); + assert_eq!(tx_info.metadata.sponsor_address, sponsor_address); + assert_eq!(tx_info.metadata.sponsor_nonce, sponsor_nonce); + assert_eq!(tx_info.metadata.consensus_hash, ConsensusHash([0x1; 20])); + assert_eq!( + tx_info.metadata.block_header_hash, + BlockHeaderHash([0x2; 32]) + ); + assert_eq!(tx_info.metadata.block_height, height); + + // test replace-by-fee with a higher fee + let old_txid = txid; + + tx.set_tx_fee(124); + assert!(txid != tx.txid()); + + let txid = tx.txid(); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + let expected_tx = tx.clone(); + let tx_fee = tx.get_tx_fee(); + + assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + let tx_info_before = + MemPoolDB::get_tx_metadata_by_address(&mempool_tx, true, &origin_address, origin_nonce) + .unwrap() + .unwrap(); + assert_eq!(tx_info_before, tx_info.metadata); + + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &ConsensusHash([0x1; 20]), + &BlockHeaderHash([0x2; 32]), + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + origin_nonce, + &sponsor_address, + sponsor_nonce, + None, + ) + .unwrap(); + + // was replaced + assert!(!MemPoolDB::db_has_tx(&mempool_tx, &old_txid).unwrap()); + assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + let tx_info_after = + MemPoolDB::get_tx_metadata_by_address(&mempool_tx, true, &origin_address, origin_nonce) + .unwrap() + .unwrap(); + assert!(tx_info_after != tx_info.metadata); + + // test retrieval -- transaction should have been replaced because it has a higher + // estimated fee + let tx_info_opt = MemPoolDB::get_tx(&mempool_tx, &txid).unwrap(); + + let tx_info = tx_info_opt.unwrap(); + assert_eq!(tx_info.metadata, tx_info_after); + + assert_eq!(tx_info.tx, expected_tx); + assert_eq!(tx_info.metadata.len, len); + assert_eq!(tx_info.metadata.tx_fee, 124); + assert_eq!(tx_info.metadata.origin_address, origin_address); + assert_eq!(tx_info.metadata.origin_nonce, origin_nonce); + assert_eq!(tx_info.metadata.sponsor_address, sponsor_address); + assert_eq!(tx_info.metadata.sponsor_nonce, sponsor_nonce); + assert_eq!(tx_info.metadata.consensus_hash, ConsensusHash([0x1; 20])); + assert_eq!( + tx_info.metadata.block_header_hash, + BlockHeaderHash([0x2; 32]) + ); + assert_eq!(tx_info.metadata.block_height, height); + + // test replace-by-fee with a lower fee + let old_txid = txid; + + tx.set_tx_fee(122); + assert!(txid != tx.txid()); + + let txid = tx.txid(); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + let _expected_tx = tx.clone(); + let tx_fee = tx.get_tx_fee(); + + assert!(match MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &ConsensusHash([0x1; 20]), + &BlockHeaderHash([0x2; 32]), + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + origin_nonce, + &sponsor_address, + sponsor_nonce, + None, + ) + .unwrap_err() + { + MemPoolRejection::ConflictingNonceInMempool => true, + _ => false, + }); + + // was NOT replaced + assert!(MemPoolDB::db_has_tx(&mempool_tx, &old_txid).unwrap()); + assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + } + mempool_tx.commit().unwrap(); + + eprintln!("get all txs"); + let txs = MemPoolDB::get_txs_after( + &mempool.db, + &ConsensusHash([0x1; 20]), + &BlockHeaderHash([0x2; 32]), + 0, + num_txs, + ) + .unwrap(); + 
assert_eq!(txs.len() as u64, num_txs); + + eprintln!("get empty txs"); + let txs = MemPoolDB::get_txs_after( + &mempool.db, + &ConsensusHash([0x1; 20]), + &BlockHeaderHash([0x3; 32]), + 0, + num_txs, + ) + .unwrap(); + assert_eq!(txs.len(), 0); + + eprintln!("get empty txs"); + let txs = MemPoolDB::get_txs_after( + &mempool.db, + &ConsensusHash([0x2; 20]), + &BlockHeaderHash([0x2; 32]), + 0, + num_txs, + ) + .unwrap(); + assert_eq!(txs.len(), 0); + + eprintln!("garbage-collect"); + let mut mempool_tx = mempool.tx_begin().unwrap(); + MemPoolDB::garbage_collect(&mut mempool_tx, 101, None).unwrap(); + mempool_tx.commit().unwrap(); + + let txs = MemPoolDB::get_txs_after( + &mempool.db, + &ConsensusHash([0x1; 20]), + &BlockHeaderHash([0x2; 32]), + 0, + num_txs, + ) + .unwrap(); + assert_eq!(txs.len(), 0); +} + +#[test] +fn mempool_db_test_rbf() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, "mempool_db_test_rbf"); + let chainstate_path = chainstate_path("mempool_db_test_rbf"); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + // create initial transaction + let mut mempool_tx = mempool.tx_begin().unwrap(); + let spending_condition = TransactionSpendingCondition::Singlesig(SinglesigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: SinglesigHashMode::P2PKH, + key_encoding: TransactionPublicKeyEncoding::Uncompressed, + nonce: 123, + tx_fee: 456, + signature: MessageSignature::from_raw(&vec![0xff; 65]), + }); + let stx_address = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let payload = TransactionPayload::TokenTransfer( + PrincipalData::from(QualifiedContractIdentifier { + issuer: stx_address.into(), + name: "hello-contract-name".into(), + }), + 123, + TokenTransferMemo([0u8; 34]), + ); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::Standard(spending_condition.clone()), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: Vec::new(), + payload, + }; + + let i: usize = 0; + let origin_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&i.to_be_bytes()), + }; + let sponsor_address = StacksAddress { + version: 22, + bytes: Hash160::from_data(&(i + 1).to_be_bytes()), + }; + + tx.set_tx_fee(123); + let txid = tx.txid(); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + let expected_tx = tx.clone(); + let tx_fee = tx.get_tx_fee(); + let height = 100; + let origin_nonce = tx.get_origin_nonce(); + let sponsor_nonce = match tx.get_sponsor_nonce() { + Some(n) => n, + None => origin_nonce, + }; + let first_len = tx_bytes.len() as u64; + + assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &ConsensusHash([0x1; 20]), + &BlockHeaderHash([0x2; 32]), + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + origin_nonce, + &sponsor_address, + sponsor_nonce, + None, + ) + .unwrap(); + assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); + + // test retrieval of initial transaction + let tx_info_opt = MemPoolDB::get_tx(&mempool_tx, &txid).unwrap(); + let tx_info = tx_info_opt.unwrap(); + + // test replace-by-fee with a higher fee, where the payload is smaller + let old_txid = txid; + let old_tx_fee = tx_fee; + + tx.set_tx_fee(124); + tx.payload = + TransactionPayload::TokenTransfer(stx_address.into(), 123, TokenTransferMemo([0u8; 34])); + 
assert!(txid != tx.txid());
+    let txid = tx.txid();
+    let mut tx_bytes = vec![];
+    tx.consensus_serialize(&mut tx_bytes).unwrap();
+    let expected_tx = tx.clone();
+    let tx_fee = tx.get_tx_fee();
+    let second_len = tx_bytes.len() as u64;
+
+    // these asserts are to ensure we are using the fee directly, not the fee rate
+    assert!(second_len < first_len);
+    assert!(second_len * tx_fee < first_len * old_tx_fee);
+    assert!(tx_fee > old_tx_fee);
+    assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap());
+
+    let tx_info_before =
+        MemPoolDB::get_tx_metadata_by_address(&mempool_tx, true, &origin_address, origin_nonce)
+            .unwrap()
+            .unwrap();
+    assert_eq!(tx_info_before, tx_info.metadata);
+
+    MemPoolDB::try_add_tx(
+        &mut mempool_tx,
+        &mut chainstate,
+        &ConsensusHash([0x1; 20]),
+        &BlockHeaderHash([0x2; 32]),
+        txid,
+        tx_bytes,
+        tx_fee,
+        height,
+        &origin_address,
+        origin_nonce,
+        &sponsor_address,
+        sponsor_nonce,
+        None,
+    )
+    .unwrap();
+
+    // check that the transaction was replaced
+    assert!(!MemPoolDB::db_has_tx(&mempool_tx, &old_txid).unwrap());
+    assert!(MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap());
+
+    let tx_info_after =
+        MemPoolDB::get_tx_metadata_by_address(&mempool_tx, true, &origin_address, origin_nonce)
+            .unwrap()
+            .unwrap();
+    assert!(tx_info_after != tx_info.metadata);
+
+    // test retrieval -- transaction should have been replaced because it has a higher fee
+    let tx_info_opt = MemPoolDB::get_tx(&mempool_tx, &txid).unwrap();
+    let tx_info = tx_info_opt.unwrap();
+    assert_eq!(tx_info.metadata, tx_info_after);
+    assert_eq!(tx_info.metadata.len, second_len);
+    assert_eq!(tx_info.metadata.tx_fee, 124);
+}
+
+#[test]
+fn test_add_txs_bloom_filter() {
+    let mut chainstate = instantiate_chainstate(false, 0x80000000, "mempool_add_txs_bloom_filter");
+    let chainstate_path = chainstate_path("mempool_add_txs_bloom_filter");
+    let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap();
+
+    let addr = StacksAddress {
+        version: 1,
+        bytes: Hash160([0xff; 20]),
+    };
+
+    let mut all_txids: Vec<Vec<Txid>> = vec![];
+
+    // none conflict
+    for block_height in 10..(10 + 10 * BLOOM_COUNTER_DEPTH) {
+        let mut txids: Vec<Txid> = vec![];
+        let mut fp_count = 0;
+
+        let bf = mempool.get_txid_bloom_filter().unwrap();
+        let mut mempool_tx = mempool.tx_begin().unwrap();
+        for i in 0..128 {
+            let pk = StacksPrivateKey::new();
+            let mut tx = StacksTransaction {
+                version: TransactionVersion::Testnet,
+                chain_id: 0x80000000,
+                auth: TransactionAuth::from_p2pkh(&pk).unwrap(),
+                anchor_mode: TransactionAnchorMode::Any,
+                post_condition_mode: TransactionPostConditionMode::Allow,
+                post_conditions: vec![],
+                payload: TransactionPayload::TokenTransfer(
+                    addr.to_account_principal(),
+                    (block_height + i * 128) as u64,
+                    TokenTransferMemo([0u8; 34]),
+                ),
+            };
+            tx.set_tx_fee(1000);
+            tx.set_origin_nonce(0);
+
+            let txid = tx.txid();
+            let tx_bytes = tx.serialize_to_vec();
+            let origin_addr = tx.origin_address();
+            let origin_nonce = tx.get_origin_nonce();
+            let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone());
+            let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce);
+            let tx_fee = tx.get_tx_fee();
+
+            // should succeed
+            MemPoolDB::try_add_tx(
+                &mut mempool_tx,
+                &mut chainstate,
+                &ConsensusHash([0x1 + (block_height as u8); 20]),
+                &BlockHeaderHash([0x2 + (block_height as u8); 32]),
+                txid,
+                tx_bytes,
+                tx_fee,
+                block_height as u64,
+                &origin_addr,
+                origin_nonce,
+                &sponsor_addr,
+                sponsor_nonce,
+                None,
+            )
+            .unwrap();
+
+            if bf.contains_raw(&tx.txid().0) {
+                fp_count += 1;
+            }
+
+            txids.push(txid);
+        }
+
+        mempool_tx.commit().unwrap();
+
+        // nearly all txs should be new
+        assert!((fp_count as f64) / (MAX_BLOOM_COUNTER_TXS as f64) <= BLOOM_COUNTER_ERROR_RATE);
+
+        let bf = mempool.get_txid_bloom_filter().unwrap();
+        for txid in txids.iter() {
+            assert!(
+                bf.contains_raw(&txid.0),
+                "Bloom filter does not contain {}",
+                &txid
+            );
+        }
+
+        all_txids.push(txids);
+
+        if block_height > 10 + BLOOM_COUNTER_DEPTH {
+            let expired_block_height = block_height - BLOOM_COUNTER_DEPTH;
+            let bf = mempool.get_txid_bloom_filter().unwrap();
+            for i in 0..(block_height - 10 - BLOOM_COUNTER_DEPTH) {
+                let txids = &all_txids[i];
+                let mut fp_count = 0;
+                for txid in txids {
+                    if bf.contains_raw(&txid.0) {
+                        fp_count += 1;
+                    }
+                }
+
+                // these expired txids should mostly be absent
+                assert!(
+                    (fp_count as f64) / (MAX_BLOOM_COUNTER_TXS as f64) <= BLOOM_COUNTER_ERROR_RATE
+                );
+            }
+        }
+    }
+}
+
+#[test]
+fn test_txtags() {
+    let mut chainstate = instantiate_chainstate(false, 0x80000000, "mempool_txtags");
+    let chainstate_path = chainstate_path("mempool_txtags");
+    let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap();
+
+    let addr = StacksAddress {
+        version: 1,
+        bytes: Hash160([0xff; 20]),
+    };
+
+    let mut seed = [0u8; 32];
+    thread_rng().fill_bytes(&mut seed);
+
+    let mut all_txtags: Vec<Vec<TxTag>> = vec![];
+
+    for block_height in 10..(10 + 10 * BLOOM_COUNTER_DEPTH) {
+        let mut txtags: Vec<TxTag> = vec![];
+
+        let mut mempool_tx = mempool.tx_begin().unwrap();
+        for i in 0..128 {
+            let pk = StacksPrivateKey::new();
+            let mut tx = StacksTransaction {
+                version: TransactionVersion::Testnet,
+                chain_id: 0x80000000,
+                auth: TransactionAuth::from_p2pkh(&pk).unwrap(),
+                anchor_mode: TransactionAnchorMode::Any,
+                post_condition_mode: TransactionPostConditionMode::Allow,
+                post_conditions: vec![],
+                payload: TransactionPayload::TokenTransfer(
+                    addr.to_account_principal(),
+                    (block_height + i * 128) as u64,
+                    TokenTransferMemo([0u8; 34]),
+                ),
+            };
+            tx.set_tx_fee(1000);
+            tx.set_origin_nonce(0);
+
+            let txid = tx.txid();
+            let tx_bytes = tx.serialize_to_vec();
+            let origin_addr = tx.origin_address();
+            let origin_nonce = tx.get_origin_nonce();
+            let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone());
+            let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce);
+            let tx_fee = tx.get_tx_fee();
+
+            let txtag = TxTag::from(&seed, &txid);
+
+            // should succeed
+            MemPoolDB::try_add_tx(
+                &mut mempool_tx,
+                &mut chainstate,
+                &ConsensusHash([0x1 + (block_height as u8); 20]),
+                &BlockHeaderHash([0x2 + (block_height as u8); 32]),
+                txid,
+                tx_bytes,
+                tx_fee,
+                block_height as u64,
+                &origin_addr,
+                origin_nonce,
+                &sponsor_addr,
+                sponsor_nonce,
+                None,
+            )
+            .unwrap();
+
+            txtags.push(txtag);
+        }
+
+        mempool_tx.commit().unwrap();
+        all_txtags.push(txtags);
+
+        if block_height - 10 >= BLOOM_COUNTER_DEPTH {
+            assert_eq!(
+                MemPoolDB::get_num_recent_txs(mempool.conn()).unwrap(),
+                (BLOOM_COUNTER_DEPTH * 128) as u64
+            );
+        }
+
+        let txtags = mempool.get_txtags(&seed).unwrap();
+        let len_txtags = all_txtags.len();
+        let last_txtags =
+            &all_txtags[len_txtags.saturating_sub(BLOOM_COUNTER_DEPTH as usize)..len_txtags];
+
+        let mut expected_txtag_set = HashSet::new();
+        for txtags in last_txtags.iter() {
+            for txtag in txtags.iter() {
+                expected_txtag_set.insert(txtag.clone());
+            }
+        }
+
+        assert_eq!(expected_txtag_set.len(), txtags.len());
+        for txtag in txtags.into_iter() {
assert!(expected_txtag_set.contains(&txtag)); + } + } +} + +#[test] +#[ignore] +fn test_make_mempool_sync_data() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, "make_mempool_sync_data"); + let chainstate_path = chainstate_path("make_mempool_sync_data"); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let addr = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + + let mut txids = vec![]; + let mut nonrecent_fp_rates = vec![]; + for block_height in 10..(10 + BLOOM_COUNTER_DEPTH + 1) { + for i in 0..((MAX_BLOOM_COUNTER_TXS + 128) as usize) { + let mut mempool_tx = mempool.tx_begin().unwrap(); + for j in 0..128 { + let pk = StacksPrivateKey::new(); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &ConsensusHash([0x1 + (block_height as u8); 20]), + &BlockHeaderHash([0x2 + (block_height as u8); 32]), + txid.clone(), + tx_bytes, + tx_fee, + block_height as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + txids.push(txid); + } + mempool_tx.commit().unwrap(); + + let ts_1 = get_epoch_time_ms(); + let ms = mempool.make_mempool_sync_data().unwrap(); + let ts_2 = get_epoch_time_ms(); + eprintln!( + "make_mempool_sync_data({}): {} ms", + txids.len(), + ts_2.saturating_sub(ts_1) + ); + + let mut present_count: u32 = 0; + let mut absent_count: u32 = 0; + let mut fp_count: u32 = 0; + match ms { + MemPoolSyncData::BloomFilter(ref bf) => { + eprintln!( + "bloomfilter({}); txids.len() == {}", + block_height, + txids.len() + ); + let recent_txids = mempool.get_bloom_txids().unwrap(); + assert!(recent_txids.len() <= MAX_BLOOM_COUNTER_TXS as usize); + + let max_height = MemPoolDB::get_max_height(mempool.conn()) + .unwrap() + .unwrap_or(0); + eprintln!( + "bloomfilter({}): recent_txids.len() == {}, max height is {}", + block_height, + recent_txids.len(), + max_height + ); + + let mut recent_set = HashSet::new(); + let mut in_bf = 0; + for txid in recent_txids.iter() { + if bf.contains_raw(&txid.0) { + in_bf += 1; + } + recent_set.insert(txid.clone()); + } + + eprintln!("in bloom filter: {}", in_bf); + assert!(in_bf >= recent_txids.len()); + + for txid in txids.iter() { + if !recent_set.contains(&txid) && bf.contains_raw(&txid.0) { + fp_count += 1; + } + if bf.contains_raw(&txid.0) { + present_count += 1; + } else { + absent_count += 1; + } + } + + // all recent transactions should be present + assert!( + present_count >= cmp::min(MAX_BLOOM_COUNTER_TXS.into(), txids.len() as u32) + ); + } + MemPoolSyncData::TxTags(ref seed, ref tags) => { + eprintln!("txtags({}); txids.len() == {}", block_height, txids.len()); + let recent_txids = mempool.get_bloom_txids().unwrap(); + + // all 
tags are present in the recent set + let mut recent_set = HashSet::new(); + for txid in recent_txids { + recent_set.insert(TxTag::from(seed, &txid)); + } + + for tag in tags.iter() { + assert!(recent_set.contains(tag)); + } + } + } + + let mut nonrecent_fp_rate = 0.0f64; + let recent_txids = mempool.get_bloom_txids().unwrap(); + if recent_txids.len() < (present_count + absent_count) as usize { + nonrecent_fp_rate = (fp_count as f64) + / ((present_count + absent_count - (recent_txids.len() as u32)) as f64); + eprintln!( + "Nonrecent false positive rate: {} / ({} + {} - {} = {}) = {}", + fp_count, + present_count, + absent_count, + recent_txids.len(), + present_count + absent_count - (recent_txids.len() as u32), + nonrecent_fp_rate + ); + } + + let total_count = MemPoolDB::get_num_recent_txs(&mempool.conn()).unwrap(); + eprintln!( + "present_count: {}, absent count: {}, total sent: {}, total recent: {}", + present_count, + absent_count, + txids.len(), + total_count + ); + + nonrecent_fp_rates.push(nonrecent_fp_rate); + } + } + + // average false positive rate for non-recent transactions should be around the bloom + // counter false positive rate + let num_nonrecent_fp_samples = nonrecent_fp_rates.len() as f64; + let avg_nonrecent_fp_rate = + nonrecent_fp_rates.iter().fold(0.0f64, |acc, x| acc + x) / num_nonrecent_fp_samples; + + assert!((avg_nonrecent_fp_rate - BLOOM_COUNTER_ERROR_RATE).abs() < 0.001); +} + +#[test] +fn test_find_next_missing_transactions() { + let mut chainstate = + instantiate_chainstate(false, 0x80000000, "find_next_missing_transactions"); + let chainstate_path = chainstate_path("find_next_missing_transactions"); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let addr = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + + let block_height = 10; + let mut txids = vec![]; + + let mut mempool_tx = mempool.tx_begin().unwrap(); + for i in 0..(2 * MAX_BLOOM_COUNTER_TXS) { + let pk = StacksPrivateKey::new(); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &ConsensusHash([0x1 + (block_height as u8); 20]), + &BlockHeaderHash([0x2 + (block_height as u8); 32]), + txid.clone(), + tx_bytes, + tx_fee, + block_height as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + txids.push(txid); + } + mempool_tx.commit().unwrap(); + + let mut txid_set = HashSet::new(); + for txid in txids.iter() { + txid_set.insert(txid.clone()); + } + + eprintln!("Find next missing transactions"); + + let txtags = mempool.get_txtags(&[0u8; 32]).unwrap(); + + // no txs returned for a full txtag set + let ts_before = get_epoch_time_ms(); + let (txs, 
next_page_opt, _) = mempool + .find_next_missing_transactions( + &MemPoolSyncData::TxTags([0u8; 32], txtags.clone()), + block_height, + &Txid([0u8; 32]), + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + MAX_BLOOM_COUNTER_TXS as u64, + ) + .unwrap(); + let ts_after = get_epoch_time_ms(); + eprintln!( + "find_next_missing_transactions with full txtag set took {} ms", + ts_after.saturating_sub(ts_before) + ); + + assert_eq!(txs.len(), 0); + assert!(next_page_opt.is_some()); + + // all txs returned for an empty txtag set + let ts_before = get_epoch_time_ms(); + let (txs, next_page_opt, _) = mempool + .find_next_missing_transactions( + &MemPoolSyncData::TxTags([0u8; 32], vec![]), + block_height, + &Txid([0u8; 32]), + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + MAX_BLOOM_COUNTER_TXS as u64, + ) + .unwrap(); + let ts_after = get_epoch_time_ms(); + eprintln!( + "find_next_missing_transactions with empty txtag set took {} ms", + ts_after.saturating_sub(ts_before) + ); + + for tx in txs { + assert!(txid_set.contains(&tx.txid())); + } + assert!(next_page_opt.is_some()); + + // all bloom-filter-absent txids should be returned + let ts_before = get_epoch_time_ms(); + let txid_bloom = mempool.get_txid_bloom_filter().unwrap(); + let (txs, next_page_opt, _) = mempool + .find_next_missing_transactions( + &MemPoolSyncData::BloomFilter(txid_bloom), + block_height, + &Txid([0u8; 32]), + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + ) + .unwrap(); + let ts_after = get_epoch_time_ms(); + eprintln!( + "find_next_missing_transactions with full bloom filter set took {} ms", + ts_after.saturating_sub(ts_before) + ); + + assert_eq!(txs.len(), 0); + assert!(next_page_opt.is_some()); + + let mut empty_bloom_conn = setup_bloom_counter("find_next_missing_txs_empty"); + let mut empty_tx = tx_begin_immediate(&mut empty_bloom_conn).unwrap(); + let hasher = BloomNodeHasher::new(&[0u8; 32]); + let empty_bloom = BloomCounter::new( + &mut empty_tx, + "bloom_counter", + BLOOM_COUNTER_ERROR_RATE, + MAX_BLOOM_COUNTER_TXS, + hasher, + ) + .unwrap(); + empty_tx.commit().unwrap(); + + let ts_before = get_epoch_time_ms(); + let (txs, next_page_opt, _) = mempool + .find_next_missing_transactions( + &MemPoolSyncData::BloomFilter(empty_bloom.to_bloom_filter(&empty_bloom_conn).unwrap()), + block_height, + &Txid([0u8; 32]), + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + ) + .unwrap(); + let ts_after = get_epoch_time_ms(); + eprintln!( + "find_next_missing_transactions with empty bloom filter set took {} ms", + ts_after.saturating_sub(ts_before) + ); + + for tx in txs { + assert!(txid_set.contains(&tx.txid())); + } + assert!(next_page_opt.is_some()); + + // paginated access works too + let mut last_txid = Txid([0u8; 32]); + let page_size = 128; + let mut all_txs = vec![]; + for i in 0..(txtags.len() / (page_size as usize)) + 1 { + let (mut txs, next_page_opt, num_visited) = mempool + .find_next_missing_transactions( + &MemPoolSyncData::TxTags([0u8; 32], vec![]), + block_height, + &last_txid, + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + page_size, + ) + .unwrap(); + assert!(txs.len() <= page_size as usize); + assert!(num_visited <= page_size as u64); + + if txs.len() == 0 { + assert!(next_page_opt.is_none()); + break; + } + + last_txid = mempool + .get_randomized_txid(&txs.last().clone().unwrap().txid()) + .unwrap() + .unwrap(); + + assert_eq!(last_txid, next_page_opt.unwrap()); + all_txs.append(&mut txs); + } + + for tx in all_txs { + assert!(txid_set.contains(&tx.txid())); + } + + last_txid = 
Txid([0u8; 32]); + all_txs = vec![]; + for i in 0..(txtags.len() / (page_size as usize)) + 1 { + let ts_before = get_epoch_time_ms(); + let (mut txs, next_page_opt, num_visited) = mempool + .find_next_missing_transactions( + &MemPoolSyncData::BloomFilter( + empty_bloom.to_bloom_filter(&empty_bloom_conn).unwrap(), + ), + block_height, + &last_txid, + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + page_size, + ) + .unwrap(); + let ts_after = get_epoch_time_ms(); + eprintln!("find_next_missing_transactions with empty bloom filter took {} ms to serve {} transactions", ts_after.saturating_sub(ts_before), page_size); + + assert!(txs.len() <= page_size as usize); + assert!(num_visited <= page_size as u64); + + if txs.len() == 0 { + assert!(next_page_opt.is_none()); + break; + } + + last_txid = mempool + .get_randomized_txid(&txs.last().clone().unwrap().txid()) + .unwrap() + .unwrap(); + + assert_eq!(last_txid, next_page_opt.unwrap()); + all_txs.append(&mut txs); + } + + for tx in all_txs { + assert!(txid_set.contains(&tx.txid())); + } + + // old transactions are ignored + let (old_txs, next_page_opt, num_visited) = mempool + .find_next_missing_transactions( + &MemPoolSyncData::TxTags([0u8; 32], vec![]), + block_height + (BLOOM_COUNTER_DEPTH as u64) + 1, + &last_txid, + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + ) + .unwrap(); + assert_eq!(old_txs.len(), 0); + assert!(next_page_opt.is_none()); + + let (old_txs, next_page_opt, num_visited) = mempool + .find_next_missing_transactions( + &MemPoolSyncData::BloomFilter(empty_bloom.to_bloom_filter(&empty_bloom_conn).unwrap()), + block_height + (BLOOM_COUNTER_DEPTH as u64) + 1, + &last_txid, + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + (2 * MAX_BLOOM_COUNTER_TXS) as u64, + ) + .unwrap(); + assert_eq!(old_txs.len(), 0); + assert!(next_page_opt.is_none()); +} + +#[test] +fn test_stream_txs() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, "test_stream_txs"); + let chainstate_path = chainstate_path("test_stream_txs"); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let addr = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let mut txs = vec![]; + let block_height = 10; + let mut total_len = 0; + + let mut mempool_tx = mempool.tx_begin().unwrap(); + for i in 0..10 { + let pk = StacksPrivateKey::new(); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + total_len += tx_bytes.len(); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &ConsensusHash([0x1 + (block_height as u8); 20]), + &BlockHeaderHash([0x2 + (block_height as u8); 32]), + txid.clone(), + tx_bytes, + tx_fee, + block_height as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added 
{} {}", i, &txid); + txs.push(tx); + } + mempool_tx.commit().unwrap(); + + let mut buf = vec![]; + let stream = StreamCursor::new_tx_stream( + MemPoolSyncData::TxTags([0u8; 32], vec![]), + MAX_BLOOM_COUNTER_TXS.into(), + block_height, + Some(Txid([0u8; 32])), + ); + let mut tx_stream_data = if let StreamCursor::MempoolTxs(stream_data) = stream { + stream_data + } else { + unreachable!(); + }; + + loop { + let nw = match mempool.stream_txs(&mut buf, &mut tx_stream_data, 10) { + Ok(nw) => nw, + Err(e) => { + error!("Failed to stream_to: {:?}", &e); + panic!(); + } + }; + if nw == 0 { + break; + } + } + + eprintln!("Read {} bytes of tx data", buf.len()); + + // buf decodes to the list of txs we have + let mut decoded_txs = vec![]; + let mut ptr = &buf[..]; + loop { + let tx: StacksTransaction = match read_next::(&mut ptr) { + Ok(tx) => tx, + Err(e) => match e { + codec_error::ReadError(ref ioe) => match ioe.kind() { + io::ErrorKind::UnexpectedEof => { + eprintln!("out of transactions"); + break; + } + _ => { + panic!("IO error: {:?}", &e); + } + }, + _ => { + panic!("other error: {:?}", &e); + } + }, + }; + decoded_txs.push(tx); + } + + let mut tx_set = HashSet::new(); + for tx in txs.iter() { + tx_set.insert(tx.txid()); + } + + // the order won't be preserved + assert_eq!(tx_set.len(), decoded_txs.len()); + for tx in decoded_txs { + assert!(tx_set.contains(&tx.txid())); + } + + // verify that we can stream through pagination, with an empty tx tags + let mut page_id = Txid([0u8; 32]); + let mut decoded_txs = vec![]; + loop { + let stream = StreamCursor::new_tx_stream( + MemPoolSyncData::TxTags([0u8; 32], vec![]), + 1, + block_height, + Some(page_id), + ); + + let mut tx_stream_data = if let StreamCursor::MempoolTxs(stream_data) = stream { + stream_data + } else { + unreachable!(); + }; + + let mut buf = vec![]; + loop { + let nw = match mempool.stream_txs(&mut buf, &mut tx_stream_data, 10) { + Ok(nw) => nw, + Err(e) => { + error!("Failed to stream_to: {:?}", &e); + panic!(); + } + }; + if nw == 0 { + break; + } + } + + // buf decodes to the list of txs we have, plus page ids + let mut ptr = &buf[..]; + test_debug!("Decode {}", to_hex(ptr)); + let (mut next_txs, next_page) = HttpResponseType::decode_tx_stream(&mut ptr, None).unwrap(); + + decoded_txs.append(&mut next_txs); + + // for fun, use a page ID that is actually a well-formed prefix of a transaction + if let Some(ref tx) = decoded_txs.last() { + let mut evil_buf = tx.serialize_to_vec(); + let mut evil_page_id = [0u8; 32]; + evil_page_id.copy_from_slice(&evil_buf[0..32]); + evil_buf.extend_from_slice(&evil_page_id); + + test_debug!("Decode evil buf {}", &to_hex(&evil_buf)); + + let (evil_next_txs, evil_next_page) = + HttpResponseType::decode_tx_stream(&mut &evil_buf[..], None).unwrap(); + + // should still work + assert_eq!(evil_next_txs.len(), 1); + assert_eq!(evil_next_txs[0].txid(), tx.txid()); + assert_eq!(evil_next_page.unwrap().0[0..32], evil_buf[0..32]); + } + + if let Some(next_page) = next_page { + page_id = next_page; + } else { + break; + } + } + + // make sure we got them all + let mut tx_set = HashSet::new(); + for tx in txs.iter() { + tx_set.insert(tx.txid()); + } + + // the order won't be preserved + assert_eq!(tx_set.len(), decoded_txs.len()); + for tx in decoded_txs { + assert!(tx_set.contains(&tx.txid())); + } + + // verify that we can stream through pagination, with a full bloom filter + let mut page_id = Txid([0u8; 32]); + let all_txs_tags: Vec<_> = txs + .iter() + .map(|tx| TxTag::from(&[0u8; 32], &tx.txid())) + 
.collect(); + loop { + let stream = StreamCursor::new_tx_stream( + MemPoolSyncData::TxTags([0u8; 32], all_txs_tags.clone()), + 1, + block_height, + Some(page_id), + ); + + let mut tx_stream_data = if let StreamCursor::MempoolTxs(stream_data) = stream { + stream_data + } else { + unreachable!(); + }; + + let mut buf = vec![]; + loop { + let nw = match mempool.stream_txs(&mut buf, &mut tx_stream_data, 10) { + Ok(nw) => nw, + Err(e) => { + error!("Failed to stream_to: {:?}", &e); + panic!(); + } + }; + if nw == 0 { + break; + } + } + + // buf decodes to an empty list of txs, plus page ID + let mut ptr = &buf[..]; + test_debug!("Decode {}", to_hex(ptr)); + let (next_txs, next_page) = HttpResponseType::decode_tx_stream(&mut ptr, None).unwrap(); + + assert_eq!(next_txs.len(), 0); + + if let Some(next_page) = next_page { + page_id = next_page; + } else { + break; + } + } +} + +#[test] +fn test_decode_tx_stream() { + let addr = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let mut txs = vec![]; + for _i in 0..10 { + let pk = StacksPrivateKey::new(); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + txs.push(tx); + } + + // valid empty tx stream + let empty_stream = [0x11u8; 32]; + let (next_txs, next_page) = + HttpResponseType::decode_tx_stream(&mut empty_stream.as_ref(), None).unwrap(); + assert_eq!(next_txs.len(), 0); + assert_eq!(next_page, Some(Txid([0x11; 32]))); + + // valid tx stream with a page id at the end + let mut tx_stream: Vec = vec![]; + for tx in txs.iter() { + tx.consensus_serialize(&mut tx_stream).unwrap(); + } + tx_stream.extend_from_slice(&[0x22; 32]); + + let (next_txs, next_page) = + HttpResponseType::decode_tx_stream(&mut &tx_stream[..], None).unwrap(); + assert_eq!(next_txs, txs); + assert_eq!(next_page, Some(Txid([0x22; 32]))); + + // valid tx stream with _no_ page id at the end + let mut partial_stream: Vec = vec![]; + txs[0].consensus_serialize(&mut partial_stream).unwrap(); + let (next_txs, next_page) = + HttpResponseType::decode_tx_stream(&mut &partial_stream[..], None).unwrap(); + assert_eq!(next_txs.len(), 1); + assert_eq!(next_txs[0], txs[0]); + assert!(next_page.is_none()); + + // garbage tx stream + let garbage_stream = [0xff; 256]; + let err = HttpResponseType::decode_tx_stream(&mut garbage_stream.as_ref(), None); + match err { + Err(NetError::ExpectedEndOfStream) => {} + x => { + error!("did not fail: {:?}", &x); + panic!(); + } + } + + // tx stream that is too short + let short_stream = [0x33u8; 33]; + let err = HttpResponseType::decode_tx_stream(&mut short_stream.as_ref(), None); + match err { + Err(NetError::ExpectedEndOfStream) => {} + x => { + error!("did not fail: {:?}", &x); + panic!(); + } + } + + // tx stream has a tx, a page ID, and then another tx + let mut interrupted_stream = vec![]; + txs[0].consensus_serialize(&mut interrupted_stream).unwrap(); + interrupted_stream.extend_from_slice(&[0x00u8; 32]); + txs[1].consensus_serialize(&mut interrupted_stream).unwrap(); + + let err = HttpResponseType::decode_tx_stream(&mut &interrupted_stream[..], None); + match err { + Err(NetError::ExpectedEndOfStream) => {} + x => { + error!("did not 
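The `test_decode_tx_stream` cases above pin down the wire format that `stream_txs` produces and `decode_tx_stream` consumes. As a minimal sketch (not part of the diff; `encode_tx_page` is an illustrative helper, not a function in this PR), a page is zero or more consensus-serialized transactions, optionally terminated by exactly one 32-byte page ID:

```rust
// Sketch only: the tx-stream page layout exercised by test_decode_tx_stream.
fn encode_tx_page(txs: &[StacksTransaction], next_page: Option<Txid>) -> Vec<u8> {
    let mut stream: Vec<u8> = vec![];
    for tx in txs {
        // each transaction is consensus-serialized back to back...
        tx.consensus_serialize(&mut stream).unwrap();
    }
    if let Some(page_id) = next_page {
        // ...optionally followed by exactly one trailing 32-byte page ID.
        // Any bytes after a page ID make the stream invalid
        // (NetError::ExpectedEndOfStream).
        stream.extend_from_slice(&page_id.0);
    }
    stream
}
```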
fail: {:?}", &x); + panic!(); + } + } +} diff --git a/src/cost_estimates/fee_medians.rs b/src/cost_estimates/fee_medians.rs new file mode 100644 index 0000000000..a2c1bc88e6 --- /dev/null +++ b/src/cost_estimates/fee_medians.rs @@ -0,0 +1,359 @@ +use std::cmp; +use std::cmp::Ordering; +use std::convert::TryFrom; +use std::{iter::FromIterator, path::Path}; + +use rusqlite::AndThenRows; +use rusqlite::Transaction as SqlTransaction; +use rusqlite::{ + types::{FromSql, FromSqlError}, + Connection, Error as SqliteError, OptionalExtension, ToSql, +}; +use serde_json::Value as JsonValue; + +use chainstate::stacks::TransactionPayload; +use util::db::sqlite_open; +use util::db::tx_begin_immediate_sqlite; +use util::db::u64_to_sql; + +use vm::costs::ExecutionCost; + +use chainstate::stacks::db::StacksEpochReceipt; +use chainstate::stacks::events::TransactionOrigin; + +use crate::util::db::sql_pragma; +use crate::util::db::table_exists; + +use super::metrics::CostMetric; +use super::FeeRateEstimate; +use super::{EstimatorError, FeeEstimator}; + +use super::metrics::PROPORTION_RESOLUTION; +use cost_estimates::StacksTransactionReceipt; + +const CREATE_TABLE: &'static str = " +CREATE TABLE median_fee_estimator ( + measure_key INTEGER PRIMARY KEY AUTOINCREMENT, + high NUMBER NOT NULL, + middle NUMBER NOT NULL, + low NUMBER NOT NULL +)"; + +const MINIMUM_TX_FEE_RATE: f64 = 1f64; + +/// FeeRateEstimator with the following properties: +/// +/// 1) We use a "weighted" percentile approach for calculating the percentile values. Described +/// below, larger transactions contribute more to the ranking than small transactions. +/// 2) Use "windowed" decay instead of exponential decay. This allows outliers to be forgotten +/// faster, and so reduces the influence of outliers. +/// 3) "Pad" the block, so that any unused spaces is considered to have an associated fee rate of +/// 1f, the minimum. Ignoring the amount of empty space leads to over-estimates because it +/// ignores the fact that there was still space in the block. +pub struct WeightedMedianFeeRateEstimator { + db: Connection, + /// We only look back `window_size` fee rates when averaging past estimates. + window_size: u32, + /// The weight of a "full block" in abstract scalar cost units. This is the weight of + /// a block that is filled *one single* dimension. + full_block_weight: u64, + /// Use this cost metric in fee rate calculations. + metric: M, +} + +/// Convenience struct for passing around this pair. +#[derive(Debug)] +pub struct FeeRateAndWeight { + pub fee_rate: f64, + pub weight: u64, +} + +impl WeightedMedianFeeRateEstimator { + /// Open a fee rate estimator at the given db path. Creates if not existent. + pub fn open(p: &Path, metric: M, window_size: u32) -> Result { + let mut db = sqlite_open( + p, + rusqlite::OpenFlags::SQLITE_OPEN_CREATE | rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE, + false, + )?; + + // check if the db needs to be instantiated regardless of whether or not + // it was newly created: the db itself may be shared with other fee estimators, + // which would not have created the necessary table for this estimator. + let tx = tx_begin_immediate_sqlite(&mut db)?; + Self::instantiate_db(&tx)?; + tx.commit()?; + + Ok(Self { + db, + metric, + window_size, + full_block_weight: PROPORTION_RESOLUTION, + }) + } + + /// Check if the SQL database was already created. Necessary to avoid races if + /// different threads open an estimator at the same time. 
+
+    /// Check if the SQL database was already created. Necessary to avoid races if
+    /// different threads open an estimator at the same time.
+    fn db_already_instantiated(tx: &SqlTransaction) -> Result<bool, SqliteError> {
+        table_exists(tx, "median_fee_estimator")
+    }
+
+    fn instantiate_db(tx: &SqlTransaction) -> Result<(), SqliteError> {
+        if !Self::db_already_instantiated(tx)? {
+            tx.execute(CREATE_TABLE, rusqlite::NO_PARAMS)?;
+        }
+
+        Ok(())
+    }
+
+    fn get_rate_estimates_from_sql(
+        conn: &Connection,
+        window_size: u32,
+    ) -> Result<FeeRateEstimate, EstimatorError> {
+        let sql =
+            "SELECT high, middle, low FROM median_fee_estimator ORDER BY measure_key DESC LIMIT ?";
+        let mut stmt = conn.prepare(sql).expect("SQLite failure");
+
+        // shuttle high, low, middle estimates into these lists, and then sort and find median.
+        let mut highs = Vec::with_capacity(window_size as usize);
+        let mut mids = Vec::with_capacity(window_size as usize);
+        let mut lows = Vec::with_capacity(window_size as usize);
+        let results = stmt
+            .query_and_then::<_, SqliteError, _, _>(&[window_size], |row| {
+                let high: f64 = row.get("high")?;
+                let middle: f64 = row.get("middle")?;
+                let low: f64 = row.get("low")?;
+                Ok((low, middle, high))
+            })
+            .expect("SQLite failure");
+
+        for result in results {
+            let (low, middle, high) = result.expect("SQLite failure");
+            highs.push(high);
+            mids.push(middle);
+            lows.push(low);
+        }
+
+        if highs.is_empty() || mids.is_empty() || lows.is_empty() {
+            return Err(EstimatorError::NoEstimateAvailable);
+        }
+
+        fn median(len: usize, l: Vec<f64>) -> f64 {
+            if len % 2 == 1 {
+                l[len / 2]
+            } else {
+                // note, len / 2 - 1 >= 0, because
+                // len % 2 == 0 and emptiness is checked above
+                (l[len / 2] + l[len / 2 - 1]) / 2f64
+            }
+        }
+
+        // Sort our float arrays. For float values that do not compare easily,
+        // treat them as equals.
+        highs.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal));
+        mids.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal));
+        lows.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal));
+
+        Ok(FeeRateEstimate {
+            high: median(highs.len(), highs),
+            middle: median(mids.len(), mids),
+            low: median(lows.len(), lows),
+        })
+    }
+
+    fn update_estimate(&mut self, new_measure: FeeRateEstimate) {
+        let tx = tx_begin_immediate_sqlite(&mut self.db).expect("SQLite failure");
+        let insert_sql = "INSERT INTO median_fee_estimator
+                          (high, middle, low) VALUES (?, ?, ?)";
+        let deletion_sql = "DELETE FROM median_fee_estimator
+                            WHERE measure_key <= (
+                               SELECT MAX(measure_key) - ?
+                               FROM median_fee_estimator )";
+        tx.execute(
+            insert_sql,
+            rusqlite::params![new_measure.high, new_measure.middle, new_measure.low,],
+        )
+        .expect("SQLite failure");
+        tx.execute(deletion_sql, rusqlite::params![self.window_size])
+            .expect("SQLite failure");
+
+        let estimate = Self::get_rate_estimates_from_sql(&tx, self.window_size);
+        tx.commit().expect("SQLite failure");
+        if let Ok(next_estimate) = estimate {
+            debug!("Updating fee rate estimate for new block";
+                   "new_measure_high" => new_measure.high,
+                   "new_measure_middle" => new_measure.middle,
+                   "new_measure_low" => new_measure.low,
+                   "new_estimate_high" => next_estimate.high,
+                   "new_estimate_middle" => next_estimate.middle,
+                   "new_estimate_low" => next_estimate.low);
+        }
+    }
+}
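To make the window behavior concrete, here is a minimal sketch (not part of the diff) of the per-column median that `get_rate_estimates_from_sql` computes over the last `window_size` rows. Unlike exponential averaging, an outlier block stops influencing the estimate entirely once it ages out of the window:

```rust
// Sketch only: the windowed median above, restated over a plain Vec<f64>.
fn windowed_median(mut window: Vec<f64>) -> f64 {
    // floats that do not compare easily are treated as equal, as in the estimator
    window.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    let len = window.len();
    if len % 2 == 1 {
        window[len / 2]
    } else {
        (window[len / 2] + window[len / 2 - 1]) / 2f64
    }
}

#[test]
fn outlier_is_bounded_by_the_window() {
    // five blocks of "middle" measures, one of them a huge outlier:
    // the median ignores it, and after five more blocks it is dropped entirely.
    assert_eq!(windowed_median(vec![10.0, 12.0, 11.0, 500.0, 13.0]), 12.0);
}
```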
+
+impl<M: CostMetric> FeeEstimator for WeightedMedianFeeRateEstimator<M> {
+    fn notify_block(
+        &mut self,
+        receipt: &StacksEpochReceipt,
+        block_limit: &ExecutionCost,
+    ) -> Result<(), EstimatorError> {
+        // Calculate sorted fee rate for each transaction in the block.
+        let mut working_fee_rates: Vec<FeeRateAndWeight> = receipt
+            .tx_receipts
+            .iter()
+            .filter_map(|tx_receipt| {
+                fee_rate_and_weight_from_receipt(&self.metric, &tx_receipt, block_limit)
+            })
+            .collect();
+
+        // If necessary, add the "minimum" fee rate to fill the block.
+        maybe_add_minimum_fee_rate(&mut working_fee_rates, self.full_block_weight);
+
+        // If the fee rates are non-empty, then compute an update.
+        if working_fee_rates.len() > 0 {
+            // Values must be sorted.
+            working_fee_rates.sort_by(|a, b| {
+                a.fee_rate
+                    .partial_cmp(&b.fee_rate)
+                    .unwrap_or(Ordering::Equal)
+            });
+
+            // Compute the estimate and update.
+            let block_estimate = fee_rate_estimate_from_sorted_weighted_fees(&working_fee_rates);
+            self.update_estimate(block_estimate);
+        }
+
+        Ok(())
+    }
+
+    fn get_rate_estimates(&self) -> Result<FeeRateEstimate, EstimatorError> {
+        Self::get_rate_estimates_from_sql(&self.db, self.window_size)
+    }
+}
+
+/// Computes a `FeeRateEstimate` based on `sorted_fee_rates` using a "weighted percentile" method
+/// described in https://en.wikipedia.org/wiki/Percentile#Weighted_percentile
+///
+/// The percentiles computed are [0.05, 0.5, 0.95].
+///
+/// `sorted_fee_rates` must be non-empty.
+pub fn fee_rate_estimate_from_sorted_weighted_fees(
+    sorted_fee_rates: &[FeeRateAndWeight],
+) -> FeeRateEstimate {
+    assert!(!sorted_fee_rates.is_empty());
+
+    let mut total_weight = 0f64;
+    for rate_and_weight in sorted_fee_rates {
+        total_weight += rate_and_weight.weight as f64;
+    }
+
+    assert!(total_weight > 0f64);
+
+    let mut cumulative_weight = 0f64;
+    let mut percentiles = Vec::new();
+    for rate_and_weight in sorted_fee_rates {
+        cumulative_weight += rate_and_weight.weight as f64;
+        let percentile_n: f64 =
+            (cumulative_weight as f64 - rate_and_weight.weight as f64 / 2f64) / total_weight as f64;
+        percentiles.push(percentile_n);
+    }
+    assert_eq!(percentiles.len(), sorted_fee_rates.len());
+
+    let target_percentiles = vec![0.05, 0.5, 0.95];
+    let mut fees_index = 0; // index into `sorted_fee_rates`
+    let mut values_at_target_percentiles = Vec::new();
+    for target_percentile in target_percentiles {
+        while fees_index < percentiles.len() && percentiles[fees_index] < target_percentile {
+            fees_index += 1;
+        }
+        let v = if fees_index == 0 {
+            sorted_fee_rates[0].fee_rate
+        } else if fees_index == percentiles.len() {
+            sorted_fee_rates.last().unwrap().fee_rate
+        } else {
+            // Notation mimics https://en.wikipedia.org/wiki/Percentile#Weighted_percentile
+            let vk = sorted_fee_rates[fees_index - 1].fee_rate;
+            let vk1 = sorted_fee_rates[fees_index].fee_rate;
+            let pk = percentiles[fees_index - 1];
+            let pk1 = percentiles[fees_index];
+            vk + (target_percentile - pk) / (pk1 - pk) * (vk1 - vk)
+        };
+        values_at_target_percentiles.push(v);
+    }
+
+    FeeRateEstimate {
+        high: values_at_target_percentiles[2],
+        middle: values_at_target_percentiles[1],
+        low: values_at_target_percentiles[0],
+    }
+}
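A worked example may help here. This sketch (not part of the diff) feeds two weighted rates into the function above; each entry's percentile position is its cumulative-weight midpoint, and the targets 0.05/0.5/0.95 either clamp to the end rates or interpolate linearly:

```rust
#[test]
fn weighted_percentile_worked_example() {
    // Sketch only: two-thirds of the block weight paid rate 2.0, one-third paid 8.0.
    let sorted = vec![
        FeeRateAndWeight { fee_rate: 2.0, weight: 20 },
        FeeRateAndWeight { fee_rate: 8.0, weight: 10 },
    ];
    // Midpoint percentiles: p1 = (20 - 10) / 30 = 1/3, p2 = (30 - 5) / 30 = 5/6.
    let est = fee_rate_estimate_from_sorted_weighted_fees(&sorted);
    // 0.05 falls before p1, so it clamps to the first rate.
    assert_eq!(est.low, 2.0);
    // 0.5 falls between p1 and p2: 2.0 + (0.5 - 1/3) / (5/6 - 1/3) * (8.0 - 2.0) = 4.0.
    assert!((est.middle - 4.0).abs() < 1e-9);
    // 0.95 falls past p2, so it clamps to the last rate.
    assert_eq!(est.high, 8.0);
}
```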
+
+/// If the weights in `working_rates` do not add up to `full_block_weight`, add a new entry **in
+/// place** that takes up the remaining space.
+fn maybe_add_minimum_fee_rate(working_rates: &mut Vec<FeeRateAndWeight>, full_block_weight: u64) {
+    let mut total_weight = 0u64;
+    for rate_and_weight in working_rates.into_iter() {
+        total_weight = match total_weight.checked_add(rate_and_weight.weight) {
+            Some(result) => result,
+            None => return,
+        };
+    }
+
+    if total_weight < full_block_weight {
+        let weight_remaining = full_block_weight - total_weight;
+        working_rates.push(FeeRateAndWeight {
+            fee_rate: MINIMUM_TX_FEE_RATE,
+            weight: weight_remaining,
+        })
+    }
+}
+
+/// Depending on the type of the transaction, calculate fee rate and total cost.
+///
+/// Returns None if:
+/// 1) There is no fee rate for the tx.
+/// 2) The calculated fee rate is infinite.
+fn fee_rate_and_weight_from_receipt(
+    metric: &dyn CostMetric,
+    tx_receipt: &StacksTransactionReceipt,
+    block_limit: &ExecutionCost,
+) -> Option<FeeRateAndWeight> {
+    let (payload, fee, tx_size) = match tx_receipt.transaction {
+        TransactionOrigin::Stacks(ref tx) => Some((&tx.payload, tx.get_tx_fee(), tx.tx_len())),
+        TransactionOrigin::Burn(_) => None,
+    }?;
+    let scalar_cost = match payload {
+        TransactionPayload::TokenTransfer(_, _, _) => {
+            // TokenTransfers *only* contribute tx_len, and just have an empty ExecutionCost.
+            metric.from_len(tx_size)
+        }
+        TransactionPayload::Coinbase(_) => {
+            // Coinbase txs are "free", so they don't factor into the fee market.
+            return None;
+        }
+        TransactionPayload::PoisonMicroblock(_, _)
+        | TransactionPayload::ContractCall(_)
+        | TransactionPayload::SmartContract(_) => {
+            // These transaction payload types all "work" the same: they have associated ExecutionCosts
+            // and contribute to the block length limit with their tx_len
+            metric.from_cost_and_len(&tx_receipt.execution_cost, &block_limit, tx_size)
+        }
+    };
+    let denominator = cmp::max(scalar_cost, 1) as f64;
+    let fee_rate = fee as f64 / denominator;
+
+    if fee_rate.is_infinite() {
+        warn!("fee_rate is infinite for {:?}", tx_receipt);
+        None
+    } else {
+        let effective_fee_rate = if fee_rate < MINIMUM_TX_FEE_RATE {
+            MINIMUM_TX_FEE_RATE
+        } else {
+            fee_rate
+        };
+        Some(FeeRateAndWeight {
+            fee_rate: effective_fee_rate,
+            weight: scalar_cost,
+        })
+    }
+}
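To see the block padding in action, here is a sketch (not part of the diff; it assumes it sits in the same module, since the helper is private) of how an almost-empty block is assessed:

```rust
#[test]
fn block_padding_worked_example() {
    // Sketch only: one transaction of weight 100 paid rate 50.0 in a block
    // whose full weight is 10_000. The unused 9_900 units are assessed at
    // MINIMUM_TX_FEE_RATE (1.0), so the resulting median stays near the
    // minimum rather than near 50.0.
    let mut rates = vec![FeeRateAndWeight { fee_rate: 50.0, weight: 100 }];
    maybe_add_minimum_fee_rate(&mut rates, 10_000);
    assert_eq!(rates.len(), 2);
    assert_eq!(rates[1].fee_rate, 1.0);
    assert_eq!(rates[1].weight, 9_900);
}
```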
diff --git a/src/cost_estimates/fee_rate_fuzzer.rs b/src/cost_estimates/fee_rate_fuzzer.rs
new file mode 100644
index 0000000000..2a54ad0d05
--- /dev/null
+++ b/src/cost_estimates/fee_rate_fuzzer.rs
@@ -0,0 +1,92 @@
+use vm::costs::ExecutionCost;
+
+use super::FeeRateEstimate;
+use super::{EstimatorError, FeeEstimator};
+use chainstate::stacks::db::StacksEpochReceipt;
+use rand::distributions::{Distribution, Uniform};
+use rand::rngs::StdRng;
+use rand::thread_rng;
+use rand::RngCore;
+use rand::SeedableRng;
+
+/// The FeeRateFuzzer wraps an underlying FeeEstimator. It passes `notify_block` calls to the
+/// underlying estimator. On `get_rate_estimates` calls, it adds a random fuzz to the result coming
+/// back from the underlying estimator. The fuzz applied is a random fraction of the base value.
+///
+/// Note: We currently use "uniform" random noise instead of "normal" distributed noise to avoid
+/// importing a new crate just for this.
+pub struct FeeRateFuzzer<UnderlyingEstimator: FeeEstimator> {
+    /// We will apply a random "fuzz" on top of the estimates given by this.
+    underlying: UnderlyingEstimator,
+    /// Creator function for a new random generator. For prod, use `thread_rng`. For test,
+    /// pass in a contrived generator.
+    rng_creator: Box<dyn Fn() -> Box<dyn RngCore>>,
+    /// The fuzzed rate will be `R * (1 + alpha)`, where `R` is the original rate, and `alpha` is a
+    /// random number in `[-uniform_fuzz_fraction, uniform_fuzz_fraction]`.
+    /// Note: Must be `0 <= uniform_fuzz_fraction < 1`.
+    uniform_fuzz_fraction: f64,
+}
+
+impl<UnderlyingEstimator: FeeEstimator> FeeRateFuzzer<UnderlyingEstimator> {
+    /// Constructor for production. It uses `thread_rng()` as the random number generator,
+    /// to get truly pseudo-random numbers.
+    pub fn new(
+        underlying: UnderlyingEstimator,
+        uniform_fuzz_fraction: f64,
+    ) -> FeeRateFuzzer<UnderlyingEstimator> {
+        assert!(0.0 <= uniform_fuzz_fraction && uniform_fuzz_fraction < 1.0);
+        let rng_creator = Box::new(|| {
+            let r: Box<dyn RngCore> = Box::new(thread_rng());
+            r
+        });
+        Self {
+            underlying,
+            rng_creator,
+            uniform_fuzz_fraction,
+        }
+    }
+
+    /// Constructor meant for test. The user can pass in a contrived random number generator
+    /// factory function, so that the test is repeatable.
+    pub fn new_custom_creator(
+        underlying: UnderlyingEstimator,
+        rng_creator: Box<dyn Fn() -> Box<dyn RngCore>>,
+        uniform_fuzz_fraction: f64,
+    ) -> FeeRateFuzzer<UnderlyingEstimator> {
+        assert!(0.0 <= uniform_fuzz_fraction && uniform_fuzz_fraction < 1.0);
+        Self {
+            underlying,
+            rng_creator,
+            uniform_fuzz_fraction,
+        }
+    }
+
+    /// Add a uniform fuzz to input. Each element is multiplied by the same random factor.
+    fn fuzz_estimate(&self, input: FeeRateEstimate) -> FeeRateEstimate {
+        if self.uniform_fuzz_fraction > 0f64 {
+            let mut rng = (self.rng_creator)();
+            let uniform = Uniform::new(-self.uniform_fuzz_fraction, self.uniform_fuzz_fraction);
+            let fuzz_scale = 1f64 + uniform.sample(&mut rng);
+            input * fuzz_scale
+        } else {
+            input
+        }
+    }
+}
+
+impl<UnderlyingEstimator: FeeEstimator> FeeEstimator for FeeRateFuzzer<UnderlyingEstimator> {
+    /// Just passes the information straight to `underlying`.
+    fn notify_block(
+        &mut self,
+        receipt: &StacksEpochReceipt,
+        block_limit: &ExecutionCost,
+    ) -> Result<(), EstimatorError> {
+        self.underlying.notify_block(receipt, block_limit)
+    }
+
+    /// Call the underlying estimator and add some fuzz.
+    fn get_rate_estimates(&self) -> Result<FeeRateEstimate, EstimatorError> {
+        let underlying_estimate = self.underlying.get_rate_estimates()?;
+        Ok(self.fuzz_estimate(underlying_estimate))
+    }
+}
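As a usage sketch (not part of the diff): tests can make the fuzz deterministic by handing `new_custom_creator` a seeded generator. Here `make_underlying()` is a hypothetical helper standing in for any `FeeEstimator`:

```rust
// Sketch only: deterministic fuzzing for tests.
fn seeded_fuzzer_sketch() -> Result<FeeRateEstimate, EstimatorError> {
    let rng_creator = Box::new(|| {
        // fixed seed, so the drawn fuzz factor is repeatable across runs
        let rng: Box<dyn RngCore> = Box::new(StdRng::from_seed([7u8; 32]));
        rng
    });
    let fuzzer = FeeRateFuzzer::new_custom_creator(make_underlying(), rng_creator, 0.1);
    // Every estimate comes back as R * (1 + alpha), with alpha drawn
    // uniformly from [-0.1, 0.1] by the seeded generator.
    fuzzer.get_rate_estimates()
}
```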
diff --git a/src/cost_estimates/fee_scalar.rs b/src/cost_estimates/fee_scalar.rs
index e0414aff03..3b46521cba 100644
--- a/src/cost_estimates/fee_scalar.rs
+++ b/src/cost_estimates/fee_scalar.rs
@@ -52,27 +52,18 @@ pub struct ScalarFeeRateEstimator<M: CostMetric> {
 impl<M: CostMetric> ScalarFeeRateEstimator<M> {
     /// Open a fee rate estimator at the given db path. Creates if not existent.
     pub fn open(p: &Path, metric: M) -> Result<Self, SqliteError> {
-        let db =
-            sqlite_open(p, rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE, false).or_else(|e| {
-                if let SqliteError::SqliteFailure(ref internal, _) = e {
-                    if let rusqlite::ErrorCode::CannotOpen = internal.code {
-                        let mut db = sqlite_open(
-                            p,
-                            rusqlite::OpenFlags::SQLITE_OPEN_CREATE
-                                | rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
-                            false,
-                        )?;
-                        let tx = tx_begin_immediate_sqlite(&mut db)?;
-                        Self::instantiate_db(&tx)?;
-                        tx.commit()?;
-                        Ok(db)
-                    } else {
-                        Err(e)
-                    }
-                } else {
-                    Err(e)
-                }
-            })?;
+        let mut db = sqlite_open(
+            p,
+            rusqlite::OpenFlags::SQLITE_OPEN_CREATE | rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
+            false,
+        )?;
+
+        // check if the db needs to be instantiated regardless of whether or not
+        // it was newly created: the db itself may be shared with other fee estimators,
+        // which would not have created the necessary table for this estimator.
+        let tx = tx_begin_immediate_sqlite(&mut db)?;
+        Self::instantiate_db(&tx)?;
+        tx.commit()?;
 
         Ok(Self {
             db,
diff --git a/src/cost_estimates/mod.rs b/src/cost_estimates/mod.rs
index fd481de13c..51402a7203 100644
--- a/src/cost_estimates/mod.rs
+++ b/src/cost_estimates/mod.rs
@@ -13,6 +13,8 @@ use vm::costs::ExecutionCost;
 use burnchains::Txid;
 use chainstate::stacks::db::StacksEpochReceipt;
 
+pub mod fee_medians;
+pub mod fee_rate_fuzzer;
 pub mod fee_scalar;
 pub mod metrics;
 pub mod pessimistic;
diff --git a/src/cost_estimates/pessimistic.rs b/src/cost_estimates/pessimistic.rs
index 3467ceef3d..dd94e5a34f 100644
--- a/src/cost_estimates/pessimistic.rs
+++ b/src/cost_estimates/pessimistic.rs
@@ -232,8 +232,8 @@ impl PessimisticEstimator {
             StacksEpochId::Epoch2_05 => ":2.05",
         };
         format!(
-            "cc{}:{}.{}",
-            epoch_marker, cc.contract_name, cc.function_name
+            "cc{}:{}:{}.{}",
+            epoch_marker, cc.address, cc.contract_name, cc.function_name
         )
     }
     TransactionPayload::SmartContract(_sc) => "contract-publish".to_string(),
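The `pessimistic.rs` hunk above widens the estimator's cache key so that contract calls are tracked per deployer. A sketch of the resulting keys (addresses illustrative; see the new `pessimistic_estimator_contract_owner_separation` test below):

```rust
// Before: "cc:2.05:my-contract.my-func"
//   -- two deployers of `my-contract` shared one estimate.
// After:  "cc:2.05:SP000000000000000000002Q6VF78:my-contract.my-func"
//   -- the deploying address now participates in the key, so estimates
//      for same-named contracts from different principals stay separate.
```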
diff --git a/src/cost_estimates/tests/common.rs b/src/cost_estimates/tests/common.rs
new file mode 100644
index 0000000000..1b529c8d9a
--- /dev/null
+++ b/src/cost_estimates/tests/common.rs
@@ -0,0 +1,51 @@
+use chainstate::burn::ConsensusHash;
+use chainstate::stacks::db::{StacksEpochReceipt, StacksHeaderInfo};
+use chainstate::stacks::events::StacksTransactionReceipt;
+use types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockHeader, StacksWorkScore};
+use types::proof::TrieHash;
+use util::hash::{to_hex, Hash160, Sha512Trunc256Sum};
+use util::vrf::VRFProof;
+use vm::costs::ExecutionCost;
+
+use crate::chainstate::stacks::{
+    CoinbasePayload, StacksTransaction, TokenTransferMemo, TransactionAuth,
+    TransactionContractCall, TransactionPayload, TransactionSpendingCondition, TransactionVersion,
+};
+use crate::core::StacksEpochId;
+
+/// Make a block receipt from `tx_receipts` with some dummy values filled for test.
+#[cfg(test)]
+pub fn make_block_receipt(tx_receipts: Vec<StacksTransactionReceipt>) -> StacksEpochReceipt {
+    StacksEpochReceipt {
+        header: StacksHeaderInfo {
+            anchored_header: StacksBlockHeader {
+                version: 1,
+                total_work: StacksWorkScore { burn: 1, work: 1 },
+                proof: VRFProof::empty(),
+                parent_block: BlockHeaderHash([0; 32]),
+                parent_microblock: BlockHeaderHash([0; 32]),
+                parent_microblock_sequence: 0,
+                tx_merkle_root: Sha512Trunc256Sum([0; 32]),
+                state_index_root: TrieHash([0; 32]),
+                microblock_pubkey_hash: Hash160([0; 20]),
+            },
+            microblock_tail: None,
+            block_height: 1,
+            index_root: TrieHash([0; 32]),
+            consensus_hash: ConsensusHash([2; 20]),
+            burn_header_hash: BurnchainHeaderHash([1; 32]),
+            burn_header_height: 2,
+            burn_header_timestamp: 2,
+            anchored_block_size: 1,
+        },
+        tx_receipts,
+        matured_rewards: vec![],
+        matured_rewards_info: None,
+        parent_microblocks_cost: ExecutionCost::zero(),
+        anchored_block_cost: ExecutionCost::zero(),
+        parent_burn_block_hash: BurnchainHeaderHash([0; 32]),
+        parent_burn_block_height: 1,
+        parent_burn_block_timestamp: 1,
+        evaluated_epoch: StacksEpochId::Epoch20,
+    }
+}
diff --git a/src/cost_estimates/tests/cost_estimators.rs b/src/cost_estimates/tests/cost_estimators.rs
index d6e7203cd3..c3cef67d01 100644
--- a/src/cost_estimates/tests/cost_estimators.rs
+++ b/src/cost_estimates/tests/cost_estimators.rs
@@ -29,6 +29,7 @@ use crate::types::chainstate::StacksAddress;
 use crate::vm::types::{PrincipalData, StandardPrincipalData};
 use crate::vm::Value;
 use core::BLOCK_LIMIT_MAINNET_20;
+use cost_estimates::tests::common::*;
 
 fn instantiate_test_db() -> PessimisticEstimator {
     let mut path = env::temp_dir();
@@ -73,41 +74,6 @@ fn test_empty_pessimistic_estimator() {
     );
 }
 
-fn make_block_receipt(tx_receipts: Vec<StacksTransactionReceipt>) -> StacksEpochReceipt {
-    StacksEpochReceipt {
-        header: StacksHeaderInfo {
-            anchored_header: StacksBlockHeader {
-                version: 1,
-                total_work: StacksWorkScore { burn: 1, work: 1 },
-                proof: VRFProof::empty(),
-                parent_block: BlockHeaderHash([0; 32]),
-                parent_microblock: BlockHeaderHash([0; 32]),
-                parent_microblock_sequence: 0,
-                tx_merkle_root: Sha512Trunc256Sum([0; 32]),
-                state_index_root: TrieHash([0; 32]),
-                microblock_pubkey_hash: Hash160([0; 20]),
-            },
-            microblock_tail: None,
-            block_height: 1,
-            index_root: TrieHash([0; 32]),
-            consensus_hash: ConsensusHash([2; 20]),
-            burn_header_hash: BurnchainHeaderHash([1; 32]),
-            burn_header_height: 2,
-            burn_header_timestamp: 2,
-            anchored_block_size: 1,
-        },
-        tx_receipts,
-        matured_rewards: vec![],
-        matured_rewards_info: None,
-        parent_microblocks_cost: ExecutionCost::zero(),
-        anchored_block_cost: ExecutionCost::zero(),
-        parent_burn_block_hash: BurnchainHeaderHash([0; 32]),
-        parent_burn_block_height: 1,
-        parent_burn_block_timestamp: 1,
-        evaluated_epoch: StacksEpochId::Epoch20,
-    }
-}
-
 fn make_dummy_coinbase_tx() -> StacksTransactionReceipt {
     StacksTransactionReceipt::from_coinbase(StacksTransaction::new(
         TransactionVersion::Mainnet,
@@ -282,6 +248,104 @@ fn test_pessimistic_cost_estimator_declining_average() {
     );
 }
 
+#[test]
+/// This tests the PessimisticEstimator as a unit (i.e., separate
+/// from the trait auto-impl method) by providing payload inputs
+/// to produce the expected pessimistic result (i.e., mean over a 10-sample
+/// window, where the window only updates if the new entry would make a dimension
+/// worse).
+fn pessimistic_estimator_contract_owner_separation() { + let mut estimator = instantiate_test_db(); + let cc_payload_0 = TransactionPayload::ContractCall(TransactionContractCall { + address: StacksAddress::new(0, Hash160([0; 20])), + contract_name: "contract-1".into(), + function_name: "func1".into(), + function_args: vec![], + }); + let cc_payload_1 = TransactionPayload::ContractCall(TransactionContractCall { + address: StacksAddress::new(0, Hash160([1; 20])), + contract_name: "contract-1".into(), + function_name: "func1".into(), + function_args: vec![], + }); + + estimator + .notify_event( + &cc_payload_0, + &ExecutionCost { + write_length: 1, + write_count: 1, + read_length: 1, + read_count: 1, + runtime: 1, + }, + &BLOCK_LIMIT_MAINNET_20, + &StacksEpochId::Epoch20, + ) + .expect("Should be able to process event"); + + assert_eq!( + estimator.estimate_cost(&cc_payload_1, &StacksEpochId::Epoch20,), + Err(EstimatorError::NoEstimateAvailable) + ); + + assert_eq!( + estimator + .estimate_cost(&cc_payload_0, &StacksEpochId::Epoch20,) + .expect("Should be able to provide cost estimate now"), + ExecutionCost { + write_length: 1, + write_count: 1, + read_length: 1, + read_count: 1, + runtime: 1, + } + ); + + estimator + .notify_event( + &cc_payload_1, + &ExecutionCost { + write_length: 5, + write_count: 5, + read_length: 5, + read_count: 5, + runtime: 5, + }, + &BLOCK_LIMIT_MAINNET_20, + &StacksEpochId::Epoch20, + ) + .expect("Should be able to process event"); + + // cc_payload_0 should not be affected + assert_eq!( + estimator + .estimate_cost(&cc_payload_0, &StacksEpochId::Epoch20,) + .expect("Should be able to provide cost estimate now"), + ExecutionCost { + write_length: 1, + write_count: 1, + read_length: 1, + read_count: 1, + runtime: 1, + } + ); + + // cc_payload_1 should be updated + assert_eq!( + estimator + .estimate_cost(&cc_payload_1, &StacksEpochId::Epoch20,) + .expect("Should be able to provide cost estimate now"), + ExecutionCost { + write_length: 5, + write_count: 5, + read_length: 5, + read_count: 5, + runtime: 5, + } + ); +} + #[test] /// This tests the PessimisticEstimator as a unit (i.e., separate /// from the trait auto-impl method) by providing payload inputs diff --git a/src/cost_estimates/tests/fee_medians.rs b/src/cost_estimates/tests/fee_medians.rs new file mode 100644 index 0000000000..cc67423aab --- /dev/null +++ b/src/cost_estimates/tests/fee_medians.rs @@ -0,0 +1,393 @@ +use std::{env, path::PathBuf}; +use time::Instant; + +use rand::seq::SliceRandom; +use rand::Rng; + +use cost_estimates::metrics::CostMetric; +use cost_estimates::{EstimatorError, FeeEstimator}; +use vm::costs::ExecutionCost; + +use chainstate::burn::ConsensusHash; +use chainstate::stacks::db::{StacksEpochReceipt, StacksHeaderInfo}; +use chainstate::stacks::events::StacksTransactionReceipt; +use types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockHeader, StacksWorkScore}; +use types::proof::TrieHash; +use util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; +use util::vrf::VRFProof; + +use crate::chainstate::stacks::{ + CoinbasePayload, StacksTransaction, TokenTransferMemo, TransactionAuth, + TransactionContractCall, TransactionPayload, TransactionSpendingCondition, TransactionVersion, +}; +use crate::core::StacksEpochId; +use crate::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; +use crate::cost_estimates::metrics::ProportionalDotProduct; +use crate::cost_estimates::FeeRateEstimate; +use crate::types::chainstate::StacksAddress; +use crate::vm::types::{PrincipalData, 
StandardPrincipalData}; +use crate::vm::Value; +use cost_estimates::fee_medians::fee_rate_estimate_from_sorted_weighted_fees; +use cost_estimates::fee_medians::FeeRateAndWeight; +use cost_estimates::tests::common::*; + +/// Returns true iff `b` is within `0.1%` of `a`. +fn is_close_f64(a: f64, b: f64) -> bool { + let error = (a - b).abs() / a.abs(); + error < 0.001 +} + +/// Returns `true` iff each value in `left` "close" to its counterpart in `right`. +fn is_close(left: FeeRateEstimate, right: FeeRateEstimate) -> bool { + let is_ok = is_close_f64(left.high, right.high) + && is_close_f64(left.middle, right.middle) + && is_close_f64(left.low, right.low); + if !is_ok { + warn!("ExecutionCost's are not close. {:?} vs {:?}", left, right); + } + is_ok +} + +fn instantiate_test_db(m: CM) -> WeightedMedianFeeRateEstimator { + let mut path = env::temp_dir(); + let random_bytes = rand::thread_rng().gen::<[u8; 32]>(); + path.push(&format!("fee_db_{}.sqlite", &to_hex(&random_bytes)[0..8])); + + let window_size = 5; + WeightedMedianFeeRateEstimator::open(&path, m, window_size) + .expect("Test failure: could not open fee rate DB") +} + +fn make_dummy_coinbase_tx() -> StacksTransaction { + StacksTransaction::new( + TransactionVersion::Mainnet, + TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), + TransactionPayload::Coinbase(CoinbasePayload([0; 32])), + ) +} + +fn make_dummy_cc_tx(fee: u64, execution_cost: &ExecutionCost) -> StacksTransactionReceipt { + let mut tx = StacksTransaction::new( + TransactionVersion::Mainnet, + TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), + TransactionPayload::ContractCall(TransactionContractCall { + address: StacksAddress::new(0, Hash160([0; 20])), + contract_name: "cc-dummy".into(), + function_name: "func-name".into(), + function_args: vec![], + }), + ); + tx.set_tx_fee(fee); + StacksTransactionReceipt::from_contract_call( + tx, + vec![], + Value::okay(Value::Bool(true)).unwrap(), + 0, + execution_cost.clone(), + ) +} + +const block_limit: ExecutionCost = ExecutionCost { + write_length: 100, + write_count: 100, + read_length: 100, + read_count: 100, + runtime: 100, +}; + +const tenth_operation_cost: ExecutionCost = ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 10, +}; + +const half_operation_cost: ExecutionCost = ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 50, +}; + +// The scalar cost of `make_dummy_cc_tx(_, &tenth_operation_cost)`. +const tenth_operation_cost_basis: u64 = 1164; + +// The scalar cost of `make_dummy_cc_tx(_, &half_operation_cost)`. +const half_operation_cost_basis: u64 = 5164; + +/// Tests that we have no estimate available until we `notify`. +#[test] +fn test_empty_fee_estimator() { + let metric = ProportionalDotProduct::new(10_000); + let estimator = instantiate_test_db(metric); + assert_eq!( + estimator + .get_rate_estimates() + .expect_err("Empty rate estimator should error."), + EstimatorError::NoEstimateAvailable + ); +} + +/// If we do not have any transactions in a block, we should fill the space +/// with a transaction with fee rate 1f. This means that, for a totally empty +/// block, the fee rate should be 1f. 
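The padding rule described above can be pictured as appending one synthetic entry at the minimum fee rate for whatever block weight went unused. A sketch under the assumption that weights come from the ProportionalDotProduct metric with the 10,000 scale used by these tests; pad_block and this local FeeRateAndWeight are illustrative, not the estimator's real internals:

struct FeeRateAndWeight {
    fee_rate: f64,
    weight: u64,
}

fn pad_block(mut observed: Vec<FeeRateAndWeight>, block_weight: u64) -> Vec<FeeRateAndWeight> {
    let consumed: u64 = observed.iter().map(|fw| fw.weight).sum();
    if consumed < block_weight {
        // Empty space is assessed as if it had paid the minimum fee rate of 1.
        observed.push(FeeRateAndWeight {
            fee_rate: 1.0,
            weight: block_weight - consumed,
        });
    }
    observed
}

This is why test_one_block_partially_filled below expects low = 1 and a middle near the minimum (about 2.05) rather than near the paid rate of 10: a mostly empty block is dominated by the 1.0-rate padding entry.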
+#[test] +fn test_empty_block_returns_minimum() { + let metric = ProportionalDotProduct::new(10_000); + let mut estimator = instantiate_test_db(metric); + + let empty_block_receipt = make_block_receipt(vec![]); + estimator + .notify_block(&empty_block_receipt, &block_limit) + .expect("Should be able to process an empty block"); + + assert!(is_close( + estimator + .get_rate_estimates() + .expect("Should be able to create estimate now"), + FeeRateEstimate { + high: 1f64, + middle: 1f64, + low: 1f64 + } + )); +} + +/// A block that is only a very small minority filled should reflect the paid value, +/// but be dominated by the padded fee rate. +#[test] +fn test_one_block_partially_filled() { + let metric = ProportionalDotProduct::new(10_000); + let mut estimator = instantiate_test_db(metric); + + let single_tx_receipt = make_block_receipt(vec![ + StacksTransactionReceipt::from_coinbase(make_dummy_coinbase_tx()), + make_dummy_cc_tx(10 * tenth_operation_cost_basis, &tenth_operation_cost), + ]); + + estimator + .notify_block(&single_tx_receipt, &block_limit) + .expect("Should be able to process block receipt"); + + // The higher fee is 10, because of the operation paying 10f per cost. + // The middle fee should be near 1, because the block is mostly empty, and dominated by the + // minimum fee rate padding. + // The lower fee is 1 because of the minimum fee rate padding. + assert!(is_close( + estimator + .get_rate_estimates() + .expect("Should be able to create estimate now"), + FeeRateEstimate { + high: 10.0f64, + middle: 2.0475999999999996f64, + low: 1f64 + } + )); +} + +/// A block that is mostly filled should create an estimate dominated by the transactions paid, and +/// the padding should only affect `low`. +#[test] +fn test_one_block_mostly_filled() { + let metric = ProportionalDotProduct::new(10_000); + let mut estimator = instantiate_test_db(metric); + + let single_tx_receipt = make_block_receipt(vec![ + StacksTransactionReceipt::from_coinbase(make_dummy_coinbase_tx()), + make_dummy_cc_tx(10 * half_operation_cost_basis, &half_operation_cost), + make_dummy_cc_tx(10 * tenth_operation_cost_basis, &tenth_operation_cost), + make_dummy_cc_tx(10 * tenth_operation_cost_basis, &tenth_operation_cost), + make_dummy_cc_tx(10 * tenth_operation_cost_basis, &tenth_operation_cost), + ]); + + estimator + .notify_block(&single_tx_receipt, &block_limit) + .expect("Should be able to process block receipt"); + + // The higher fee is 10, because that's what we paid. + // The middle fee should be 10, because the block is mostly filled. + // The lower fee is 1 because of the minimum fee rate padding. + assert!(is_close( + estimator + .get_rate_estimates() + .expect("Should be able to create estimate now"), + FeeRateEstimate { + high: 10.0f64, + middle: 10.0f64, + low: 1f64 + } + )); +} + +/// Tests the effect of adding blocks over time. We add five blocks with an easy to calculate +/// median. +/// +/// We add 5 blocks with window size 5 so none should be forgotten. 
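For intuition on the constants in the fee_rate_estimate_from_sorted_weighted_fees tests below: the expected values are consistent with low/middle/high being the weighted 5th/50th/95th percentiles, averaging the two neighboring entries when a percentile boundary lands exactly between them. A sketch of the equal-weight special case, inferred from the test vectors rather than taken from the production code:

fn equal_weight_percentile(sorted_rates: &[f64], pct: f64) -> f64 {
    let pos = pct * sorted_rates.len() as f64;
    let idx = pos as usize;
    if pos.fract() == 0.0 && idx > 0 && idx < sorted_rates.len() {
        // Boundary falls exactly between two entries: average them.
        (sorted_rates[idx - 1] + sorted_rates[idx]) / 2.0
    } else {
        sorted_rates[idx.min(sorted_rates.len() - 1)]
    }
}

fn main() {
    let rates: Vec<f64> = (1..=20).map(|i| i as f64).collect();
    assert_eq!(equal_weight_percentile(&rates, 0.05), 1.5); // low
    assert_eq!(equal_weight_percentile(&rates, 0.50), 10.5); // middle
    assert_eq!(equal_weight_percentile(&rates, 0.95), 19.5); // high
}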
+#[test] +fn test_window_size_forget_nothing() { + let metric = ProportionalDotProduct::new(10_000); + let mut estimator = instantiate_test_db(metric); + + for i in 1..6 { + let single_tx_receipt = make_block_receipt(vec![ + StacksTransactionReceipt::from_coinbase(make_dummy_coinbase_tx()), + make_dummy_cc_tx(i * 10 * half_operation_cost_basis, &half_operation_cost), + make_dummy_cc_tx(i * 10 * half_operation_cost_basis, &half_operation_cost), + ]); + + estimator + .notify_block(&single_tx_receipt, &block_limit) + .expect("Should be able to process block receipt"); + } + + // The fee should be 30, because it's the median of [10, 20, .., 50]. + assert!(is_close( + estimator + .get_rate_estimates() + .expect("Should be able to create estimate now"), + FeeRateEstimate { + high: 30f64, + middle: 30f64, + low: 30f64 + } + )); +} + +/// Tests the effect of adding blocks over time. We add five blocks with an easy to calculate +/// median. +/// +/// We add 10 blocks with window size 5 so the first 5 should be forgotten. +#[test] +fn test_window_size_forget_something() { + let metric = ProportionalDotProduct::new(10_000); + let mut estimator = instantiate_test_db(metric); + + for i in 1..11 { + let single_tx_receipt = make_block_receipt(vec![ + StacksTransactionReceipt::from_coinbase(make_dummy_coinbase_tx()), + make_dummy_cc_tx(i * 10 * half_operation_cost_basis, &half_operation_cost), + make_dummy_cc_tx(i * 10 * half_operation_cost_basis, &half_operation_cost), + ]); + + estimator + .notify_block(&single_tx_receipt, &block_limit) + .expect("Should be able to process block receipt"); + } + + // The fee should be 80, because we forgot the first five estimates. + assert!(is_close( + estimator + .get_rate_estimates() + .expect("Should be able to create estimate now"), + FeeRateEstimate { + high: 80f64, + middle: 80f64, + low: 80f64 + } + )); +} + +#[test] +fn test_fee_rate_estimate_5_vs_95() { + assert_eq!( + fee_rate_estimate_from_sorted_weighted_fees(&vec![ + FeeRateAndWeight { + fee_rate: 1f64, + weight: 5u64, + }, + FeeRateAndWeight { + fee_rate: 10f64, + weight: 95u64, + }, + ]), + FeeRateEstimate { + high: 10.0f64, + middle: 9.549999999999999f64, + low: 1.45f64 + } + ); +} + +#[test] +fn test_fee_rate_estimate_50_vs_50() { + assert_eq!( + fee_rate_estimate_from_sorted_weighted_fees(&vec![ + FeeRateAndWeight { + fee_rate: 1f64, + weight: 50u64, + }, + FeeRateAndWeight { + fee_rate: 10f64, + weight: 50u64, + }, + ]), + FeeRateEstimate { + high: 10.0f64, + middle: 5.5f64, + low: 1.0f64 + } + ); +} + +#[test] +fn test_fee_rate_estimate_95_vs_5() { + assert_eq!( + fee_rate_estimate_from_sorted_weighted_fees(&vec![ + FeeRateAndWeight { + fee_rate: 1f64, + weight: 95u64, + }, + FeeRateAndWeight { + fee_rate: 10f64, + weight: 5u64, + }, + ]), + FeeRateEstimate { + high: 9.549999999999999f64, + middle: 1.4500000000000004f64, + low: 1.0f64 + } + ); +} + +#[test] +fn test_fee_rate_estimate_20() { + let mut pairs = vec![]; + for i in 1..21 { + pairs.push(FeeRateAndWeight { + fee_rate: 1f64 * i as f64, + weight: 1u64, + }) + } + + assert_eq!( + fee_rate_estimate_from_sorted_weighted_fees(&pairs), + FeeRateEstimate { + high: 19.5f64, + middle: 10.5f64, + low: 1.5f64 + } + ); +} + +#[test] +fn test_fee_rate_estimate_100() { + let mut pairs = vec![]; + for i in 1..101 { + pairs.push(FeeRateAndWeight { + fee_rate: 1f64 * i as f64, + weight: 1u64, + }) + } + + assert_eq!( + fee_rate_estimate_from_sorted_weighted_fees(&pairs), + FeeRateEstimate { + high: 95.5f64, + middle: 50.5f64, + low: 5.5f64 + } + ); +} diff 
--git a/src/cost_estimates/tests/fee_rate_fuzzer.rs b/src/cost_estimates/tests/fee_rate_fuzzer.rs new file mode 100644 index 0000000000..70f1fdbf30 --- /dev/null +++ b/src/cost_estimates/tests/fee_rate_fuzzer.rs @@ -0,0 +1,153 @@ +use cost_estimates::metrics::CostMetric; +use cost_estimates::{EstimatorError, FeeEstimator}; +use vm::costs::ExecutionCost; + +use chainstate::burn::ConsensusHash; +use chainstate::stacks::db::{StacksEpochReceipt, StacksHeaderInfo}; +use chainstate::stacks::events::StacksTransactionReceipt; +use types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockHeader, StacksWorkScore}; +use types::proof::TrieHash; +use util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; +use util::vrf::VRFProof; + +use crate::chainstate::stacks::{ + CoinbasePayload, StacksTransaction, TokenTransferMemo, TransactionAuth, + TransactionContractCall, TransactionPayload, TransactionSpendingCondition, TransactionVersion, +}; +use crate::core::StacksEpochId; +use crate::cost_estimates::FeeRateEstimate; +use cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; +use rand::rngs::StdRng; +use rand::thread_rng; +use rand::RngCore; +use rand::SeedableRng; + +use cost_estimates::tests::common::make_block_receipt; + +struct ConstantFeeEstimator {} + +/// Returns a constant fee rate estimate. +impl FeeEstimator for ConstantFeeEstimator { + fn notify_block( + &mut self, + receipt: &StacksEpochReceipt, + block_limit: &ExecutionCost, + ) -> Result<(), EstimatorError> { + Ok(()) + } + + fn get_rate_estimates(&self) -> Result { + Ok(FeeRateEstimate { + high: 95f64, + middle: 50f64, + low: 5f64, + }) + } +} + +/// Test the fuzzer using a fixed random seed. +#[test] +fn test_fuzzing_seed1() { + let mock_estimator = ConstantFeeEstimator {}; + let rng_creator = Box::new(|| { + let seed = [0u8; 32]; + let rng: StdRng = SeedableRng::from_seed(seed); + let r: Box = Box::new(rng); + r + }); + let fuzzed_estimator = FeeRateFuzzer::new_custom_creator(mock_estimator, rng_creator, 0.1); + + assert_eq!( + fuzzed_estimator + .get_rate_estimates() + .expect("Estimate should exist."), + FeeRateEstimate { + high: 96.20545857700169f64, + middle: 50.63445188263247f64, + low: 5.0634451882632465f64 + } + ); +} + +/// Test the fuzzer using a fixed random seed. Uses a different seed than test_fuzzing_seed1. +#[test] +fn test_fuzzing_seed2() { + let mock_estimator = ConstantFeeEstimator {}; + let rng_creator = Box::new(|| { + let seed = [1u8; 32]; + let rng: StdRng = SeedableRng::from_seed(seed); + let r: Box = Box::new(rng); + r + }); + let fuzzed_estimator = FeeRateFuzzer::new_custom_creator(mock_estimator, rng_creator, 0.1); + + assert_eq!( + fuzzed_estimator + .get_rate_estimates() + .expect("Estimate should exist."), + FeeRateEstimate { + high: 100.08112623179122f64, + middle: 52.67427696410064f64, + low: 5.267427696410064f64 + } + ); +} + +struct CountingFeeEstimator { + counter: u64, +} + +/// This class "counts" the number of times `notify_block` has been called, and returns this as the +/// estimate. +impl FeeEstimator for CountingFeeEstimator { + fn notify_block( + &mut self, + receipt: &StacksEpochReceipt, + block_limit: &ExecutionCost, + ) -> Result<(), EstimatorError> { + self.counter += 1; + Ok(()) + } + + fn get_rate_estimates(&self) -> Result { + Ok(FeeRateEstimate { + high: self.counter as f64, + middle: self.counter as f64, + low: self.counter as f64, + }) + } +} + +/// Tests that the receipt is passed through in `notify_block`. 
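In the two seeded tests above, all three rates move by the same relative amount (for seed [0u8; 32], 50 becomes 50.634... and 95 becomes 96.205..., both about +1.27%), which suggests the fuzzer draws one uniform factor per call, bounded by the 0.1 argument, and scales the whole estimate by it. A hedged sketch of that shape, not the actual FeeRateFuzzer implementation:

use rand::RngCore;

struct Estimate {
    high: f64,
    middle: f64,
    low: f64,
}

// Apply one shared multiplicative perturbation drawn from [-bound, +bound).
fn fuzz(est: &Estimate, bound: f64, rng: &mut dyn RngCore) -> Estimate {
    // Uniform in [0, 1) from the top 53 bits of a u64 (avoids rand version quirks).
    let u01 = (rng.next_u64() >> 11) as f64 / (1u64 << 53) as f64;
    let scale = 1.0 + (2.0 * u01 - 1.0) * bound;
    Estimate {
        high: est.high * scale,
        middle: est.middle * scale,
        low: est.low * scale,
    }
}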
+#[test] +fn test_notify_pass_through() { + let mock_estimator = CountingFeeEstimator { counter: 0 }; + let rng_creator = Box::new(|| { + let seed = [1u8; 32]; + let rng: StdRng = SeedableRng::from_seed(seed); + let r: Box = Box::new(rng); + r + }); + let mut fuzzed_estimator = FeeRateFuzzer::new_custom_creator(mock_estimator, rng_creator, 0.1); + + let receipt = make_block_receipt(vec![]); + fuzzed_estimator + .notify_block(&receipt, &ExecutionCost::max_value()) + .expect("notify_block should succeed here."); + fuzzed_estimator + .notify_block(&receipt, &ExecutionCost::max_value()) + .expect("notify_block should succeed here."); + + // We've called `notify_block` twice, so the values returned are 2f, with some noise from the + // fuzzer. + assert_eq!( + fuzzed_estimator + .get_rate_estimates() + .expect("Estimate should exist."), + FeeRateEstimate { + high: 2.1069710785640257f64, + middle: 2.1069710785640257f64, + low: 2.1069710785640257f64 + }, + ); +} diff --git a/src/cost_estimates/tests/fee_scalar.rs b/src/cost_estimates/tests/fee_scalar.rs index f440384c76..5ad4566586 100644 --- a/src/cost_estimates/tests/fee_scalar.rs +++ b/src/cost_estimates/tests/fee_scalar.rs @@ -27,6 +27,8 @@ use crate::types::chainstate::StacksAddress; use crate::vm::types::{PrincipalData, StandardPrincipalData}; use crate::vm::Value; +use cost_estimates::tests::common::make_block_receipt; + fn instantiate_test_db(m: CM) -> ScalarFeeRateEstimator { let mut path = env::temp_dir(); let random_bytes = rand::thread_rng().gen::<[u8; 32]>(); @@ -71,41 +73,6 @@ fn test_empty_fee_estimator() { ); } -fn make_block_receipt(tx_receipts: Vec) -> StacksEpochReceipt { - StacksEpochReceipt { - header: StacksHeaderInfo { - anchored_header: StacksBlockHeader { - version: 1, - total_work: StacksWorkScore { burn: 1, work: 1 }, - proof: VRFProof::empty(), - parent_block: BlockHeaderHash([0; 32]), - parent_microblock: BlockHeaderHash([0; 32]), - parent_microblock_sequence: 0, - tx_merkle_root: Sha512Trunc256Sum([0; 32]), - state_index_root: TrieHash([0; 32]), - microblock_pubkey_hash: Hash160([0; 20]), - }, - microblock_tail: None, - block_height: 1, - index_root: TrieHash([0; 32]), - consensus_hash: ConsensusHash([2; 20]), - burn_header_hash: BurnchainHeaderHash([1; 32]), - burn_header_height: 2, - burn_header_timestamp: 2, - anchored_block_size: 1, - }, - tx_receipts, - matured_rewards: vec![], - matured_rewards_info: None, - parent_microblocks_cost: ExecutionCost::zero(), - anchored_block_cost: ExecutionCost::zero(), - parent_burn_block_hash: BurnchainHeaderHash([0; 32]), - parent_burn_block_height: 1, - parent_burn_block_timestamp: 1, - evaluated_epoch: StacksEpochId::Epoch20, - } -} - fn make_dummy_coinbase_tx() -> StacksTransaction { StacksTransaction::new( TransactionVersion::Mainnet, diff --git a/src/cost_estimates/tests/mod.rs b/src/cost_estimates/tests/mod.rs index 28ade005c5..8b5ce592b1 100644 --- a/src/cost_estimates/tests/mod.rs +++ b/src/cost_estimates/tests/mod.rs @@ -1,6 +1,9 @@ use cost_estimates::FeeRateEstimate; +pub mod common; pub mod cost_estimators; +pub mod fee_medians; +pub mod fee_rate_fuzzer; pub mod fee_scalar; pub mod metrics; diff --git a/src/lib.rs b/src/lib.rs index 455367f807..df4b3ffa2e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -37,6 +37,7 @@ extern crate regex; extern crate ripemd160; extern crate sha2; extern crate sha3; +extern crate siphasher; extern crate time; extern crate url; diff --git a/src/libclarity.rs b/src/libclarity.rs index 22b506f431..438c7cf7cd 100644 --- a/src/libclarity.rs +++ 
b/src/libclarity.rs @@ -28,7 +28,6 @@ extern crate rand_chacha; extern crate rusqlite; extern crate secp256k1; extern crate serde; -extern crate tini; #[macro_use] extern crate lazy_static; extern crate integer_sqrt; @@ -38,6 +37,7 @@ extern crate regex; extern crate ripemd160; extern crate sha2; extern crate sha3; +extern crate siphasher; extern crate time; extern crate url; diff --git a/src/monitoring/mod.rs b/src/monitoring/mod.rs index bc38e98fde..4d9f6fa49e 100644 --- a/src/monitoring/mod.rs +++ b/src/monitoring/mod.rs @@ -28,12 +28,14 @@ use crate::{ }, }; use burnchains::BurnchainSigner; +use std::convert::TryInto; use std::error::Error; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Mutex; use util::db::sqlite_open; use util::db::Error as DatabaseError; use util::uint::{Uint256, Uint512}; +use vm::costs::ExecutionCost; #[cfg(feature = "monitoring_prom")] mod prometheus; @@ -104,6 +106,27 @@ pub fn increment_btc_blocks_received_counter() { prometheus::BTC_BLOCKS_RECEIVED_COUNTER.inc(); } +/// Log `execution_cost` as a ratio of `block_limit`. +#[allow(unused_variables)] +pub fn set_last_execution_cost_observed( + execution_cost: &ExecutionCost, + block_limit: &ExecutionCost, +) { + #[cfg(feature = "monitoring_prom")] + { + prometheus::LAST_BLOCK_READ_COUNT + .set(execution_cost.read_count as f64 / block_limit.read_count as f64); + prometheus::LAST_BLOCK_WRITE_COUNT + .set(execution_cost.write_count as f64 / block_limit.read_count as f64); + prometheus::LAST_BLOCK_READ_LENGTH + .set(execution_cost.read_length as f64 / block_limit.read_length as f64); + prometheus::LAST_BLOCK_WRITE_LENGTH + .set(execution_cost.write_length as f64 / block_limit.write_length as f64); + prometheus::LAST_BLOCK_RUNTIME + .set(execution_cost.runtime as f64 / block_limit.runtime as f64); + } +} + pub fn increment_btc_ops_sent_counter() { #[cfg(feature = "monitoring_prom")] prometheus::BTC_OPS_SENT_COUNTER.inc(); @@ -397,6 +420,7 @@ pub fn set_burnchain_signer(signer: BurnchainSigner) -> Result<(), SetGlobalBurn Ok(()) } +#[allow(unreachable_code)] pub fn get_burnchain_signer() -> Option { #[cfg(feature = "monitoring_prom")] { diff --git a/src/monitoring/prometheus.rs b/src/monitoring/prometheus.rs index 0154e8ff55..9a9d7f8f46 100644 --- a/src/monitoring/prometheus.rs +++ b/src/monitoring/prometheus.rs @@ -91,6 +91,31 @@ lazy_static! { "Total number of error logs emitted by node" )).unwrap(); + pub static ref LAST_BLOCK_READ_COUNT: Gauge = register_gauge!(opts!( + "stacks_node_last_block_read_count", + "`execution_cost_read_count` for the last block observed." + )).unwrap(); + + pub static ref LAST_BLOCK_WRITE_COUNT: Gauge = register_gauge!(opts!( + "stacks_node_last_block_write_count", + "`execution_cost_write_count` for the last block observed." + )).unwrap(); + + pub static ref LAST_BLOCK_READ_LENGTH: Gauge = register_gauge!(opts!( + "stacks_node_last_block_read_length", + "`execution_cost_read_length` for the last block observed." + )).unwrap(); + + pub static ref LAST_BLOCK_WRITE_LENGTH: Gauge = register_gauge!(opts!( + "stacks_node_last_block_write_length", + "`execution_cost_write_length` for the last block observed." + )).unwrap(); + + pub static ref LAST_BLOCK_RUNTIME: Gauge = register_gauge!(opts!( + "stacks_node_last_block_runtime", + "`execution_cost_runtime` for the last block observed." 
+ )).unwrap(); + pub static ref ACTIVE_MINERS_COUNT_GAUGE: IntGauge = register_int_gauge!(opts!( "stacks_node_active_miners_total", "Total number of active miners" diff --git a/src/net/atlas/db.rs b/src/net/atlas/db.rs index c145fffb94..928f464f12 100644 --- a/src/net/atlas/db.rs +++ b/src/net/atlas/db.rs @@ -55,7 +55,6 @@ const ATLASDB_INITIAL_SCHEMA: &'static [&'static str] = &[ was_instantiated INTEGER NOT NULL, created_at INTEGER NOT NULL );"#, - "CREATE INDEX index_was_instantiated ON attachments(was_instantiated);", r#" CREATE TABLE attachment_instances( content_hash TEXT, @@ -72,6 +71,9 @@ const ATLASDB_INITIAL_SCHEMA: &'static [&'static str] = &[ "CREATE TABLE db_config(version TEXT NOT NULL);", ]; +const ATLASDB_INDEXES: &'static [&'static str] = + &["CREATE INDEX IF NOT EXISTS index_was_instantiated ON attachments(was_instantiated);"]; + impl FromRow for Attachment { fn from_row<'a>(row: &'a Row) -> Result { let content: Vec = row.get_unwrap("content"); @@ -120,6 +122,15 @@ pub struct AtlasDB { } impl AtlasDB { + fn add_indexes(&mut self) -> Result<(), db_error> { + let tx = self.tx_begin()?; + for row_text in ATLASDB_INDEXES { + tx.execute_batch(row_text).map_err(db_error::SqliteError)?; + } + tx.commit()?; + Ok(()) + } + fn instantiate(&mut self) -> Result<(), db_error> { let genesis_attachments = self.atlas_config.genesis_attachments.take(); @@ -152,6 +163,7 @@ impl AtlasDB { tx.commit().map_err(db_error::SqliteError)?; + self.add_indexes()?; Ok(()) } @@ -208,6 +220,9 @@ impl AtlasDB { if create_flag { db.instantiate()?; } + if readwrite { + db.add_indexes()?; + } Ok(db) } diff --git a/src/net/atlas/download.rs b/src/net/atlas/download.rs index 0fec7a721b..33a6f78ce3 100644 --- a/src/net/atlas/download.rs +++ b/src/net/atlas/download.rs @@ -47,6 +47,8 @@ use rand::thread_rng; use rand::Rng; use std::cmp; +use core::mempool::MemPoolDB; + #[derive(Debug)] pub struct AttachmentsDownloader { priority_queue: BinaryHeap, @@ -100,6 +102,7 @@ impl AttachmentsDownloader { pub fn run( &mut self, dns_client: &mut DNSClient, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, network: &mut PeerNetwork, ) -> Result<(Vec<(AttachmentInstance, Attachment)>, Vec), net_error> { @@ -158,8 +161,13 @@ impl AttachmentsDownloader { } }; - let mut progress = - AttachmentsBatchStateMachine::try_proceed(ongoing_fsm, dns_client, network, chainstate); + let mut progress = AttachmentsBatchStateMachine::try_proceed( + ongoing_fsm, + dns_client, + network, + mempool, + chainstate, + ); match progress { AttachmentsBatchStateMachine::Done(ref mut context) => { @@ -547,6 +555,7 @@ impl AttachmentsBatchStateMachine { fsm: AttachmentsBatchStateMachine, dns_client: &mut DNSClient, network: &mut PeerNetwork, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, ) -> AttachmentsBatchStateMachine { match fsm { @@ -582,6 +591,7 @@ impl AttachmentsBatchStateMachine { attachments_invs_requests, &context.dns_lookups, network, + mempool, chainstate, &context.connection_options, ) { @@ -606,6 +616,7 @@ impl AttachmentsBatchStateMachine { attachments_requests, &context.dns_lookups, network, + mempool, chainstate, &context.connection_options, ) { @@ -768,6 +779,7 @@ impl BatchedRequestsState fsm: BatchedRequestsState, dns_lookups: &HashMap>>, network: &mut PeerNetwork, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, connection_options: &ConnectionOptions, ) -> BatchedRequestsState { @@ -794,6 +806,7 @@ impl BatchedRequestsState network, dns_lookups, &mut requestables, + mempool, chainstate, ); if let 
Some((request, event_id)) = res { diff --git a/src/net/chat.rs b/src/net/chat.rs index 7dce3ff881..4b505287f1 100644 --- a/src/net/chat.rs +++ b/src/net/chat.rs @@ -621,6 +621,13 @@ impl ConversationP2P { self.burnchain_stable_tip_burn_header_hash.clone() } + /// Does this remote neighbor support the mempool query interface? It will if it has both + /// RELAY and RPC bits set. + pub fn supports_mempool_query(peer_services: u16) -> bool { + let expected_bits = (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16); + (peer_services & expected_bits) == expected_bits + } + /// Determine whether or not a given (height, burn_header_hash) pair _disagrees_ with our /// burnchain view. If it does, return true. If it doesn't (including if the given pair is /// simply absent from the chain_view), then return False. @@ -1126,18 +1133,19 @@ impl ConversationP2P { "upgraded" }; - debug!( - "Handshake from {:?} {} public key {:?} expires at {:?}", - &self, - _authentic_msg, - &to_hex( + debug!("Handling handshake"; + "neighbor" => ?self, + "authentic_msg" => &_authentic_msg, + "public_key" => &to_hex( &handshake_data .node_public_key .to_public_key() .unwrap() .to_bytes_compressed() - ), - handshake_data.expire_block_height + ), + "services" => &to_hex(&handshake_data.services.to_be_bytes()), + "expires_block_height" => handshake_data.expire_block_height, + "supports_mempool_query" => Self::supports_mempool_query(handshake_data.services), ); if updated { diff --git a/src/net/codec.rs b/src/net/codec.rs index 5641eeea93..d7c4ee4975 100644 --- a/src/net/codec.rs +++ b/src/net/codec.rs @@ -59,19 +59,24 @@ use crate::types::chainstate::BurnchainHeaderHash; use crate::types::chainstate::StacksBlockHeader; use crate::types::StacksPublicKeyBuffer; -// macro for determining how big an inv bitvec can be, given its bitlen -macro_rules! BITVEC_LEN { - ($bitvec:expr) => { - (($bitvec) / 8 + if ($bitvec) % 8 > 0 { 1 } else { 0 }) as u32 - }; -} - impl_stacks_message_codec_for_int!(u8; [0; 1]); impl_stacks_message_codec_for_int!(u16; [0; 2]); impl_stacks_message_codec_for_int!(u32; [0; 4]); impl_stacks_message_codec_for_int!(u64; [0; 8]); impl_stacks_message_codec_for_int!(i64; [0; 8]); +impl StacksMessageCodec for [u8; 32] { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + fd.write_all(self).map_err(codec_error::WriteError) + } + + fn consensus_deserialize(fd: &mut R) -> Result<[u8; 32], codec_error> { + let mut buf = [0u8; 32]; + fd.read_exact(&mut buf).map_err(codec_error::ReadError)?; + Ok(buf) + } +} + impl StacksPublicKeyBuffer { pub fn from_public_key(pubkey: &Secp256k1PublicKey) -> StacksPublicKeyBuffer { let pubkey_bytes_vec = pubkey.to_bytes_compressed(); @@ -761,6 +766,41 @@ impl StacksMessageCodec for NatPunchData { } } +impl StacksMessageCodec for MemPoolSyncData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + match *self { + MemPoolSyncData::BloomFilter(ref bloom_filter) => { + write_next(fd, &MemPoolSyncDataID::BloomFilter.to_u8())?; + write_next(fd, bloom_filter)?; + } + MemPoolSyncData::TxTags(ref seed, ref tags) => { + write_next(fd, &MemPoolSyncDataID::TxTags.to_u8())?; + write_next(fd, seed)?; + write_next(fd, tags)?; + } + } + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let data_id: u8 = read_next(fd)?; + match MemPoolSyncDataID::from_u8(data_id).ok_or(codec_error::DeserializeError(format!( + "Unrecognized MemPoolSyncDataID {}", + &data_id + )))? 
{ + MemPoolSyncDataID::BloomFilter => { + let bloom_filter: BloomFilter = read_next(fd)?; + Ok(MemPoolSyncData::BloomFilter(bloom_filter)) + } + MemPoolSyncDataID::TxTags => { + let seed: [u8; 32] = read_next(fd)?; + let txtags: Vec = read_next(fd)?; + Ok(MemPoolSyncData::TxTags(seed, txtags)) + } + } + } +} + impl StacksMessageCodec for RelayData { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &self.peer)?; diff --git a/src/net/connection.rs b/src/net/connection.rs index 3cac65eafa..b783a85748 100644 --- a/src/net/connection.rs +++ b/src/net/connection.rs @@ -36,6 +36,7 @@ use mio::net as mio_net; use crate::codec::StacksMessageCodec; use crate::codec::MAX_MESSAGE_LEN; +use core::mempool::MAX_BLOOM_COUNTER_TXS; use net::codec::*; use net::Error as net_error; use net::HttpRequestPreamble; @@ -49,7 +50,7 @@ use net::StacksHttp; use net::StacksP2P; use net::download::BLOCK_DOWNLOAD_INTERVAL; -use net::inv::{FULL_INV_SYNC_INTERVAL, INV_REWARD_CYCLES, INV_SYNC_INTERVAL}; +use net::inv::{INV_REWARD_CYCLES, INV_SYNC_INTERVAL}; use net::neighbors::{ NEIGHBOR_REQUEST_TIMEOUT, NEIGHBOR_WALK_INTERVAL, NUM_INITIAL_WALKS, WALK_MAX_DURATION, WALK_MIN_DURATION, WALK_RESET_INTERVAL, WALK_RESET_PROB, WALK_RETRY_COUNT, WALK_STATE_TIMEOUT, @@ -348,7 +349,6 @@ pub struct ConnectionOptions { pub walk_reset_interval: u64, pub walk_state_timeout: u64, pub inv_sync_interval: u64, - pub full_inv_sync_interval: u64, pub inv_reward_cycles: u64, pub download_interval: u64, pub pingback_timeout: u64, @@ -374,6 +374,12 @@ pub struct ConnectionOptions { pub max_buffered_microblocks_available: u64, pub max_buffered_blocks: u64, pub max_buffered_microblocks: u64, + /// how often to query a remote peer for its mempool, in seconds + pub mempool_sync_interval: u64, + /// how many transactions to ask for in a mempool query + pub mempool_max_tx_query: u64, + /// how long a mempool sync is allowed to take, in total, before timing out + pub mempool_sync_timeout: u64, // fault injection pub disable_neighbor_walk: bool, @@ -384,6 +390,8 @@ pub struct ConnectionOptions { pub disable_network_prune: bool, pub disable_network_bans: bool, pub disable_block_advertisement: bool, + pub disable_block_push: bool, + pub disable_microblock_push: bool, pub disable_pingbacks: bool, pub disable_inbound_walks: bool, pub disable_natpunch: bool, @@ -424,7 +432,6 @@ impl std::default::Default for ConnectionOptions { walk_reset_interval: WALK_RESET_INTERVAL, walk_state_timeout: WALK_STATE_TIMEOUT, inv_sync_interval: INV_SYNC_INTERVAL, // how often to synchronize block inventories - full_inv_sync_interval: FULL_INV_SYNC_INTERVAL, // how often to synchronize the *full* inventory inv_reward_cycles: INV_REWARD_CYCLES, // how many reward cycles of blocks to sync in a non-full inventory sync download_interval: BLOCK_DOWNLOAD_INTERVAL, // how often to scan for blocks to download pingback_timeout: 60, @@ -450,12 +457,15 @@ impl std::default::Default for ConnectionOptions { public_ip_max_retries: 3, // maximum number of retries before self-throttling for $public_ip_timeout max_block_push: 10, // maximum number of blocksData messages to push out via our anti-entropy protocol max_microblock_push: 10, // maximum number of microblocks messages to push out via our anti-entropy protocol - antientropy_retry: 3600, // retry pushing data only once every hour + antientropy_retry: 60, // retry pushing data once every minute antientropy_public: true, // run antientropy even if we're NOT NAT'ed max_buffered_blocks_available: 1, 
max_buffered_microblocks_available: 1, max_buffered_blocks: 1, max_buffered_microblocks: 10, + mempool_sync_interval: 30, // number of seconds between mempool syncs + mempool_max_tx_query: 128, // maximum number of transactions to visit per mempool query + mempool_sync_timeout: 180, // how long a mempool sync can go for (3 minutes) // no faults on by default disable_neighbor_walk: false, @@ -466,6 +476,8 @@ impl std::default::Default for ConnectionOptions { disable_network_prune: false, disable_network_bans: false, disable_block_advertisement: false, + disable_block_push: false, + disable_microblock_push: false, disable_pingbacks: false, disable_inbound_walks: false, disable_natpunch: false, @@ -1048,7 +1060,10 @@ impl<P: ProtocolFamily> ConnectionOutbox<P>
{ message_eof = true; 0 } - Ok(read_len) => read_len, + Ok(read_len) => { + test_debug!("Connection message pipe returned {} bytes", read_len); + read_len + } Err(ioe) => match ioe.kind() { io::ErrorKind::WouldBlock => { // no data consumed, but we may need to make a break for it @@ -1078,14 +1093,13 @@ impl<P: ProtocolFamily> ConnectionOutbox<P>
{ self.socket_out_buf.extend_from_slice(&buf[0..nr_input]); - if nr_input > 0 { - trace!( - "Connection buffered {} bytes from pipe ({} total, ptr = {})", - nr_input, - self.socket_out_buf.len(), - self.socket_out_ptr - ); - } + test_debug!( + "Connection buffered {} bytes from pipe ({} total, ptr = {}, blocked = {})", + nr_input, + self.socket_out_buf.len(), + self.socket_out_ptr, + blocked + ); nr_input } None => { @@ -1124,7 +1138,7 @@ impl<P: ProtocolFamily> ConnectionOutbox<P>
{ self.socket_out_ptr += num_written; - trace!( + test_debug!( "Connection wrote {} bytes to socket (buffer len = {}, ptr = {})", num_written, self.socket_out_buf.len(), @@ -1145,7 +1159,7 @@ impl<P: ProtocolFamily> ConnectionOutbox<P>
{ } } - trace!( + test_debug!( "Connection send_bytes finished: blocked = {}, disconnected = {}", blocked, disconnected diff --git a/src/net/db.rs b/src/net/db.rs index 2136544669..093da75e73 100644 --- a/src/net/db.rs +++ b/src/net/db.rs @@ -161,11 +161,12 @@ impl LocalPeer { let addr = addrbytes; let port = port; - let services = ServiceFlags::RELAY; + let services = (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16); info!( - "Will be authenticating p2p messages with public key: {}", - Secp256k1PublicKey::from_private(&pkey).to_hex() + "Will be authenticating p2p messages with the following"; + "public key" => &Secp256k1PublicKey::from_private(&pkey).to_hex(), + "services" => &to_hex(&(services as u16).to_be_bytes()) ); LocalPeer { @@ -322,7 +323,6 @@ const PEERDB_INITIAL_SCHEMA: &'static [&'static str] = &[ PRIMARY KEY(slot) );"#, - "CREATE INDEX peer_address_index ON frontier(network_id,addrbytes,port);", r#" CREATE TABLE asn4( prefix INTEGER NOT NULL, @@ -358,6 +358,9 @@ const PEERDB_INITIAL_SCHEMA: &'static [&'static str] = &[ );"#, ]; +const PEERDB_INDEXES: &'static [&'static str] = + &["CREATE INDEX IF NOT EXISTS peer_address_index ON frontier(network_id,addrbytes,port);"]; + #[derive(Debug)] pub struct PeerDB { pub conn: Connection, @@ -438,6 +441,16 @@ impl PeerDB { tx.commit().map_err(db_error::SqliteError)?; + self.add_indexes()?; + Ok(()) + } + + fn add_indexes(&mut self) -> Result<(), db_error> { + let tx = self.tx_begin()?; + for row_text in PEERDB_INDEXES { + tx.execute_batch(row_text).map_err(db_error::SqliteError)?; + } + tx.commit()?; Ok(()) } @@ -469,7 +482,7 @@ impl PeerDB { } fn reset_allows<'a>(tx: &mut Transaction<'a>) -> Result<(), db_error> { - tx.execute("UPDATE frontier SET allowed = -1", NO_PARAMS) + tx.execute("UPDATE frontier SET allowed = 0", NO_PARAMS) .map_err(db_error::SqliteError)?; Ok(()) } @@ -587,6 +600,9 @@ impl PeerDB { tx.commit()?; } } + if readwrite { + db.add_indexes()?; + } Ok(db) } @@ -1423,7 +1439,10 @@ mod test { ); assert_eq!(local_peer.port, NETWORK_P2P_PORT); assert_eq!(local_peer.addrbytes, PeerAddress::from_ipv4(127, 0, 0, 1)); - assert_eq!(local_peer.services, ServiceFlags::RELAY as u16); + assert_eq!( + local_peer.services, + (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16) + ); } #[test] @@ -2301,7 +2320,7 @@ mod test { assert_eq!(n1.denied, i64::MAX); assert_eq!(n2.denied, 0); // refreshed; no longer denied - assert_eq!(n1.allowed, -1); - assert_eq!(n2.allowed, -1); + assert_eq!(n1.allowed, 0); + assert_eq!(n2.allowed, 0); } } diff --git a/src/net/dns.rs b/src/net/dns.rs index 50617098b7..1f37ceb1a5 100644 --- a/src/net/dns.rs +++ b/src/net/dns.rs @@ -173,6 +173,7 @@ impl DNSResolver { if addrs.len() == 0 { return DNSResponse::error(req, "DNS resolve error: got zero addresses".to_string()); } + test_debug!("{}:{} resolved to {:?}", &req.host, req.port, &addrs); DNSResponse::new(req, Ok(addrs)) } diff --git a/src/net/download.rs b/src/net/download.rs index 100521f82f..d9271d6275 100644 --- a/src/net/download.rs +++ b/src/net/download.rs @@ -889,13 +889,16 @@ impl BlockDownloader { } /// Set a hint that a block is now available from a remote peer, if we're idling or we're ahead - /// of the given height. + /// of the given height. If force is true, then always restart the download scan at the target + /// sortition, even if we're in the middle of downloading. 
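A usage note on the force flag introduced here: callers that learn out-of-band that a specific sortition has new data (for example, the anti-entropy push path) can now restart an in-progress scan, while hint_download_rescan keeps the old non-forcing behavior by passing false. An illustrative call pattern, assuming a downloader: BlockDownloader, an ibd flag, and a target_height in scope:

// Old semantics: only takes effect when idle, rescanning, or behind the height.
downloader.hint_block_sortition_height_available(target_height, ibd, false);

// New: restart the scan at target_height even mid-download.
downloader.hint_block_sortition_height_available(target_height, ibd, true);
downloader.hint_microblock_sortition_height_available(target_height, ibd, true);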
pub fn hint_block_sortition_height_available( &mut self, block_sortition_height: u64, ibd: bool, + force: bool, ) -> () { - if (ibd && self.state == BlockDownloaderState::DNSLookupBegin) + if force + || (ibd && self.state == BlockDownloaderState::DNSLookupBegin) || (self.empty_block_download_passes > 0 || block_sortition_height < self.block_sortition_height + 1) { @@ -921,13 +924,16 @@ impl BlockDownloader { } /// Set a hint that a confirmed microblock stream is now available from a remote peer, if we're idling or we're ahead - /// of the given height. + /// of the given height. If force is true, then always restart the download scan at the target + /// sortition, even if we're in the middle of downloading. pub fn hint_microblock_sortition_height_available( &mut self, mblock_sortition_height: u64, ibd: bool, + force: bool, ) -> () { - if (ibd && self.state == BlockDownloaderState::DNSLookupBegin) + if force + || (ibd && self.state == BlockDownloaderState::DNSLookupBegin) || (self.empty_microblock_download_passes > 0 || mblock_sortition_height < self.microblock_sortition_height + 1) { @@ -953,8 +959,8 @@ impl BlockDownloader { /// Set a hint that we should re-scan for blocks pub fn hint_download_rescan(&mut self, target_sortition_height: u64, ibd: bool) -> () { - self.hint_block_sortition_height_available(target_sortition_height, ibd); - self.hint_microblock_sortition_height_available(target_sortition_height, ibd); + self.hint_block_sortition_height_available(target_sortition_height, ibd, false); + self.hint_microblock_sortition_height_available(target_sortition_height, ibd, false); } // are we doing the initial block download? @@ -1222,6 +1228,13 @@ impl PeerNetwork { for (i, (consensus_hash, block_hash_opt, mut neighbors)) in availability.drain(..).enumerate() { + test_debug!( + "{:?}: consider availability of {}/{:?}", + &self.local_peer, + &consensus_hash, + &block_hash_opt + ); + if (i as u64) >= scan_batch_size { // we may have loaded scan_batch_size + 1 so we can find the child block for // microblocks, but we don't have to request this block's data either way. @@ -1242,11 +1255,9 @@ impl PeerNetwork { StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash); if downloader.is_inflight(&index_block_hash, microblocks) { // we already asked for this block or microblock stream - test_debug!( + debug!( "{:?}: Already in-flight: {}/{}", - &self.local_peer, - &consensus_hash, - &block_hash + &self.local_peer, &consensus_hash, &block_hash ); continue; } @@ -1887,11 +1898,14 @@ impl PeerNetwork { }) } - fn connect_or_send_http_request( + /// Send a (non-blocking) HTTP request to a remote peer. + /// Returns the event ID on success. 
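The MemPoolDB threading through these download-path signatures exists so HTTP conversations can serve mempool queries; whether a peer is even asked depends on the supports_mempool_query check added in the chat.rs hunk earlier, which requires both the RELAY and RPC service bits. A worked example of that mask; the numeric bit values here are illustrative assumptions, the real ones come from ServiceFlags:

const RELAY: u16 = 0x0001; // assumed value, for illustration only
const RPC: u16 = 0x0002; // assumed value, for illustration only

fn supports_mempool_query(peer_services: u16) -> bool {
    let expected = RELAY | RPC;
    (peer_services & expected) == expected
}

fn main() {
    assert!(supports_mempool_query(RELAY | RPC)); // full-service peer: sync its mempool
    assert!(!supports_mempool_query(RELAY)); // relay-only peer: skip
    assert!(!supports_mempool_query(RPC | 0x0004)); // unrelated extra bit doesn't help
}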
+ pub fn connect_or_send_http_request( &mut self, data_url: UrlString, addr: SocketAddr, request: HttpRequestType, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, ) -> Result { PeerNetwork::with_network_state(self, |ref mut network, ref mut network_state| { @@ -1908,7 +1922,7 @@ impl PeerNetwork { match http.get_conversation_and_socket(event_id) { (Some(ref mut convo), Some(ref mut socket)) => { convo.send_request(request)?; - HttpPeer::saturate_http_socket(socket, convo, chainstate)?; + HttpPeer::saturate_http_socket(socket, convo, mempool, chainstate)?; Ok(event_id) } (_, _) => { @@ -1933,6 +1947,7 @@ impl PeerNetwork { network: &mut PeerNetwork, dns_lookups: &HashMap>>, requestables: &mut VecDeque, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, ) -> Option<(T, usize)> { loop { @@ -1955,6 +1970,7 @@ impl PeerNetwork { requestable.get_url().clone(), addr.clone(), request, + mempool, chainstate, ) { Ok(handle) => { @@ -1996,6 +2012,7 @@ impl PeerNetwork { /// Start fetching blocks pub fn block_getblocks_begin( &mut self, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, ) -> Result<(), net_error> { test_debug!("{:?}: block_getblocks_begin", &self.local_peer); @@ -2009,6 +2026,7 @@ impl PeerNetwork { network, &downloader.dns_lookups, keys, + mempool, chainstate, ) { Some((key, handle)) => { @@ -2042,6 +2060,7 @@ impl PeerNetwork { /// Proceed to get microblocks pub fn block_getmicroblocks_begin( &mut self, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, ) -> Result<(), net_error> { test_debug!("{:?}: block_getmicroblocks_begin", &self.local_peer); @@ -2055,6 +2074,7 @@ impl PeerNetwork { network, &downloader.dns_lookups, keys, + mempool, chainstate, ) { Some((key, handle)) => { @@ -2365,6 +2385,7 @@ impl PeerNetwork { pub fn download_blocks( &mut self, sortdb: &SortitionDB, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, dns_client: &mut DNSClient, ibd: bool, @@ -2466,13 +2487,13 @@ impl PeerNetwork { self.block_dns_lookups_try_finish(dns_client)?; } BlockDownloaderState::GetBlocksBegin => { - self.block_getblocks_begin(chainstate)?; + self.block_getblocks_begin(mempool, chainstate)?; } BlockDownloaderState::GetBlocksFinish => { self.block_getblocks_try_finish()?; } BlockDownloaderState::GetMicroblocksBegin => { - self.block_getmicroblocks_begin(chainstate)?; + self.block_getmicroblocks_begin(mempool, chainstate)?; } BlockDownloaderState::GetMicroblocksFinish => { self.block_getmicroblocks_try_finish()?; @@ -2858,6 +2879,7 @@ pub mod test { sortdb, chainstate, mempool, + false, None, None, ) diff --git a/src/net/http.rs b/src/net/http.rs index 04ea9f70dd..3424e9f38f 100644 --- a/src/net/http.rs +++ b/src/net/http.rs @@ -41,10 +41,10 @@ use chainstate::burn::ConsensusHash; use chainstate::stacks::{StacksBlock, StacksMicroblock, StacksPublicKey, StacksTransaction}; use deps::httparse; use net::atlas::Attachment; -use net::CallReadOnlyRequestBody; use net::ClientError; use net::Error as net_error; use net::Error::ClarityError; +use net::ExtendedStacksHeader; use net::HttpContentType; use net::HttpRequestMetadata; use net::HttpRequestPreamble; @@ -53,6 +53,7 @@ use net::HttpResponseMetadata; use net::HttpResponsePreamble; use net::HttpResponseType; use net::HttpVersion; +use net::MemPoolSyncData; use net::MessageSequence; use net::NeighborAddress; use net::PeerAddress; @@ -65,7 +66,9 @@ use net::UnconfirmedTransactionStatus; use net::HTTP_PREAMBLE_MAX_ENCODED_SIZE; use net::HTTP_PREAMBLE_MAX_NUM_HEADERS; use net::HTTP_REQUEST_ID_RESERVED; +use 
net::MAX_HEADERS; use net::MAX_MICROBLOCKS_UNCONFIRMED; +use net::{CallReadOnlyRequestBody, TipRequest}; use net::{GetAttachmentResponse, GetAttachmentsInvResponse, PostTransactionRequestBody}; use util::hash::hex_bytes; use util::hash::to_hex; @@ -87,7 +90,7 @@ use crate::codec::{ read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, MAX_PAYLOAD_LEN, }; -use crate::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; +use crate::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockHeader, StacksBlockId}; use super::FeeRateEstimateRequestBody; @@ -95,6 +98,7 @@ lazy_static! { static ref PATH_GETINFO: Regex = Regex::new(r#"^/v2/info$"#).unwrap(); static ref PATH_GETPOXINFO: Regex = Regex::new(r#"^/v2/pox$"#).unwrap(); static ref PATH_GETNEIGHBORS: Regex = Regex::new(r#"^/v2/neighbors$"#).unwrap(); + static ref PATH_GETHEADERS: Regex = Regex::new(r#"^/v2/headers/([0-9]+)$"#).unwrap(); static ref PATH_GETBLOCK: Regex = Regex::new(r#"^/v2/blocks/([0-9a-f]{64})$"#).unwrap(); static ref PATH_GETMICROBLOCKS_INDEXED: Regex = Regex::new(r#"^/v2/microblocks/([0-9a-f]{64})$"#).unwrap(); @@ -113,6 +117,11 @@ lazy_static! { *PRINCIPAL_DATA_REGEX )) .unwrap(); + static ref PATH_GET_DATA_VAR: Regex = Regex::new(&format!( + "^/v2/data_var/(?P
<address>{})/(?P<contract>{})/(?P<varname>{})$", + *STANDARD_PRINCIPAL_REGEX, *CONTRACT_NAME_REGEX, *CLARITY_NAME_REGEX + )) + .unwrap(); static ref PATH_GET_MAP_ENTRY: Regex = Regex::new(&format!( "^/v2/map_entry/(?P
{})/(?P{})/(?P{})$", *STANDARD_PRINCIPAL_REGEX, *CONTRACT_NAME_REGEX, *CLARITY_NAME_REGEX @@ -142,6 +151,8 @@ lazy_static! { static ref PATH_GET_ATTACHMENTS_INV: Regex = Regex::new("^/v2/attachments/inv$").unwrap(); static ref PATH_GET_ATTACHMENT: Regex = Regex::new(r#"^/v2/attachments/([0-9a-f]{40})$"#).unwrap(); + static ref PATH_POST_MEMPOOL_QUERY: Regex = + Regex::new(r#"^/v2/mempool/query$"#).unwrap(); static ref PATH_OPTIONS_WILDCARD: Regex = Regex::new("^/v2/.{0,4096}$").unwrap(); } @@ -1458,6 +1469,7 @@ impl HttpRequestType { &PATH_GETNEIGHBORS, &HttpRequestType::parse_getneighbors, ), + ("GET", &PATH_GETHEADERS, &HttpRequestType::parse_getheaders), ("GET", &PATH_GETBLOCK, &HttpRequestType::parse_getblock), ( "GET", @@ -1500,6 +1512,11 @@ impl HttpRequestType { &PATH_GET_ACCOUNT, &HttpRequestType::parse_get_account, ), + ( + "GET", + &PATH_GET_DATA_VAR, + &HttpRequestType::parse_get_data_var, + ), ( "POST", &PATH_GET_MAP_ENTRY, @@ -1545,6 +1562,11 @@ impl HttpRequestType { &PATH_GET_ATTACHMENTS_INV, &HttpRequestType::parse_get_attachments_inv, ), + ( + "POST", + &PATH_POST_MEMPOOL_QUERY, + &HttpRequestType::parse_post_mempool_query, + ), ]; // use url::Url to parse path and query string @@ -1667,9 +1689,8 @@ impl HttpRequestType { )) } - /// check whether the given option query string - /// sets proof=0 (setting proof to false). - /// Defaults to _true_ + /// Check whether the given option query string sets proof=0 (setting proof to false). + /// Defaults to true. fn get_proof_query(query: Option<&str>) -> bool { let no_proof = if let Some(query_string) = query { form_urlencoded::parse(query_string.as_bytes()) @@ -1685,7 +1706,7 @@ impl HttpRequestType { /// get the chain tip optional query argument (`tip`) /// Take the first value we can parse. - fn get_chain_tip_query(query: Option<&str>) -> Option { + fn get_chain_tip_query(query: Option<&str>) -> TipRequest { match query { Some(query_string) => { for (key, value) in form_urlencoded::parse(query_string.as_bytes()) { @@ -1693,8 +1714,32 @@ impl HttpRequestType { continue; } + if value == "latest" { + return TipRequest::UseLatestUnconfirmedTip; + } if let Ok(tip) = StacksBlockId::from_hex(&value) { - return Some(tip); + return TipRequest::SpecificTip(tip); + } + } + return TipRequest::UseLatestAnchoredTip; + } + None => { + return TipRequest::UseLatestAnchoredTip; + } + } + } + + /// get the mempool page ID optional query argument (`page_id`) + /// Take the first value we can parse. 
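The tip handling sketched above now has three outcomes instead of the old Option's two. A condensed model of get_chain_tip_query's dispatch, with String standing in for StacksBlockId and a plain hex check in place of from_hex:

#[derive(Debug, PartialEq)]
enum TipRequest {
    UseLatestAnchoredTip,    // no usable `tip` parameter (the default)
    UseLatestUnconfirmedTip, // ?tip=latest
    SpecificTip(String),     // ?tip=<64-character hex block id>
}

fn parse_tip(value: Option<&str>) -> TipRequest {
    match value {
        Some("latest") => TipRequest::UseLatestUnconfirmedTip,
        Some(v) if v.len() == 64 && v.chars().all(|c| c.is_ascii_hexdigit()) => {
            TipRequest::SpecificTip(v.to_string())
        }
        _ => TipRequest::UseLatestAnchoredTip,
    }
}

fn main() {
    assert_eq!(parse_tip(Some("latest")), TipRequest::UseLatestUnconfirmedTip);
    assert_eq!(parse_tip(None), TipRequest::UseLatestAnchoredTip);
    assert!(matches!(parse_tip(Some(&"ab".repeat(32))), TipRequest::SpecificTip(_)));
}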
+ fn get_mempool_page_id_query(query: Option<&str>) -> Option { + match query { + Some(query_string) => { + for (key, value) in form_urlencoded::parse(query_string.as_bytes()) { + if key != "page_id" { + continue; + } + if let Ok(page_id) = Txid::from_hex(&value) { + return Some(page_id); } } return None; @@ -1733,6 +1778,42 @@ impl HttpRequestType { )) } + fn parse_get_data_var( + _protocol: &mut StacksHttp, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _fd: &mut R, + ) -> Result { + let content_len = preamble.get_content_length(); + if content_len != 0 { + return Err(net_error::DeserializeError(format!( + "Invalid Http request: invalid body length for GetDataVar ({})", + content_len + ))); + } + + let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { + net_error::DeserializeError("Failed to parse contract address".into()) + })?; + let contract_name = ContractName::try_from(captures["contract"].to_string()) + .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; + let var_name = ClarityName::try_from(captures["varname"].to_string()) + .map_err(|_e| net_error::DeserializeError("Failed to parse data var name".into()))?; + + let with_proof = HttpRequestType::get_proof_query(query); + let tip = HttpRequestType::get_chain_tip_query(query); + + Ok(HttpRequestType::GetDataVar( + HttpRequestMetadata::from_preamble(preamble), + contract_addr, + contract_name, + var_name, + tip, + with_proof, + )) + } + fn parse_get_map_entry( _protocol: &mut StacksHttp, preamble: &HttpRequestPreamble, @@ -1760,7 +1841,7 @@ impl HttpRequestType { let contract_name = ContractName::try_from(captures["contract"].to_string()) .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; let map_name = ClarityName::try_from(captures["map"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; + .map_err(|_e| net_error::DeserializeError("Failed to parse map name".into()))?; let value_hex: String = serde_json::from_reader(fd) .map_err(|_e| net_error::DeserializeError("Failed to parse JSON body".into()))?; @@ -1930,6 +2011,39 @@ impl HttpRequestType { )) } + fn parse_getheaders( + _protocol: &mut StacksHttp, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _fd: &mut R, + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(net_error::DeserializeError( + "Invalid Http request: expected 0-length body for GetBlock".to_string(), + )); + } + + let quantity_str = captures + .get(1) + .ok_or(net_error::DeserializeError( + "Failed to match path to reward cycle group".to_string(), + ))? 
+ .as_str(); + + let quantity: u64 = quantity_str + .parse() + .map_err(|_| net_error::DeserializeError("Failed to parse reward cycle".to_string()))?; + + let tip = HttpRequestType::get_chain_tip_query(query); + + Ok(HttpRequestType::GetHeaders( + HttpRequestMetadata::from_preamble(preamble), + quantity, + tip, + )) + } + fn parse_getblock( _protocol: &mut StacksHttp, preamble: &HttpRequestPreamble, @@ -2459,6 +2573,53 @@ impl HttpRequestType { )) } + fn parse_post_mempool_query( + _protocol: &mut StacksHttp, + preamble: &HttpRequestPreamble, + _regex: &Captures, + query: Option<&str>, + fd: &mut R, + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(net_error::DeserializeError( + "Invalid Http request: expected non-empty body".to_string(), + )); + } + + if preamble.get_content_length() > MAX_PAYLOAD_LEN { + return Err(net_error::DeserializeError( + "Invalid Http request: MemPoolQuery body is too big".to_string(), + )); + } + + // content-type must be given, and must be application/octet-stream + match preamble.content_type { + None => { + return Err(net_error::DeserializeError( + "Missing Content-Type for MemPoolQuery".to_string(), + )); + } + Some(ref c) => { + if *c != HttpContentType::Bytes { + return Err(net_error::DeserializeError( + "Wrong Content-Type for MemPoolQuery; expected application/octet-stream" + .to_string(), + )); + } + } + }; + + let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); + let mempool_query = MemPoolSyncData::consensus_deserialize(&mut bound_fd)?; + let page_id_opt = HttpRequestType::get_mempool_page_id_query(query); + + Ok(HttpRequestType::MemPoolQuery( + HttpRequestMetadata::from_preamble(preamble), + mempool_query, + page_id_opt, + )) + } + fn parse_options_preflight( _protocol: &mut StacksHttp, preamble: &HttpRequestPreamble, @@ -2475,8 +2636,9 @@ impl HttpRequestType { pub fn metadata(&self) -> &HttpRequestMetadata { match *self { HttpRequestType::GetInfo(ref md) => md, - HttpRequestType::GetPoxInfo(ref md, _) => md, + HttpRequestType::GetPoxInfo(ref md, ..) => md, HttpRequestType::GetNeighbors(ref md) => md, + HttpRequestType::GetHeaders(ref md, ..) => md, HttpRequestType::GetBlock(ref md, _) => md, HttpRequestType::GetMicroblocksIndexed(ref md, _) => md, HttpRequestType::GetMicroblocksConfirmed(ref md, _) => md, @@ -2486,6 +2648,7 @@ impl HttpRequestType { HttpRequestType::PostBlock(ref md, ..) => md, HttpRequestType::PostMicroblock(ref md, ..) => md, HttpRequestType::GetAccount(ref md, ..) => md, + HttpRequestType::GetDataVar(ref md, ..) => md, HttpRequestType::GetMapEntry(ref md, ..) => md, HttpRequestType::GetTransferCost(ref md) => md, HttpRequestType::GetContractABI(ref md, ..) => md, @@ -2495,6 +2658,7 @@ impl HttpRequestType { HttpRequestType::OptionsPreflight(ref md, ..) => md, HttpRequestType::GetAttachmentsInv(ref md, ..) => md, HttpRequestType::GetAttachment(ref md, ..) => md, + HttpRequestType::MemPoolQuery(ref md, ..) => md, HttpRequestType::FeeRateEstimate(ref md, _, _) => md, HttpRequestType::ClientError(ref md, ..) => md, } @@ -2503,8 +2667,9 @@ impl HttpRequestType { pub fn metadata_mut(&mut self) -> &mut HttpRequestMetadata { match *self { HttpRequestType::GetInfo(ref mut md) => md, - HttpRequestType::GetPoxInfo(ref mut md, _) => md, + HttpRequestType::GetPoxInfo(ref mut md, ..) => md, HttpRequestType::GetNeighbors(ref mut md) => md, + HttpRequestType::GetHeaders(ref mut md, ..) 
=> md, HttpRequestType::GetBlock(ref mut md, _) => md, HttpRequestType::GetMicroblocksIndexed(ref mut md, _) => md, HttpRequestType::GetMicroblocksConfirmed(ref mut md, _) => md, @@ -2514,6 +2679,7 @@ impl HttpRequestType { HttpRequestType::PostBlock(ref mut md, ..) => md, HttpRequestType::PostMicroblock(ref mut md, ..) => md, HttpRequestType::GetAccount(ref mut md, ..) => md, + HttpRequestType::GetDataVar(ref mut md, ..) => md, HttpRequestType::GetMapEntry(ref mut md, ..) => md, HttpRequestType::GetTransferCost(ref mut md) => md, HttpRequestType::GetContractABI(ref mut md, ..) => md, @@ -2523,29 +2689,43 @@ impl HttpRequestType { HttpRequestType::OptionsPreflight(ref mut md, ..) => md, HttpRequestType::GetAttachmentsInv(ref mut md, ..) => md, HttpRequestType::GetAttachment(ref mut md, ..) => md, + HttpRequestType::MemPoolQuery(ref mut md, ..) => md, HttpRequestType::FeeRateEstimate(ref mut md, _, _) => md, HttpRequestType::ClientError(ref mut md, ..) => md, } } - fn make_query_string(tip_opt: Option<&StacksBlockId>, with_proof: bool) -> String { - if let Some(tip) = tip_opt { - format!("?tip={}{}", tip, if with_proof { "" } else { "&proof=0" }) - } else if !with_proof { - format!("?proof=0") - } else { - "".to_string() + fn make_tip_query_string(tip_req: &TipRequest, with_proof: bool) -> String { + match tip_req { + TipRequest::UseLatestUnconfirmedTip => { + format!("?tip=latest{}", if with_proof { "" } else { "&proof=0" }) + } + TipRequest::SpecificTip(tip) => { + format!("?tip={}{}", tip, if with_proof { "" } else { "&proof=0" }) + } + TipRequest::UseLatestAnchoredTip => { + if !with_proof { + format!("?proof=0") + } else { + "".to_string() + } + } } } pub fn request_path(&self) -> String { match self { HttpRequestType::GetInfo(_md) => "/v2/info".to_string(), - HttpRequestType::GetPoxInfo(_md, tip_opt) => format!( + HttpRequestType::GetPoxInfo(_md, tip_req) => format!( "/v2/pox{}", - HttpRequestType::make_query_string(tip_opt.as_ref(), true) + HttpRequestType::make_tip_query_string(tip_req, true) ), HttpRequestType::GetNeighbors(_md) => "/v2/neighbors".to_string(), + HttpRequestType::GetHeaders(_md, quantity, tip_req) => format!( + "/v2/headers/{}{}", + quantity, + HttpRequestType::make_tip_query_string(tip_req, true) + ), HttpRequestType::GetBlock(_md, block_hash) => { format!("/v2/blocks/{}", block_hash.to_hex()) } @@ -2565,14 +2745,30 @@ impl HttpRequestType { } HttpRequestType::PostTransaction(_md, ..) => "/v2/transactions".to_string(), HttpRequestType::PostBlock(_md, ch, ..) 
=> format!("/v2/blocks/upload/{}", &ch), - HttpRequestType::PostMicroblock(_md, _, tip_opt) => format!( + HttpRequestType::PostMicroblock(_md, _, tip_req) => format!( "/v2/microblocks{}", - HttpRequestType::make_query_string(tip_opt.as_ref(), true) + HttpRequestType::make_tip_query_string(tip_req, true) ), - HttpRequestType::GetAccount(_md, principal, tip_opt, with_proof) => format!( - "/v2/accounts/{}{}", - &principal.to_string(), - HttpRequestType::make_query_string(tip_opt.as_ref(), *with_proof) + HttpRequestType::GetAccount(_md, principal, tip_req, with_proof) => { + format!( + "/v2/accounts/{}{}", + &principal.to_string(), + HttpRequestType::make_tip_query_string(tip_req, *with_proof,) + ) + } + HttpRequestType::GetDataVar( + _md, + contract_addr, + contract_name, + var_name, + tip_req, + with_proof, + ) => format!( + "/v2/data_var/{}/{}/{}{}", + &contract_addr.to_string(), + contract_name.as_str(), + var_name.as_str(), + HttpRequestType::make_tip_query_string(tip_req, *with_proof) ), HttpRequestType::GetMapEntry( _md, @@ -2580,40 +2776,40 @@ impl HttpRequestType { contract_name, map_name, _key, - tip_opt, + tip_req, with_proof, ) => format!( "/v2/map_entry/{}/{}/{}{}", &contract_addr.to_string(), contract_name.as_str(), map_name.as_str(), - HttpRequestType::make_query_string(tip_opt.as_ref(), *with_proof) + HttpRequestType::make_tip_query_string(tip_req, *with_proof) ), HttpRequestType::GetTransferCost(_md) => "/v2/fees/transfer".into(), - HttpRequestType::GetContractABI(_, contract_addr, contract_name, tip_opt) => format!( + HttpRequestType::GetContractABI(_, contract_addr, contract_name, tip_req) => format!( "/v2/contracts/interface/{}/{}{}", contract_addr, contract_name.as_str(), - HttpRequestType::make_query_string(tip_opt.as_ref(), true) + HttpRequestType::make_tip_query_string(tip_req, true,) ), HttpRequestType::GetContractSrc( _, contract_addr, contract_name, - tip_opt, + tip_req, with_proof, ) => format!( "/v2/contracts/source/{}/{}{}", contract_addr, contract_name.as_str(), - HttpRequestType::make_query_string(tip_opt.as_ref(), *with_proof) + HttpRequestType::make_tip_query_string(tip_req, *with_proof) ), HttpRequestType::GetIsTraitImplemented( _, contract_addr, contract_name, trait_id, - tip_opt, + tip_req, ) => format!( "/v2/traits/{}/{}/{}/{}/{}{}", contract_addr, @@ -2621,7 +2817,7 @@ impl HttpRequestType { trait_id.name.to_string(), StacksAddress::from(trait_id.clone().contract_identifier.issuer), trait_id.contract_identifier.name.as_str(), - HttpRequestType::make_query_string(tip_opt.as_ref(), true) + HttpRequestType::make_tip_query_string(tip_req, true) ), HttpRequestType::CallReadOnlyFunction( _, @@ -2630,13 +2826,13 @@ impl HttpRequestType { _, func_name, _, - tip_opt, + tip_req, ) => format!( "/v2/contracts/call-read/{}/{}/{}{}", contract_addr, contract_name.as_str(), func_name.as_str(), - HttpRequestType::make_query_string(tip_opt.as_ref(), true) + HttpRequestType::make_tip_query_string(tip_req, true) ), HttpRequestType::OptionsPreflight(_md, path) => path.to_string(), HttpRequestType::GetAttachmentsInv(_md, index_block_hash, pages_indexes) => { @@ -2657,6 +2853,12 @@ impl HttpRequestType { HttpRequestType::GetAttachment(_, content_hash) => { format!("/v2/attachments/{}", to_hex(&content_hash.0[..])) } + HttpRequestType::MemPoolQuery(_, _, page_id_opt) => match page_id_opt { + Some(page_id) => { + format!("/v2/mempool/query?page_id={}", page_id) + } + None => "/v2/mempool/query".to_string(), + }, HttpRequestType::FeeRateEstimate(_, _, _) => 
self.get_path().to_string(), HttpRequestType::ClientError(_md, e) => match e { ClientError::NotFound(path) => path.to_string(), @@ -2670,6 +2872,7 @@ impl HttpRequestType { HttpRequestType::GetInfo(..) => "/v2/info", HttpRequestType::GetPoxInfo(..) => "/v2/pox", HttpRequestType::GetNeighbors(..) => "/v2/neighbors", + HttpRequestType::GetHeaders(..) => "/v2/headers/:height", HttpRequestType::GetBlock(..) => "/v2/blocks/:hash", HttpRequestType::GetMicroblocksIndexed(..) => "/v2/microblocks/:hash", HttpRequestType::GetMicroblocksConfirmed(..) => "/v2/microblocks/confirmed/:hash", @@ -2681,6 +2884,7 @@ impl HttpRequestType { HttpRequestType::PostBlock(..) => "/v2/blocks/upload/:block", HttpRequestType::PostMicroblock(..) => "/v2/microblocks", HttpRequestType::GetAccount(..) => "/v2/accounts/:principal", + HttpRequestType::GetDataVar(..) => "/v2/data_var/:principal/:contract_name/:var_name", HttpRequestType::GetMapEntry(..) => "/v2/map_entry/:principal/:contract_name/:map_name", HttpRequestType::GetTransferCost(..) => "/v2/fees/transfer", HttpRequestType::GetContractABI(..) => { @@ -2693,6 +2897,7 @@ impl HttpRequestType { HttpRequestType::GetAttachmentsInv(..) => "/v2/attachments/inv", HttpRequestType::GetAttachment(..) => "/v2/attachments/:hash", HttpRequestType::GetIsTraitImplemented(..) => "/v2/traits/:principal/:contract_name", + HttpRequestType::MemPoolQuery(..) => "/v2/mempool/query", HttpRequestType::FeeRateEstimate(_, _, _) => "/v2/fees/transaction", HttpRequestType::OptionsPreflight(..) | HttpRequestType::ClientError(..) => "/", } @@ -2849,6 +3054,22 @@ impl HttpRequestType { fd.write_all(&request_body_bytes) .map_err(net_error::WriteError)?; } + HttpRequestType::MemPoolQuery(md, query, ..) => { + let request_body_bytes = query.serialize_to_vec(); + HttpRequestPreamble::new_serialized( + fd, + &md.version, + "POST", + &self.request_path(), + &md.peer, + md.keep_alive, + Some(request_body_bytes.len() as u32), + Some(&HttpContentType::Bytes), + empty_headers, + )?; + fd.write_all(&request_body_bytes) + .map_err(net_error::WriteError)?; + } other_type => { let md = other_type.metadata(); let request_path = other_type.request_path(); @@ -2909,10 +3130,13 @@ impl HttpResponseType { )); } - if preamble.content_type != HttpContentType::Text { - return Err(net_error::DeserializeError( - "Invalid error response: expected text/plain".to_string(), - )); + if preamble.content_type != HttpContentType::Text + && preamble.content_type != HttpContentType::JSON + { + return Err(net_error::DeserializeError(format!( + "Invalid error response: expected text/plain or application/json, got {:?}", + &preamble.content_type + ))); } let mut error_text = String::new(); @@ -3094,7 +3318,9 @@ impl HttpResponseType { (&PATH_GETINFO, &HttpResponseType::parse_peerinfo), (&PATH_GETPOXINFO, &HttpResponseType::parse_poxinfo), (&PATH_GETNEIGHBORS, &HttpResponseType::parse_neighbors), + (&PATH_GETHEADERS, &HttpResponseType::parse_headers), (&PATH_GETBLOCK, &HttpResponseType::parse_block), + (&PATH_GET_DATA_VAR, &HttpResponseType::parse_get_data_var), (&PATH_GET_MAP_ENTRY, &HttpResponseType::parse_get_map_entry), ( &PATH_GETMICROBLOCKS_INDEXED, @@ -3146,6 +3372,10 @@ impl HttpResponseType { &PATH_GET_ATTACHMENTS_INV, &HttpResponseType::parse_get_attachments_inv, ), + ( + &PATH_POST_MEMPOOL_QUERY, + &HttpResponseType::parse_post_mempool_query, + ), ]; // use url::Url to parse path and query string @@ -3239,6 +3469,21 @@ impl HttpResponseType { )) } + fn parse_headers( + _protocol: &mut StacksHttp, + request_version: 
HttpVersion, + preamble: &HttpResponsePreamble, + fd: &mut R, + len_hint: Option, + ) -> Result { + let headers: Vec = + HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; + Ok(HttpResponseType::Headers( + HttpResponseMetadata::from_preamble(request_version, preamble), + headers, + )) + } + fn parse_block( _protocol: &mut StacksHttp, request_version: HttpVersion, @@ -3284,6 +3529,21 @@ impl HttpResponseType { )) } + fn parse_get_data_var( + _protocol: &mut StacksHttp, + request_version: HttpVersion, + preamble: &HttpResponsePreamble, + fd: &mut R, + len_hint: Option, + ) -> Result { + let data_var = + HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; + Ok(HttpResponseType::GetDataVar( + HttpResponseMetadata::from_preamble(request_version, preamble), + data_var, + )) + } + fn parse_get_map_entry( _protocol: &mut StacksHttp, request_version: HttpVersion, @@ -3515,6 +3775,142 @@ impl HttpResponseType { )) } + /// Read the trailing page ID from a transaction stream + fn parse_mempool_query_page_id( + pos: usize, + retry_reader: &mut RetryReader<'_, R>, + ) -> Result, net_error> { + // possibly end-of-transactions, in which case, the last 32 bytes should be + // a page ID. Expect end-of-stream after this. + retry_reader.set_position(pos); + let next_page: Txid = match read_next(retry_reader) { + Ok(txid) => txid, + Err(e) => match e { + codec_error::ReadError(ref ioe) => match ioe.kind() { + io::ErrorKind::UnexpectedEof => { + if pos == retry_reader.position() { + // this is fine -- the node didn't get another page + return Ok(None); + } else { + // partial data -- corrupt stream + test_debug!("Unexpected EOF: {} != {}", pos, retry_reader.position()); + return Err(e.into()); + } + } + _ => { + return Err(e.into()); + } + }, + e => { + return Err(e.into()); + } + }, + }; + + test_debug!("Read page_id {:?}", &next_page); + Ok(Some(next_page)) + } + + /// Decode a transaction stream, returned from /v2/mempool/query. + /// The wire format is a list of transactions (no SIP-003 length prefix), followed by an + /// optional 32-byte page ID. Obtain both the transactions and page ID, if it exists. + pub fn decode_tx_stream( + fd: &mut R, + len_hint: Option, + ) -> Result<(Vec, Option), net_error> { + // The wire format is `tx, tx, tx, tx, .., tx, txid`. + // The last 32 bytes are the page ID for the next mempool query. + // NOTE: there will be no length prefix on this. + let mut txs: Vec = vec![]; + let max_len = len_hint.unwrap_or(MAX_MESSAGE_LEN as usize) as u64; + let mut bound_reader = BoundReader::from_reader(fd, max_len); + let mut retry_reader = RetryReader::new(&mut bound_reader); + let mut page_id = None; + let mut expect_eof = false; + + loop { + let pos = retry_reader.position(); + let next_msg: Result = read_next(&mut retry_reader); + match next_msg { + Ok(tx) => { + if expect_eof { + // this should have failed + test_debug!("Expected EOF; got transaction {}", tx.txid()); + return Err(net_error::ExpectedEndOfStream); + } + + test_debug!("Read transaction {}", tx.txid()); + txs.push(tx); + Ok(()) + } + Err(e) => match e { + codec_error::ReadError(ref ioe) => match ioe.kind() { + io::ErrorKind::UnexpectedEof => { + if expect_eof { + if pos != retry_reader.position() { + // read partial data. The stream is corrupt. + test_debug!( + "Expected EOF; stream advanced from {} to {}", + pos, + retry_reader.position() + ); + return Err(net_error::ExpectedEndOfStream); + } + } else { + // couldn't read a full transaction. 
This is possibly a page ID, whose + // 32 bytes decode to the prefix of a well-formed transaction. + test_debug!("Try to read page ID trailer after ReadError"); + page_id = HttpResponseType::parse_mempool_query_page_id( + pos, + &mut retry_reader, + )?; + } + break; + } + _ => Err(e), + }, + codec_error::DeserializeError(_msg) => { + if expect_eof { + // this should have failed due to EOF + test_debug!("Expected EOF; got DeserializeError '{}'", &_msg); + return Err(net_error::ExpectedEndOfStream); + } + + // failed to parse a transaction. This is possibly a page ID. + test_debug!("Try to read page ID trailer after ReadError"); + page_id = + HttpResponseType::parse_mempool_query_page_id(pos, &mut retry_reader)?; + + // do one more pass to make sure we're actually end-of-stream. + // otherwise, the stream itself was corrupt, since any 32 bytes is a valid + // txid and the presence of more bytes means that we simply got a bad tx + // that we couldn't decode. + expect_eof = true; + Ok(()) + } + _ => Err(e), + }, + }?; + } + + Ok((txs, page_id)) + } + + fn parse_post_mempool_query( + _protocol: &mut StacksHttp, + request_version: HttpVersion, + preamble: &HttpResponsePreamble, + fd: &mut R, + len_hint: Option, + ) -> Result { + let (txs, page_id) = HttpResponseType::decode_tx_stream(fd, len_hint)?; + Ok(HttpResponseType::MemPoolTxs( + HttpResponseMetadata::from_preamble(request_version, preamble), + page_id, + txs, + )) + } + fn error_reason(code: u16) -> &'static str { match code { 400 => "Bad Request", @@ -3554,6 +3950,8 @@ impl HttpResponseType { HttpResponseType::PeerInfo(ref md, _) => md, HttpResponseType::PoxInfo(ref md, _) => md, HttpResponseType::Neighbors(ref md, _) => md, + HttpResponseType::HeaderStream(ref md) => md, + HttpResponseType::Headers(ref md, _) => md, HttpResponseType::Block(ref md, _) => md, HttpResponseType::BlockStream(ref md) => md, HttpResponseType::Microblocks(ref md, _) => md, @@ -3562,6 +3960,7 @@ impl HttpResponseType { HttpResponseType::StacksBlockAccepted(ref md, ..) => md, HttpResponseType::MicroblockHash(ref md, _) => md, HttpResponseType::TokenTransferCost(ref md, _) => md, + HttpResponseType::GetDataVar(ref md, _) => md, HttpResponseType::GetMapEntry(ref md, _) => md, HttpResponseType::GetAccount(ref md, _) => md, HttpResponseType::GetContractABI(ref md, _) => md, @@ -3571,6 +3970,8 @@ impl HttpResponseType { HttpResponseType::UnconfirmedTransaction(ref md, _) => md, HttpResponseType::GetAttachment(ref md, _) => md, HttpResponseType::GetAttachmentsInv(ref md, _) => md, + HttpResponseType::MemPoolTxStream(ref md) => md, + HttpResponseType::MemPoolTxs(ref md, ..) 
=> md, HttpResponseType::OptionsPreflight(ref md) => md, HttpResponseType::TransactionFeeEstimation(ref md, _) => md, // errors @@ -3676,6 +4077,10 @@ impl HttpResponseType { HttpResponsePreamble::ok_JSON_from_md(fd, md)?; HttpResponseType::send_json(protocol, md, fd, data)?; } + HttpResponseType::GetDataVar(ref md, ref var_data) => { + HttpResponsePreamble::ok_JSON_from_md(fd, md)?; + HttpResponseType::send_json(protocol, md, fd, var_data)?; + } HttpResponseType::GetMapEntry(ref md, ref map_data) => { HttpResponsePreamble::ok_JSON_from_md(fd, md)?; HttpResponseType::send_json(protocol, md, fd, map_data)?; @@ -3700,6 +4105,31 @@ impl HttpResponseType { HttpResponsePreamble::ok_JSON_from_md(fd, md)?; HttpResponseType::send_json(protocol, md, fd, zonefile_data)?; } + HttpResponseType::Headers(ref md, ref headers) => { + HttpResponsePreamble::new_serialized( + fd, + 200, + "OK", + None, + &HttpContentType::JSON, + md.request_id, + |ref mut fd| keep_alive_headers(fd, md), + )?; + HttpResponseType::send_json(protocol, md, fd, headers)?; + } + HttpResponseType::HeaderStream(ref md) => { + // only send the preamble. The caller will need to figure out how to send along + // the headers data itself. + HttpResponsePreamble::new_serialized( + fd, + 200, + "OK", + None, + &HttpContentType::JSON, + md.request_id, + |ref mut fd| keep_alive_headers(fd, md), + )?; + } HttpResponseType::Block(ref md, ref block) => { HttpResponsePreamble::new_serialized( fd, @@ -3796,6 +4226,51 @@ impl HttpResponseType { HttpResponsePreamble::ok_JSON_from_md(fd, md)?; HttpResponseType::send_json(protocol, md, fd, unconfirmed_status)?; } + HttpResponseType::MemPoolTxStream(ref md) => { + // only send the preamble. The caller will need to figure out how to send along + // the tx data itself. + HttpResponsePreamble::new_serialized( + fd, + 200, + "OK", + None, + &HttpContentType::Bytes, + md.request_id, + |ref mut fd| keep_alive_headers(fd, md), + )?; + } + HttpResponseType::MemPoolTxs(ref md, ref page_id, ref txs) => { + HttpResponsePreamble::new_serialized( + fd, + 200, + "OK", + md.content_length.clone(), + &HttpContentType::Bytes, + md.request_id, + |ref mut fd| keep_alive_headers(fd, md), + )?; + match page_id { + Some(txid) => { + if md.content_length.is_some() { + // have explicit content-length, so we can send as-is + write_next(fd, txs)?; + write_next(fd, txid)?; + Ok(()) + } else { + // no content-length, so send as chunk-encoded + let mut write_state = + HttpChunkedTransferWriterState::new(protocol.chunk_size as usize); + let mut encoder = + HttpChunkedTransferWriter::from_writer_state(fd, &mut write_state); + write_next(&mut encoder, txs)?; + write_next(&mut encoder, txid)?; + encoder.flush().map_err(codec_error::WriteError)?; + Ok(()) + } + } + None => HttpResponseType::send_bytestream(protocol, md, fd, txs), + }?; + } HttpResponseType::OptionsPreflight(ref md) => { HttpResponsePreamble::new_serialized( fd, @@ -3889,6 +4364,7 @@ impl MessageSequence for StacksHttpMessage { HttpRequestType::GetInfo(_) => "HTTP(GetInfo)", HttpRequestType::GetPoxInfo(_, _) => "HTTP(GetPoxInfo)", HttpRequestType::GetNeighbors(_) => "HTTP(GetNeighbors)", + HttpRequestType::GetHeaders(..) => "HTTP(GetHeaders)", HttpRequestType::GetBlock(_, _) => "HTTP(GetBlock)", HttpRequestType::GetMicroblocksIndexed(_, _) => "HTTP(GetMicroblocksIndexed)", HttpRequestType::GetMicroblocksConfirmed(_, _) => "HTTP(GetMicroblocksConfirmed)", @@ -3902,6 +4378,7 @@ impl MessageSequence for StacksHttpMessage { HttpRequestType::PostBlock(..) 
=> "HTTP(PostBlock)", HttpRequestType::PostMicroblock(..) => "HTTP(PostMicroblock)", HttpRequestType::GetAccount(..) => "HTTP(GetAccount)", + HttpRequestType::GetDataVar(..) => "HTTP(GetDataVar)", HttpRequestType::GetMapEntry(..) => "HTTP(GetMapEntry)", HttpRequestType::GetTransferCost(_) => "HTTP(GetTransferCost)", HttpRequestType::GetContractABI(..) => "HTTP(GetContractABI)", @@ -3910,12 +4387,14 @@ impl MessageSequence for StacksHttpMessage { HttpRequestType::CallReadOnlyFunction(..) => "HTTP(CallReadOnlyFunction)", HttpRequestType::GetAttachment(..) => "HTTP(GetAttachment)", HttpRequestType::GetAttachmentsInv(..) => "HTTP(GetAttachmentsInv)", + HttpRequestType::MemPoolQuery(..) => "HTTP(MemPoolQuery)", HttpRequestType::OptionsPreflight(..) => "HTTP(OptionsPreflight)", HttpRequestType::ClientError(..) => "HTTP(ClientError)", HttpRequestType::FeeRateEstimate(_, _, _) => "HTTP(FeeRateEstimate)", }, StacksHttpMessage::Response(ref res) => match res { HttpResponseType::TokenTransferCost(_, _) => "HTTP(TokenTransferCost)", + HttpResponseType::GetDataVar(_, _) => "HTTP(GetDataVar)", HttpResponseType::GetMapEntry(_, _) => "HTTP(GetMapEntry)", HttpResponseType::GetAccount(_, _) => "HTTP(GetAccount)", HttpResponseType::GetContractABI(..) => "HTTP(GetContractABI)", @@ -3927,6 +4406,8 @@ impl MessageSequence for StacksHttpMessage { HttpResponseType::PeerInfo(_, _) => "HTTP(PeerInfo)", HttpResponseType::PoxInfo(_, _) => "HTTP(PeerInfo)", HttpResponseType::Neighbors(_, _) => "HTTP(Neighbors)", + HttpResponseType::Headers(..) => "HTTP(Headers)", + HttpResponseType::HeaderStream(..) => "HTTP(HeaderStream)", HttpResponseType::Block(_, _) => "HTTP(Block)", HttpResponseType::BlockStream(_) => "HTTP(BlockStream)", HttpResponseType::Microblocks(_, _) => "HTTP(Microblocks)", @@ -3935,6 +4416,8 @@ impl MessageSequence for StacksHttpMessage { HttpResponseType::StacksBlockAccepted(..) => "HTTP(StacksBlockAccepted)", HttpResponseType::MicroblockHash(_, _) => "HTTP(MicroblockHash)", HttpResponseType::UnconfirmedTransaction(_, _) => "HTTP(UnconfirmedTransaction)", + HttpResponseType::MemPoolTxStream(..) => "HTTP(MemPoolTxStream)", + HttpResponseType::MemPoolTxs(..) => "HTTP(MemPoolTxs)", HttpResponseType::OptionsPreflight(_) => "HTTP(OptionsPreflight)", HttpResponseType::BadRequestJSON(..) | HttpResponseType::BadRequest(..) => { "HTTP(400)" @@ -4095,6 +4578,37 @@ impl StacksHttp { true } + pub fn set_preamble(&mut self, preamble: &StacksHttpPreamble) -> Result<(), net_error> { + // if we already have a pending message, then this preamble cannot be processed (indicates an un-compliant client) + match preamble { + StacksHttpPreamble::Response(ref http_response_preamble) => { + // request path must have been set + if self.request_path.is_none() { + return Err(net_error::DeserializeError( + "Possible bug: did not set the request path".to_string(), + )); + } + + if http_response_preamble.is_chunked() { + // will stream this. Make sure we're not doing so already (no collisions + // allowed on in-flight request IDs!) 
+ if self.has_pending_reply() { + test_debug!("Have pending reply already"); + return Err(net_error::InProgress); + } + + // mark as pending -- we can stream this + if !self.set_pending(http_response_preamble) { + test_debug!("Have pending reply already"); + return Err(net_error::InProgress); + } + } + } + _ => {} + } + Ok(()) + } + pub fn begin_request(&mut self, client_version: HttpVersion, request_path: String) -> () { self.request_version = Some(client_version); self.request_path = Some(request_path); @@ -4245,33 +4759,7 @@ impl ProtocolFamily for StacksHttp { let preamble_len = cursor.position() as usize; - // if we already have a pending message, then this preamble cannot be processed (indicates an un-compliant client) - match preamble { - StacksHttpPreamble::Response(ref http_response_preamble) => { - // request path must have been set - if self.request_path.is_none() { - return Err(net_error::DeserializeError( - "Possible bug: did not set the request path".to_string(), - )); - } - - if http_response_preamble.is_chunked() { - // will stream this. Make sure we're not doing so already (no collisions - // allowed on in-flight request IDs!) - if self.has_pending_reply() { - test_debug!("Have pending reply already"); - return Err(net_error::InProgress); - } - - // mark as pending -- we can stream this - if !self.set_pending(http_response_preamble) { - test_debug!("Have pending reply already"); - return Err(net_error::InProgress); - } - } - } - _ => {} - } + self.set_preamble(&preamble)?; Ok((preamble, preamble_len)) } @@ -6034,7 +6522,7 @@ mod test { "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 4\r\n\r\n\"ab\"", "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 1\r\n\r\n{", "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 1\r\n\r\na", - "HTTP/1.1 400 Bad Request\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 2\r\n\r\n{}", + "HTTP/1.1 400 Bad Request\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/octet-stream\r\nContent-length: 2\r\n\r\n{}", ]; let expected_bad_request_payload_errors = vec![ "Invalid content-type", @@ -6382,46 +6870,58 @@ mod test { #[test] fn test_http_parse_proof_tip_query() { let query_txt = "tip=7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392"; - assert_eq!( - HttpRequestType::get_chain_tip_query(Some(query_txt)).unwrap(), - StacksBlockId::from_hex( - "7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392" - ) - .unwrap() - ); + let tip_req = HttpRequestType::get_chain_tip_query(Some(query_txt)); + match tip_req { + TipRequest::SpecificTip(tip) => assert_eq!( + tip, + StacksBlockId::from_hex( + "7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392" + ) + .unwrap() + ), + _ => panic!(), + } // first parseable tip is taken let query_txt_dup = "tip=7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392&tip=03e26bd68a8722f8b3861e2058edcafde094ad059e152754986c3573306698f1"; - assert_eq!( - HttpRequestType::get_chain_tip_query(Some(query_txt_dup)).unwrap(), - StacksBlockId::from_hex( - "7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392" - ) - .unwrap() - ); + let tip_req = HttpRequestType::get_chain_tip_query(Some(query_txt)); + match tip_req { + TipRequest::SpecificTip(tip) => assert_eq!( + tip, + StacksBlockId::from_hex( + 
"7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392" + ) + .unwrap() + ), + _ => panic!(), + } // first parseable tip is taken let query_txt_dup = "tip=bad&tip=7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392&tip=03e26bd68a8722f8b3861e2058edcafde094ad059e152754986c3573306698f1"; - assert_eq!( - HttpRequestType::get_chain_tip_query(Some(query_txt_dup)).unwrap(), - StacksBlockId::from_hex( - "7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392" - ) - .unwrap() - ); + let tip_req = HttpRequestType::get_chain_tip_query(Some(query_txt_dup)); + match tip_req { + TipRequest::SpecificTip(tip) => assert_eq!( + tip, + StacksBlockId::from_hex( + "7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392" + ) + .unwrap() + ), + _ => panic!(), + } // tip can be skipped let query_txt_bad = "tip=bad"; assert_eq!( HttpRequestType::get_chain_tip_query(Some(query_txt_bad)), - None + TipRequest::UseLatestAnchoredTip ); // tip can be skipped let query_txt_none = "tip=bad"; assert_eq!( HttpRequestType::get_chain_tip_query(Some(query_txt_none)), - None + TipRequest::UseLatestAnchoredTip ); } diff --git a/src/net/inv.rs b/src/net/inv.rs index 891c6d21ff..5cc2f762c8 100644 --- a/src/net/inv.rs +++ b/src/net/inv.rs @@ -74,12 +74,7 @@ pub const INV_SYNC_INTERVAL: u64 = 150; pub const INV_SYNC_INTERVAL: u64 = 0; #[cfg(not(test))] -pub const FULL_INV_SYNC_INTERVAL: u64 = 12 * 3600; -#[cfg(test)] -pub const FULL_INV_SYNC_INTERVAL: u64 = 60; - -#[cfg(not(test))] -pub const INV_REWARD_CYCLES: u64 = 3; +pub const INV_REWARD_CYCLES: u64 = 2; #[cfg(test)] pub const INV_REWARD_CYCLES: u64 = 1; @@ -546,7 +541,7 @@ pub struct NeighborBlockStats { pub pox_inv: Option, /// Received BlocksInv pub blocks_inv: Option, - /// Last time we did a full scan + /// Last time we did a scan pub last_rescan_timestamp: u64, /// Finished synchronizing? pub done: bool, @@ -990,14 +985,9 @@ pub struct InvState { hint_do_rescan: bool, /// last time a rescan was completed last_rescanned_at: u64, - /// Should we do a full rescan? - hint_do_full_rescan: bool, - /// last time a full rescan was completed, in seconds - last_full_rescanned_at: u64, /// How many passes -- short and full -- have we done? num_inv_syncs: u64, - num_full_inv_syncs: u64, /// What's the last reward cycle we _started_ the inv scan at? 
pub block_sortition_start: u64, @@ -1017,12 +1007,9 @@ impl InvState { hint_learned_data: false, hint_learned_data_height: u64::MAX, hint_do_rescan: true, - hint_do_full_rescan: false, last_rescanned_at: 0, - last_full_rescanned_at: 0, num_inv_syncs: 0, - num_full_inv_syncs: 0, block_sortition_start: 0, } @@ -1048,9 +1035,11 @@ impl InvState { let mut added = 0; for peer in peers.iter() { if let Some(stats) = self.block_stats.get_mut(peer) { + debug!("Already tracking inventories of peer {:?}", &peer); stats.reset_pox_scan(0); stats.is_bootstrap_peer = bootstrap_peers.contains(&peer); } else if self.block_stats.len() < max_neighbors { + debug!("Will track inventories of new peer {:?}", &peer); self.block_stats.insert( peer.clone(), NeighborBlockStats::new( @@ -1193,6 +1182,12 @@ impl InvState { ret = true; } } + if !ret { + debug!( + "Have {} block_stats, but none represent useful data for the downloader", + self.block_stats.len() + ); + } ret } @@ -1264,14 +1259,17 @@ impl InvState { // genesis snapshot and doesn't correspond to anything (the 1st snapshot is block 0) let set = if microblocks { debug!( - "Neighbor {:?} now has confirmed microblock stream at {} ({})", - neighbor_key, sn.block_height, consensus_hash + "Neighbor {:?} now has confirmed microblock stream at {} ({}) (sortition {})", + neighbor_key, sn.block_height, consensus_hash, sn.block_height - sortdb.first_block_height ); stats.inv.set_microblocks_bit(sn.block_height) } else { debug!( - "Neighbor {:?} now has block at {} ({})", - neighbor_key, sn.block_height, consensus_hash + "Neighbor {:?} now has block at {} ({}) (sortition {})", + neighbor_key, + sn.block_height, + consensus_hash, + sn.block_height - sortdb.first_block_height ); stats.inv.set_block_bit(sn.block_height) }; @@ -1574,7 +1572,8 @@ impl PeerNetwork { .get_peer_sortition_snapshot(sortdb, &stable_tip_burn_block_hash)? .is_none() { - // we don't know about this remote peer's stable burnchain tip either + // we don't know about this remote peer's stable burnchain tip either, so ask + // for no blocks. debug!("{:?}: remote neighbor {:?}'s burnchain stable view tip is {}-{:?}, which we do not know", &self.local_peer, nk, stable_tip_height, &stable_tip_burn_block_hash); return Ok(0); } @@ -1597,7 +1596,12 @@ impl PeerNetwork { { self.burnchain.pox_constants.reward_cycle_length as u64 } else { - max_burn_block_height - target_block_height + 1 + if target_block_height > max_burn_block_height { + debug!("{:?}: will not send GetBlocksInv to {:?}, since we are sync'ed up to its highest sortition block (target block is {}, max burn block is {})", &self.local_peer, nk, target_block_height, max_burn_block_height); + 0 + } else { + max_burn_block_height - target_block_height + 1 + } }; if num_blocks == 0 { @@ -1645,20 +1649,22 @@ impl PeerNetwork { ); let num_blocks = match self.get_convo(nk) { - Some(convo) => match self.get_getblocksinv_num_blocks( - sortdb, - target_block_reward_cycle, - nk, - stats, - convo, - )? { - 0 => { - // cannot ask this peer for any blocks in this reward cycle - debug!("{:?}: no blocks available from {}", &self.local_peer, nk); - return Ok(None); + Some(convo) => { + match self.get_getblocksinv_num_blocks( + sortdb, + target_block_reward_cycle, + nk, + stats, + convo, + )? 
{ + 0 => { + // cannot ask this peer for any blocks in this reward cycle + debug!("{:?}: no blocks available from {} at cycle {} (which starts at height {})", &self.local_peer, nk, target_block_reward_cycle, self.burnchain.reward_cycle_to_block_height(target_block_reward_cycle)); + return Ok(None); + } + x => x, } - x => x, - }, + } None => { debug!("{:?}: no conversation open for {}", &self.local_peer, nk); return Ok(None); @@ -1767,12 +1773,7 @@ impl PeerNetwork { } /// Determine at which reward cycle to begin scanning inventories - fn get_block_scan_start( - &self, - sortdb: &SortitionDB, - highest_remote_reward_cycle: u64, - full_rescan: bool, - ) -> u64 { + fn get_block_scan_start(&self, sortdb: &SortitionDB, highest_remote_reward_cycle: u64) -> u64 { let (consensus_hash, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) .unwrap_or((ConsensusHash::empty(), BlockHeaderHash([0u8; 32]))); @@ -1794,11 +1795,13 @@ impl PeerNetwork { highest_remote_reward_cycle.saturating_sub(self.connection_opts.inv_reward_cycles), ); - if full_rescan { - 0 - } else { - start_reward_cycle - } + test_debug!( + "begin blocks inv scan at {} = min({},{})", + start_reward_cycle, + stacks_tip_rc, + highest_remote_reward_cycle.saturating_sub(self.connection_opts.inv_reward_cycles) + ); + start_reward_cycle } /// Start requesting the next batch of PoX inventories @@ -1808,7 +1811,6 @@ impl PeerNetwork { nk: &NeighborKey, stats: &mut NeighborBlockStats, request_timeout: u64, - full_rescan: bool, ) -> Result<(), net_error> { let (target_pox_reward_cycle, getpoxinv) = match self .make_next_getpoxinv(sortdb, nk, stats)? @@ -1816,8 +1818,13 @@ impl PeerNetwork { Some(x) => x, None => { // proceed to block scan - let scan_start_rc = - self.get_block_scan_start(sortdb, stats.inv.get_pox_height(), full_rescan); + let scan_start_rc = self.get_block_scan_start( + sortdb, + self.burnchain + .block_height_to_reward_cycle(stats.inv.get_block_height()) + .unwrap_or(0), + ); + debug!("{:?}: cannot make any more GetPoxInv requests for {:?}; proceeding to block inventory scan at reward cycle {}", &self.local_peer, nk, scan_start_rc); stats.reset_block_scan(scan_start_rc); return Ok(()); @@ -1844,7 +1851,6 @@ impl PeerNetwork { sortdb: &SortitionDB, nk: &NeighborKey, stats: &mut NeighborBlockStats, - full_rescan: bool, ibd: bool, ) -> Result { if stats.done { @@ -1876,14 +1882,13 @@ impl PeerNetwork { // react to divergences by deepening our rescan. let scan_start_rc = self.get_block_scan_start( sortdb, - stats - .target_pox_reward_cycle - .saturating_sub(INV_REWARD_CYCLES), - full_rescan, + self.burnchain + .block_height_to_reward_cycle(stats.inv.get_block_height()) + .unwrap_or(0), ); debug!( - "{:?}: proceeding to block inventory scan for {:?} (diverged) at reward cycle {} (ibd={}, full={})", - &self.local_peer, nk, scan_start_rc, ibd, full_rescan + "{:?}: proceeding to block inventory scan for {:?} (diverged) at reward cycle {} (ibd={})", + &self.local_peer, nk, scan_start_rc, ibd ); stats.learned_data = true; @@ -1981,8 +1986,12 @@ impl PeerNetwork { } // proceed to block scan. 
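The reworked `get_block_scan_start` above replaces the full-rescan flag with a single rule, spelled out by its `test_debug!` line: start the inventory scan at the minimum of our Stacks tip's reward cycle and the remote peer's highest reward cycle minus the configured `inv_reward_cycles` look-back. A small sketch of just that arithmetic (the values in the asserts are illustrative, not the node's defaults):

```rust
// Sketch of the new scan-start rule: every pass begins at
// min(stacks_tip_rc, highest_remote_rc - inv_reward_cycles).
fn block_scan_start(
    stacks_tip_reward_cycle: u64,
    highest_remote_reward_cycle: u64,
    inv_reward_cycles: u64,
) -> u64 {
    std::cmp::min(
        stacks_tip_reward_cycle,
        highest_remote_reward_cycle.saturating_sub(inv_reward_cycles),
    )
}

fn main() {
    // A peer far ahead of us: scan from our own tip's reward cycle.
    assert_eq!(block_scan_start(10, 100, 2), 10);
    // A peer near our height: back off `inv_reward_cycles` cycles from it.
    assert_eq!(block_scan_start(10, 11, 2), 9);
    // saturating_sub keeps the start at 0 for very young chains.
    assert_eq!(block_scan_start(0, 1, 2), 0);
}
```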
- let scan_start = - self.get_block_scan_start(sortdb, stats.inv.get_pox_height(), full_rescan); + let scan_start = self.get_block_scan_start( + sortdb, + self.burnchain + .block_height_to_reward_cycle(stats.inv.get_block_height()) + .unwrap_or(0), + ); debug!( "{:?}: proceeding to block inventory scan for {:?} at reward cycle {}", &self.local_peer, nk, scan_start @@ -2051,8 +2060,9 @@ impl PeerNetwork { if ibd && stats.status == NodeStatus::Diverged { // we were in the initial block download, and we diverged. // we should try and deepen the scan. - stats.block_reward_cycle = - stats.block_reward_cycle.saturating_sub(INV_REWARD_CYCLES); + stats.block_reward_cycle = stats + .block_reward_cycle + .saturating_sub(self.connection_opts.inv_reward_cycles); let learned_data_height = self .burnchain .reward_cycle_to_block_height(stats.block_reward_cycle); @@ -2129,7 +2139,6 @@ impl PeerNetwork { nk: &NeighborKey, stats: &mut NeighborBlockStats, request_timeout: u64, - full_rescan: bool, ibd: bool, ) -> Result { while !stats.done { @@ -2141,10 +2150,10 @@ impl PeerNetwork { let again = match stats.state { InvWorkState::GetPoxInvBegin => self - .inv_getpoxinv_begin(sortdb, nk, stats, request_timeout, full_rescan) + .inv_getpoxinv_begin(sortdb, nk, stats, request_timeout) .and_then(|_| Ok(true))?, InvWorkState::GetPoxInvFinish => { - self.inv_getpoxinv_try_finish(sortdb, nk, stats, full_rescan, ibd)? + self.inv_getpoxinv_try_finish(sortdb, nk, stats, ibd)? } InvWorkState::GetBlocksInvBegin => self .inv_getblocksinv_begin(sortdb, nk, stats, request_timeout) @@ -2287,14 +2296,7 @@ impl PeerNetwork { stats.done ); if !stats.done { - match network.inv_sync_run( - sortdb, - nk, - stats, - inv_state.request_timeout, - inv_state.hint_do_full_rescan, - ibd, - ) { + match network.inv_sync_run(sortdb, nk, stats, inv_state.request_timeout, ibd) { Ok(d) => d, Err(net_error::StaleView) => { // stop work on this state machine -- it needs to be restarted. @@ -2340,7 +2342,7 @@ impl PeerNetwork { debug!("{:?}: remote neighbor {:?} diverged (at {}), so try re-scanning at height {}", &network.local_peer, &nk, stats.learned_data_height, inv_state.hint_learned_data_height); } else { debug!( - "{:?}: learned something new from {:?} at height {}", + "{:?}: learned to scan from {:?} at height {}", &network.local_peer, &nk, stats.learned_data_height ); } @@ -2374,51 +2376,15 @@ impl PeerNetwork { network.get_block_scan_start( sortdb, network.pox_id.num_inventory_reward_cycles() as u64, - inv_state.hint_do_full_rescan, ), )) .saturating_sub(sortdb.first_block_height); debug!( - "{:?}: inventory sync finished; sortition start is {} (do rescan? 
{})", - &network.local_peer, - inv_state.block_sortition_start, - inv_state.hint_do_full_rescan + "{:?}: inventory sync finished; sortition start is {}", + &network.local_peer, inv_state.block_sortition_start, ); - let was_full = inv_state.hint_do_full_rescan; - if was_full { - let synced_with_bootstrap_peer = if ibd { - // make sure we've sync'ed with at least one bootstrap peer before - // clearing the hint_do_full_rescan flag - let synced = bootstrap_peers.len() == 0 - || !bootstrap_peers.is_disjoint(&fully_synced_peers); - if synced { - debug!( - "{:?}: finished full inventory rescan in initial block download with {} always-allowed peer(s)", - &network.local_peer, - bootstrap_peers.len() - ); - } else { - debug!("{:?}: did NOT finish full inventory rescan in initial block download", &network.local_peer); - } - synced - } else { - // this is best-effort if not in initial block-download - debug!( - "{:?}: finished best-effort full inventory rescan", - &network.local_peer - ); - true - }; - - if synced_with_bootstrap_peer { - inv_state.last_full_rescanned_at = get_epoch_time_secs(); - inv_state.hint_do_full_rescan = false; - inv_state.num_full_inv_syncs += 1; - } - } - if !inv_state.hint_learned_data && inv_state.block_stats.len() > 0 { // did a full scan without learning anything new inv_state.last_rescanned_at = get_epoch_time_secs(); @@ -2430,9 +2396,7 @@ impl PeerNetwork { &network.local_peer, &inv_state.block_stats.len(); "ibd" => %ibd, - "was_full" => %was_full, "num_inv_syncs" => %inv_state.num_inv_syncs, - "num_full_inv_syncs" => %inv_state.num_full_inv_syncs, "num_sync_neighbors" => &inv_state.block_stats.len() ); } else { @@ -2446,27 +2410,24 @@ impl PeerNetwork { &network.local_peer, inv_state.block_stats.len(); "ibd" => %ibd, - "was_full" => %was_full, "num_inv_syncs" => %inv_state.num_inv_syncs, - "num_full_inv_syncs" => %inv_state.num_full_inv_syncs, "num_sync_neighbors" => &inv_state.block_stats.len() ); } - if inv_state.last_full_rescanned_at + network.connection_opts.full_inv_sync_interval - < get_epoch_time_secs() - { - if !ibd && !inv_state.hint_do_full_rescan { - debug!("{:?}: schedule full inventory sync", &network.local_peer); - inv_state.hint_do_full_rescan = true; - } - } - let bad_peers = inv_state.cull_bad_peers(); for bad_peer in bad_peers { + info!( + "{:?}: will no longer track inventory of bad peer {:?}", + &network.local_peer, &bad_peer + ); new_sync_peers.remove(&bad_peer); } for dead_peer in dead_peers.iter() { + info!( + "{:?}: will no longer track inventory of dead peer {:?}", + &network.local_peer, &dead_peer + ); new_sync_peers.remove(dead_peer); } @@ -2482,8 +2443,10 @@ impl PeerNetwork { && good_sync_peers_set.len() < (network.connection_opts.num_neighbors as usize) { + debug!("{:?}: good sync peer {:?}", &network.local_peer, &nk); good_sync_peers_set.insert(nk); } else { + debug!("{:?}: random sync peer {:?}", &network.local_peer, &nk); random_sync_peers_list.push(nk); } } @@ -2500,8 +2463,9 @@ impl PeerNetwork { } } else { debug!( - "{:?}: in initial block download; only inv-sync with always-allowed peers", - &network.local_peer + "{:?}: in initial block download; only inv-sync with {} always-allowed peers", + &network.local_peer, + good_sync_peers_set.len() ); } @@ -3406,7 +3370,9 @@ mod test { peer_1 .with_network_state(|sortdb, chainstate, network, _relayer, _mempool| { network.refresh_local_peer().unwrap(); - network.refresh_burnchain_view(sortdb, chainstate).unwrap(); + network + .refresh_burnchain_view(sortdb, chainstate, false) + .unwrap(); 
network.refresh_sortition_view(sortdb).unwrap(); Ok(()) }) @@ -4076,221 +4042,6 @@ mod test { }) } - #[test] - #[ignore] - fn test_sync_inv_2_peers_plain_full_sync() { - with_timeout(600, || { - let mut peer_1_config = - TestPeerConfig::new("test_sync_inv_2_peers_full_sync", 32000, 42000); - let mut peer_2_config = - TestPeerConfig::new("test_sync_inv_2_peers_full_sync", 32001, 42001); - - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - let num_blocks = ((GETPOXINV_MAX_BITLEN as u64) + INV_REWARD_CYCLES) * 4; - let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peer_1.sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - sn.block_height + 1 - }; - - for i in 0..num_blocks { - let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - - let num_burn_blocks = { - let sn = SortitionDB::get_canonical_burn_chain_tip( - peer_1.sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - sn.block_height + 1 - }; - - let mut round = 0; - let mut inv_1_count = 0; - let mut inv_2_count = 0; - let mut inv_1_full_count = 0; - let mut inv_2_full_count = 0; - - // there must be a wall-clock delay. - // the first full-sync happens immediately, so don't count it. - let start_time = get_epoch_time_secs(); - let num_full_syncs = 5; - let expected_time = (num_full_syncs - 1) * FULL_INV_SYNC_INTERVAL; - - while inv_1_count < num_blocks - || inv_2_count < num_blocks - || inv_1_full_count < num_full_syncs - || inv_2_full_count < num_full_syncs - { - let _ = peer_1.step(); - let _ = peer_2.step(); - - let x = match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - ( - inv.get_inv_num_blocks(&peer_2.to_neighbor().addr), - inv.num_full_inv_syncs, - ) - } - None => (0, 0), - }; - inv_1_count = x.0; - inv_1_full_count = x.1; - - let x = match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - ( - inv.get_inv_num_blocks(&peer_1.to_neighbor().addr), - inv.num_full_inv_syncs, - ) - } - None => (0, 0), - }; - inv_2_count = x.0; - inv_2_full_count = x.1; - - // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} - } - - match peer_2.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} - } - - round += 1; - - info!( - "Peer 1: {},{} Peer 2: {},{}", - inv_1_count, inv_1_full_count, inv_2_count, inv_2_full_count - ); - } - - let finish_time = get_epoch_time_secs(); - info!( - "Completed walk round {} step(s) and {} seconds", - round, - finish_time.saturating_sub(start_time) - ); - - assert!( - finish_time.saturating_sub(start_time) > expected_time, - "BUG: expected {}s, got {}s", - expected_time, - finish_time.saturating_sub(start_time) - ); - - peer_1.dump_frontier(); - peer_2.dump_frontier(); - - info!( - "Peer 1 stats: {:?}", - 
&peer_1.network.inv_state.as_ref().unwrap().block_stats - ); - info!( - "Peer 2 stats: {:?}", - &peer_2.network.inv_state.as_ref().unwrap().block_stats - ); - - let peer_1_inv = peer_2 - .network - .inv_state - .as_ref() - .unwrap() - .block_stats - .get(&peer_1.to_neighbor().addr) - .unwrap() - .inv - .clone(); - let peer_2_inv = peer_1 - .network - .inv_state - .as_ref() - .unwrap() - .block_stats - .get(&peer_2.to_neighbor().addr) - .unwrap() - .inv - .clone(); - - info!("Peer 1 inv: {:?}", &peer_1_inv); - info!("Peer 2 inv: {:?}", &peer_2_inv); - - info!("peer 1's view of peer 2: {:?}", &peer_2_inv); - - assert_eq!(peer_2_inv.num_sortitions, num_burn_blocks); - - // peer 1 should have learned that peer 2 has all the blocks - for i in 0..num_blocks { - assert!( - peer_2_inv.has_ith_block(i + first_stacks_block_height), - "Missing block {} (+ {})", - i, - first_stacks_block_height - ); - } - - // peer 1 should have learned that peer 2 has all the microblock streams - for i in 1..(num_blocks - 1) { - assert!( - peer_2_inv.has_ith_microblock_stream(i + first_stacks_block_height), - "Missing microblock {} (+ {})", - i, - first_stacks_block_height - ); - } - - let peer_1_inv = peer_2 - .network - .inv_state - .as_ref() - .unwrap() - .block_stats - .get(&peer_1.to_neighbor().addr) - .unwrap() - .inv - .clone(); - test_debug!("peer 2's view of peer 1: {:?}", &peer_1_inv); - - assert_eq!(peer_1_inv.num_sortitions, num_burn_blocks); - - // peer 2 should have learned that peer 1 has all the blocks as well - for i in 0..num_blocks { - assert!( - peer_1_inv.has_ith_block(i + first_stacks_block_height), - "Missing block {} (+ {})", - i, - first_stacks_block_height - ); - } - }) - } - #[test] #[ignore] fn test_sync_inv_2_peers_stale() { @@ -4410,6 +4161,8 @@ mod test { let mut peer_2_config = TestPeerConfig::new("test_sync_inv_2_peers_unstable", 31997, 41998); + let stable_confs = peer_1_config.burnchain.stable_confirmations as u64; + peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); @@ -4442,7 +4195,7 @@ mod test { peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); } else { // peer 1 diverges - test_debug!("Peer 1 diverges"); + test_debug!("Peer 1 diverges at {}", i + first_stacks_block_height); peer_1.next_burnchain_block(vec![]); } } @@ -4460,7 +4213,7 @@ mod test { assert_ne!(sn1.burn_header_hash, sn2.burn_header_hash); } - let num_stable_blocks = num_blocks - 1; + let num_stable_blocks = num_blocks - stable_confs; let num_burn_blocks = { let sn = SortitionDB::get_canonical_burn_chain_tip( @@ -4587,8 +4340,8 @@ mod test { .clone(); test_debug!("peer 2's view of peer 1: {:?}", &peer_1_inv); - assert_eq!(peer_2_inv.num_sortitions, num_burn_blocks - 1); - assert_eq!(peer_1_inv.num_sortitions, num_burn_blocks - 1); + assert_eq!(peer_2_inv.num_sortitions, num_burn_blocks - stable_confs); + assert_eq!(peer_1_inv.num_sortitions, num_burn_blocks - stable_confs); // only 8 reward cycles -- we couldn't agree on the 9th assert_eq!(peer_1_inv.pox_inv, vec![255]); @@ -4596,7 +4349,7 @@ mod test { // peer 1 should have learned that peer 2 has all the blocks, up to the point of // instability - for i in 0..(num_blocks - 1) { + for i in 0..(num_blocks - stable_confs) { assert!(peer_2_inv.has_ith_block(i + first_stacks_block_height)); if i > 0 { assert!(peer_2_inv.has_ith_microblock_stream(i + first_stacks_block_height)); @@ -4605,16 +4358,12 @@ mod test { } } - for i in 0..(num_blocks - 1) { + for i in 0..(num_blocks - stable_confs) { 
assert!(peer_1_inv.has_ith_block(i + first_stacks_block_height)); - if i > 0 && i != num_blocks - 2 { - // peer 1 doesn't have the final microblock stream, since no anchor block confirmed it - assert!(peer_1_inv.has_ith_microblock_stream(i + first_stacks_block_height)); - } } - assert!(!peer_2_inv.has_ith_block(num_blocks - 1)); - assert!(!peer_2_inv.has_ith_microblock_stream(num_blocks - 1)); + assert!(!peer_2_inv.has_ith_block(num_blocks - stable_confs)); + assert!(!peer_2_inv.has_ith_microblock_stream(num_blocks - stable_confs)); }) } @@ -4761,11 +4510,15 @@ mod test { round += 1; test_debug!( - "\n\ninv_1_count = {}, inv_2_count = {}", diff --git a/src/net/mod.rs b/src/net/mod.rs --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -313,6 +319,7 @@ impl fmt::Display for Error { Error::ConnectionCycle => write!(f, "Tried to connect to myself"), Error::NotFoundError => write!(f, "Requested data not found"), Error::Transient(ref s) => write!(f, "Transient network error: {}", s), + Error::ExpectedEndOfStream => write!(f, "Expected end-of-stream"), } } } @@ -372,6 +379,7 @@ impl error::Error for Error { Error::ConnectionCycle => None, Error::NotFoundError => None, Error::Transient(ref _s) => None, + Error::ExpectedEndOfStream => None, } } } @@ -450,6 +458,16 @@ impl<'de> Deserialize<'de> for PeerAddress { } impl PeerAddress { + pub fn from_slice(bytes: &[u8]) -> Option<PeerAddress> { + if bytes.len() != 16 { + return None; + } + + let mut bytes16 = [0u8; 16]; + bytes16.copy_from_slice(&bytes[0..16]); + Some(PeerAddress(bytes16)) + } + /// Is this an IPv4 address? pub fn is_ipv4(&self) -> bool { self.ipv4_octets().is_some() } @@ -838,6 +856,17 @@ pub struct NatPunchData { pub nonce: u32, } +define_u8_enum!(MemPoolSyncDataID { + BloomFilter = 0x01, + TxTags = 0x02 }); + +#[derive(Debug, Clone, PartialEq)] +pub enum MemPoolSyncData { + BloomFilter(BloomFilter<BloomNodeHasher>), + TxTags([u8; 32], Vec<TxTag>), +} + #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct RelayData { pub peer: NeighborAddress, @@ -965,6 +994,13 @@ impl PeerHost { None => None, } } + + pub fn to_host_port(&self) -> (String, u16) { + match *self { + PeerHost::DNS(ref s, ref p) => (s.clone(), *p), + PeerHost::IP(ref i, ref p) => (format!("{}", i.to_socketaddr(0).ip()), *p), + } + } } /// The data we return on GET /v2/info @@ -982,8 +1018,8 @@ pub struct RPCPeerInfoData { pub stacks_tip: BlockHeaderHash, pub stacks_tip_consensus_hash: ConsensusHash, pub genesis_chainstate_hash: Sha256Sum, - pub unanchored_tip: StacksBlockId, - pub unanchored_seq: u16, + pub unanchored_tip: Option<StacksBlockId>, + pub unanchored_seq: Option<u16>, pub exit_at_block_height: Option<u64>, } @@ -1031,6 +1067,57 @@ pub struct RPCPoxInfoData { pub next_reward_cycle_in: u64, } +/// Headers response payload +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ExtendedStacksHeader { + pub consensus_hash: ConsensusHash, + #[serde( + serialize_with = "ExtendedStacksHeader_StacksBlockHeader_serialize", + deserialize_with = "ExtendedStacksHeader_StacksBlockHeader_deserialize" + )] + pub header: StacksBlockHeader, + pub parent_block_id: StacksBlockId, +} + +/// In ExtendedStacksHeader, encode the StacksBlockHeader as a hex string +fn ExtendedStacksHeader_StacksBlockHeader_serialize<S: serde::Serializer>( + header: &StacksBlockHeader, + s: S, +) -> Result<S::Ok, S::Error> { + let bytes = header.serialize_to_vec(); + let header_hex = to_hex(&bytes); + s.serialize_str(&header_hex.as_str()) +} + +/// In ExtendedStacksHeader, decode the StacksBlockHeader from a hex string +fn ExtendedStacksHeader_StacksBlockHeader_deserialize<'de, D: 
serde::Deserializer<'de>>( + d: D, +) -> Result { + let header_hex = String::deserialize(d)?; + let header_bytes = hex_bytes(&header_hex).map_err(de_Error::custom)?; + StacksBlockHeader::consensus_deserialize(&mut &header_bytes[..]).map_err(de_Error::custom) +} + +impl StacksMessageCodec for ExtendedStacksHeader { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &self.consensus_hash)?; + write_next(fd, &self.header)?; + write_next(fd, &self.parent_block_id)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let ch = read_next(fd)?; + let bh = read_next(fd)?; + let pbid = read_next(fd)?; + Ok(ExtendedStacksHeader { + consensus_hash: ch, + header: bh, + parent_block_id: pbid, + }) + } +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RPCFeeEstimate { pub fee_rate: f64, @@ -1079,6 +1166,15 @@ pub struct HttpRequestMetadata { pub keep_alive: bool, } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct DataVarResponse { + pub data: String, + #[serde(rename = "proof")] + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub marf_proof: Option, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MapEntryResponse { pub data: String, @@ -1261,12 +1357,20 @@ pub struct RPCNeighborsInfo { pub outbound: Vec, } +#[derive(Debug, Clone, PartialEq)] +pub enum TipRequest { + UseLatestAnchoredTip, + UseLatestUnconfirmedTip, + SpecificTip(StacksBlockId), +} + /// All HTTP request paths we support, and the arguments they carry in their paths #[derive(Debug, Clone, PartialEq)] pub enum HttpRequestType { GetInfo(HttpRequestMetadata), - GetPoxInfo(HttpRequestMetadata, Option), + GetPoxInfo(HttpRequestMetadata, TipRequest), GetNeighbors(HttpRequestMetadata), + GetHeaders(HttpRequestMetadata, u64, TipRequest), GetBlock(HttpRequestMetadata, StacksBlockId), GetMicroblocksIndexed(HttpRequestMetadata, StacksBlockId), GetMicroblocksConfirmed(HttpRequestMetadata, StacksBlockId), @@ -1274,11 +1378,14 @@ pub enum HttpRequestType { GetTransactionUnconfirmed(HttpRequestMetadata, Txid), PostTransaction(HttpRequestMetadata, StacksTransaction, Option), PostBlock(HttpRequestMetadata, ConsensusHash, StacksBlock), - PostMicroblock(HttpRequestMetadata, StacksMicroblock, Option), - GetAccount( + PostMicroblock(HttpRequestMetadata, StacksMicroblock, TipRequest), + GetAccount(HttpRequestMetadata, PrincipalData, TipRequest, bool), + GetDataVar( HttpRequestMetadata, - PrincipalData, - Option, + StacksAddress, + ContractName, + ClarityName, + TipRequest, bool, ), GetMapEntry( @@ -1287,7 +1394,7 @@ pub enum HttpRequestType { ContractName, ClarityName, Value, - Option, + TipRequest, bool, ), FeeRateEstimate(HttpRequestMetadata, TransactionPayload, u64), @@ -1298,22 +1405,17 @@ pub enum HttpRequestType { PrincipalData, ClarityName, Vec, - Option, + TipRequest, ), GetTransferCost(HttpRequestMetadata), GetContractSrc( HttpRequestMetadata, StacksAddress, ContractName, - Option, + TipRequest, bool, ), - GetContractABI( - HttpRequestMetadata, - StacksAddress, - ContractName, - Option, - ), + GetContractABI(HttpRequestMetadata, StacksAddress, ContractName, TipRequest), OptionsPreflight(HttpRequestMetadata, String), GetAttachment(HttpRequestMetadata, Hash160), GetAttachmentsInv(HttpRequestMetadata, StacksBlockId, HashSet), @@ -1322,8 +1424,9 @@ pub enum HttpRequestType { StacksAddress, ContractName, TraitIdentifier, - Option, + TipRequest, ), + MemPoolQuery(HttpRequestMetadata, MemPoolSyncData, 
Option), /// catch-all for any errors we should surface from parsing ClientError(HttpRequestMetadata, ClientError), } @@ -1401,6 +1504,8 @@ pub enum HttpResponseType { PeerInfo(HttpResponseMetadata, RPCPeerInfoData), PoxInfo(HttpResponseMetadata, RPCPoxInfoData), Neighbors(HttpResponseMetadata, RPCNeighborsInfo), + Headers(HttpResponseMetadata, Vec), + HeaderStream(HttpResponseMetadata), Block(HttpResponseMetadata, StacksBlock), BlockStream(HttpResponseMetadata), Microblocks(HttpResponseMetadata, Vec), @@ -1409,6 +1514,7 @@ pub enum HttpResponseType { StacksBlockAccepted(HttpResponseMetadata, StacksBlockId, bool), MicroblockHash(HttpResponseMetadata, BlockHeaderHash), TokenTransferCost(HttpResponseMetadata, u64), + GetDataVar(HttpResponseMetadata, DataVarResponse), GetMapEntry(HttpResponseMetadata, MapEntryResponse), CallReadOnlyFunction(HttpResponseMetadata, CallReadOnlyResponse), GetAccount(HttpResponseMetadata, AccountEntryResponse), @@ -1418,6 +1524,8 @@ pub enum HttpResponseType { UnconfirmedTransaction(HttpResponseMetadata, UnconfirmedTransactionResponse), GetAttachment(HttpResponseMetadata, GetAttachmentResponse), GetAttachmentsInv(HttpResponseMetadata, GetAttachmentsInvResponse), + MemPoolTxStream(HttpResponseMetadata), + MemPoolTxs(HttpResponseMetadata, Option, Vec), OptionsPreflight(HttpResponseMetadata), TransactionFeeEstimation(HttpResponseMetadata, RPCFeeEstimateResponse), // peer-given error responses @@ -1460,6 +1568,7 @@ pub enum StacksMessageID { Pong = 16, NatPunchRequest = 17, NatPunchReply = 18, + // reserved Reserved = 255, } @@ -1577,6 +1686,7 @@ impl_byte_array_message_codec!(StacksBlockId, 32); impl_byte_array_message_codec!(MessageSignature, 65); impl_byte_array_message_codec!(PeerAddress, 16); impl_byte_array_message_codec!(StacksPublicKeyBuffer, 33); +impl_byte_array_message_codec!(Txid, 32); impl_byte_array_serde!(ConsensusHash); @@ -1707,6 +1817,9 @@ pub const NUM_NEIGHBORS: usize = 32; // maximum number of unconfirmed microblocks can get streamed to us pub const MAX_MICROBLOCKS_UNCONFIRMED: usize = 1024; +// maximum number of block headers we'll get streamed to us +pub const MAX_HEADERS: usize = 2100; + // how long a peer will be denied for if it misbehaves #[cfg(test)] pub const DENY_BAN_DURATION: u64 = 30; // seconds @@ -1728,6 +1841,7 @@ pub struct NetworkResult { pub uploaded_blocks: Vec, // blocks sent to us via the http server pub uploaded_microblocks: Vec, // microblocks sent to us by the http server pub attachments: Vec<(AttachmentInstance, Attachment)>, + pub synced_transactions: Vec, // transactions we downloaded via a mempool sync pub num_state_machine_passes: u64, pub num_inv_sync_passes: u64, pub num_download_passes: u64, @@ -1751,6 +1865,7 @@ impl NetworkResult { uploaded_blocks: vec![], uploaded_microblocks: vec![], attachments: vec![], + synced_transactions: vec![], num_state_machine_passes: num_state_machine_passes, num_inv_sync_passes: num_inv_sync_passes, num_download_passes: num_download_passes, @@ -1768,7 +1883,9 @@ impl NetworkResult { } pub fn has_transactions(&self) -> bool { - self.pushed_transactions.len() > 0 || self.uploaded_transactions.len() > 0 + self.pushed_transactions.len() > 0 + || self.uploaded_transactions.len() > 0 + || self.synced_transactions.len() > 0 } pub fn has_attachments(&self) -> bool { @@ -1780,6 +1897,7 @@ impl NetworkResult { .values() .flat_map(|pushed_txs| pushed_txs.iter().map(|(_, tx)| tx.clone())) .chain(self.uploaded_transactions.iter().map(|x| x.clone())) + .chain(self.synced_transactions.iter().map(|x| 
x.clone())) .collect() } @@ -2281,7 +2399,7 @@ pub mod test { ..TestPeerConfig::default() }; config.data_url = - UrlString::try_from(format!("http://localhost:{}", config.http_port).as_str()) + UrlString::try_from(format!("http://127.0.0.1:{}", config.http_port).as_str()) .unwrap(); config } @@ -2294,7 +2412,7 @@ pub mod test { ..TestPeerConfig::default() }; config.data_url = - UrlString::try_from(format!("http://localhost:{}", config.http_port).as_str()) + UrlString::try_from(format!("http://127.0.0.1:{}", config.http_port).as_str()) .unwrap(); config } @@ -2393,12 +2511,7 @@ pub mod test { // manually set fees miner.test_with_tx_fees = false; - let mut burnchain = get_burnchain(&test_path, None); - burnchain.first_block_height = config.burnchain.first_block_height; - burnchain.first_block_hash = config.burnchain.first_block_hash; - burnchain.pox_constants = config.burnchain.pox_constants; - - config.burnchain = burnchain.clone(); + config.burnchain.working_dir = get_burnchain(&test_path, None).working_dir; let epochs = config.epochs.clone().unwrap_or_else(|| { StacksEpoch::unit_test_pre_2_05(config.burnchain.first_block_height) @@ -2437,7 +2550,7 @@ pub mod test { None, config.private_key_expire, PeerAddress::from_ipv4(127, 0, 0, 1), - NETWORK_P2P_PORT, + config.server_port, config.data_url.clone(), &config.asn4_entries, Some(&config.initial_neighbors), @@ -2465,8 +2578,9 @@ pub mod test { let conf = config.clone(); let post_flight_callback = move |clarity_tx: &mut ClarityTx| { + let mut receipts = vec![]; if conf.setup_code.len() > 0 { - clarity_tx.connection().as_transaction(|clarity| { + let receipt = clarity_tx.connection().as_transaction(|clarity| { let boot_code_addr = boot_code_test_addr(); let boot_code_account = StacksAccount { principal: boot_code_addr.to_account_principal(), @@ -2501,9 +2615,11 @@ pub mod test { &boot_code_smart_contract, &boot_code_account, ) - .unwrap(); + .unwrap() }); + receipts.push(receipt); } + debug!("Bootup receipts: {:?}", &receipts); }; let mut boot_data = ChainStateBootData::new( @@ -2527,8 +2643,9 @@ pub mod test { .unwrap(); let (tx, _) = sync_channel(100000); + let mut coord = ChainsCoordinator::test_new_with_observer( - &burnchain, + &config.burnchain, config.network_id, &test_path, OnChainRewardSetProvider(), @@ -2540,7 +2657,7 @@ pub mod test { let mut stacks_node = TestStacksNode::from_chainstate(chainstate); { - // pre-populate burnchain + // pre-populate burnchain, if running on bitcoin let prev_snapshot = SortitionDB::get_first_block_snapshot(sortdb.conn()).unwrap(); let mut fork = TestBurnchainFork::new( prev_snapshot.block_height, @@ -2577,7 +2694,6 @@ pub mod test { config.server_port, ) .unwrap(); - PeerDB::set_local_services(&mut tx, ServiceFlags::RELAY as u16).unwrap(); PeerDB::set_local_private_key( &mut tx, &config.private_key, @@ -2663,7 +2779,7 @@ pub mod test { None, false, false, - 10, + 100, &RPCHandlerArgs::default(), &mut HashSet::new(), ); @@ -2687,7 +2803,7 @@ pub mod test { Some(dns_client), false, false, - 10, + 100, &RPCHandlerArgs::default(), &mut HashSet::new(), ); @@ -2711,39 +2827,6 @@ pub mod test { ret } - // this is a fake block -- don't try inserting it - pub fn empty_burnchain_block(&self, block_height: u64) -> BurnchainBlock { - assert!(block_height + 1 >= self.config.burnchain.first_block_height); - let prev_block_height = block_height - 1; - - let block_hash_i = Uint256::from_u64(block_height); - let mut block_hash_bytes = [0u8; 32]; - block_hash_bytes.copy_from_slice(&block_hash_i.to_u8_slice()); - - let 
prev_block_hash_i = Uint256::from_u64(prev_block_height); - let mut prev_block_hash_bytes = [0u8; 32]; - prev_block_hash_bytes.copy_from_slice(&prev_block_hash_i.to_u8_slice()); - - BurnchainBlock::Bitcoin(BitcoinBlock { - block_height: block_height + 1, - block_hash: BurnchainHeaderHash(block_hash_bytes), - parent_block_hash: BurnchainHeaderHash(prev_block_hash_bytes), - txs: vec![], - timestamp: get_epoch_time_secs(), - }) - } - - fn make_empty_burnchain_block(&mut self) -> BurnchainBlock { - let empty_block = { - let sortdb = self.sortdb.take().unwrap(); - let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let empty_block = self.empty_burnchain_block(sn.block_height); - self.sortdb = Some(sortdb); - empty_block - }; - empty_block - } - pub fn next_burnchain_block( &mut self, blockstack_ops: Vec, @@ -3090,6 +3173,10 @@ pub mod test { self.next_burnchain_block(vec![]) } + pub fn mempool(&mut self) -> &mut MemPoolDB { + self.mempool.as_mut().unwrap() + } + pub fn chainstate(&mut self) -> &mut StacksChainState { &mut self.stacks_node.as_mut().unwrap().chainstate } @@ -3402,8 +3489,12 @@ pub mod test { let (mut miner_chainstate, _) = StacksChainState::open(false, network_id, &chainstate_path).unwrap(); let sort_iconn = sortdb.index_conn(); + + let mut miner_epoch_info = builder + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) + .unwrap(); let mut epoch = builder - .epoch_begin(&mut miner_chainstate, &sort_iconn) + .epoch_begin(&sort_iconn, &mut miner_epoch_info) .unwrap() .0; @@ -3470,4 +3561,14 @@ pub mod test { debug!("--- END ALL PEERS ({}) -----", peers.len()); } } + + pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { + StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(sk)], + ) + .unwrap() + } } diff --git a/src/net/neighbors.rs b/src/net/neighbors.rs index f770716a5c..6f20334fc1 100644 --- a/src/net/neighbors.rs +++ b/src/net/neighbors.rs @@ -1890,10 +1890,16 @@ impl PeerNetwork { Ok((count, num_allowed_peers as u64)) } - /// Instantiate the neighbor walk to an always-allowed node - fn instantiate_walk_to_always_allowed(&mut self) -> Result<(), net_error> { - let allowed_peers = - PeerDB::get_always_allowed_peers(self.peerdb.conn(), self.local_peer.network_id)?; + /// Instantiate the neighbor walk to an always-allowed node. + /// If we're in the initial block download, then this must also be a *bootstrap* peer. + fn instantiate_walk_to_always_allowed(&mut self, ibd: bool) -> Result<(), net_error> { + let allowed_peers = if ibd { + // only get bootstrap peers + PeerDB::get_bootstrap_peers(&self.peerdb.conn(), self.local_peer.network_id)? + } else { + // can be any peer + PeerDB::get_always_allowed_peers(self.peerdb.conn(), self.local_peer.network_id)? + }; let mut count = 0; for allowed in allowed_peers.iter() { @@ -2764,7 +2770,7 @@ impl PeerNetwork { /// Mask errors by restarting the graph walk. /// Returns the walk result, and a true/false flag to indicate whether or not the work for the /// walk was finished (i.e. we either completed the walk, or we reset the walk) - pub fn walk_peer_graph(&mut self) -> (bool, Option) { + pub fn walk_peer_graph(&mut self, ibd: bool) -> (bool, Option) { if self.walk.is_none() { // time to do a walk yet? 
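The `ibd` parameter threaded through `walk_peer_graph()` above changes which peers may seed a new walk. A minimal sketch of the selection rule, using the same `PeerDB` accessors the diff itself calls (the free-standing wrapper function is hypothetical, added here only for illustration):

```rust
use net::db::PeerDB;
use net::Neighbor;
use util::db::Error as db_error;

// Hypothetical helper restating the rule above: during initial block
// download (IBD), only operator-configured bootstrap peers may seed a
// neighbor walk; otherwise any always-allowed peer may.
fn walk_seed_candidates(
    peerdb: &PeerDB,
    network_id: u32,
    ibd: bool,
) -> Result<Vec<Neighbor>, db_error> {
    if ibd {
        // a booting node should only take its view of the peer graph
        // from peers its operator explicitly trusts
        PeerDB::get_bootstrap_peers(peerdb.conn(), network_id)
    } else {
        PeerDB::get_always_allowed_peers(peerdb.conn(), network_id)
    }
}
```

The walk logic that follows then falls back to an inbound-neighbor walk when this candidate set turns up empty (the `net_error::NotFoundError` arm below).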
if (self.walk_count > self.connection_opts.num_initial_walks @@ -2799,7 +2805,7 @@ impl PeerNetwork { ); // always ensure we're connected to always-allowed outbound peers - let walk_res = match self.instantiate_walk_to_always_allowed() { + let walk_res = match self.instantiate_walk_to_always_allowed(ibd) { Ok(x) => Ok(x), Err(net_error::NotFoundError) => { if self.walk_attempts % (self.connection_opts.walk_inbound_ratio + 1) == 0 { @@ -5716,7 +5722,7 @@ mod test { random_order[i] = i; } let mut rng = thread_rng(); - &mut &random_order.shuffle(&mut rng); + let _ = &mut &random_order.shuffle(&mut rng); for i in random_order.into_iter() { let _ = peers[i].step(); diff --git a/src/net/p2p.rs b/src/net/p2p.rs index 3e529b57f5..25196be20d 100644 --- a/src/net/p2p.rs +++ b/src/net/p2p.rs @@ -20,6 +20,9 @@ use std::collections::HashMap; use std::collections::HashSet; use std::collections::VecDeque; use std::mem; +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::Ipv6Addr; use std::net::SocketAddr; use std::sync::mpsc::sync_channel; use std::sync::mpsc::Receiver; @@ -34,6 +37,8 @@ use mio::net as mio_net; use rand::prelude::*; use rand::thread_rng; +use url; + use burnchains::Address; use burnchains::Burnchain; use burnchains::BurnchainView; @@ -50,7 +55,6 @@ use net::chat::ConversationP2P; use net::chat::NeighborStats; use net::connection::ConnectionOptions; use net::connection::NetworkReplyHandle; -use net::connection::ReplyHandleHttp; use net::connection::ReplyHandleP2P; use net::db::LocalPeer; use net::db::PeerDB; @@ -72,6 +76,7 @@ use net::PeerAddress; use net::*; use util::db::DBConn; use util::db::Error as db_error; +use util::get_epoch_time_ms; use util::get_epoch_time_secs; use util::hash::to_hex; use util::log; @@ -84,8 +89,11 @@ use crate::types::chainstate::{PoxId, SortitionId, StacksBlockHeader}; #[derive(Debug)] pub enum NetworkRequest { Ban(Vec), - AdvertizeBlocks(BlocksAvailableMap), // announce to all wanting neighbors that we have these blocks - AdvertizeMicroblocks(BlocksAvailableMap), // announce to all wanting neighbors that we have these confirmed microblock streams + AdvertizeBlocks(BlocksAvailableMap, HashMap), // announce to all wanting neighbors that we have these blocks + AdvertizeMicroblocks( + BlocksAvailableMap, + HashMap)>, + ), // announce to all wanting neighbors that we have these confirmed microblock streams Relay(NeighborKey, StacksMessage), Broadcast(Vec, StacksMessageType), } @@ -133,14 +141,22 @@ impl NetworkHandle { } /// Advertize blocks - pub fn advertize_blocks(&mut self, blocks: BlocksAvailableMap) -> Result<(), net_error> { - let req = NetworkRequest::AdvertizeBlocks(blocks); + pub fn advertize_blocks( + &mut self, + blocks: BlocksAvailableMap, + block_data: HashMap, + ) -> Result<(), net_error> { + let req = NetworkRequest::AdvertizeBlocks(blocks, block_data); self.send_request(req) } /// Advertize microblocks - pub fn advertize_microblocks(&mut self, blocks: BlocksAvailableMap) -> Result<(), net_error> { - let req = NetworkRequest::AdvertizeMicroblocks(blocks); + pub fn advertize_microblocks( + &mut self, + microblocks: BlocksAvailableMap, + microblock_data: HashMap)>, + ) -> Result<(), net_error> { + let req = NetworkRequest::AdvertizeMicroblocks(microblocks, microblock_data); self.send_request(req) } @@ -189,6 +205,21 @@ pub enum PeerNetworkWorkState { Prune, } +/// The four states the mempool sync state machine can be in +#[derive(Debug, Clone, PartialEq)] +pub enum MempoolSyncState { + /// Picking an outbound peer + PickOutboundPeer, + 
/// Resolving its data URL to a SocketAddr. Contains the data URL, DNS request handle, and + /// mempool page ID + ResolveURL(UrlString, DNSRequest, Txid), + /// Sending the request for mempool transactions. Contains the data URL, resolved socket, and + /// mempool page. + SendQuery(UrlString, SocketAddr, Txid), + /// Receiving the mempool response. Contains the URL, socket address, and event ID + RecvResponse(UrlString, SocketAddr, usize), +} + pub type PeerMap = HashMap; #[derive(Debug)] @@ -237,6 +268,7 @@ pub struct PeerNetwork { // work state -- we can be walking, fetching block inventories, fetching blocks, pruning, etc. pub work_state: PeerNetworkWorkState, + have_data_to_download: bool, // neighbor walk state pub walk: Option, @@ -267,6 +299,13 @@ pub struct PeerNetwork { // peer attachment downloader pub attachments_downloader: Option, + // outstanding request to perform a mempool sync + // * mempool_sync_deadline is when the next mempool sync must start + // * mempool_sync_timeout is when the current mempool sync must stop + mempool_state: MempoolSyncState, + mempool_sync_deadline: u64, + mempool_sync_timeout: u64, + // how often we pruned a given inbound/outbound peer pub prune_outbound_counts: HashMap, pub prune_inbound_counts: HashMap, @@ -372,6 +411,7 @@ impl PeerNetwork { connection_opts: connection_opts, work_state: PeerNetworkWorkState::GetPublicIP, + have_data_to_download: false, walk: None, walk_deadline: 0, @@ -391,6 +431,10 @@ impl PeerNetwork { block_downloader: None, attachments_downloader: None, + mempool_state: MempoolSyncState::PickOutboundPeer, + mempool_sync_deadline: 0, + mempool_sync_timeout: 0, + prune_outbound_counts: HashMap::new(), prune_inbound_counts: HashMap::new(), @@ -987,15 +1031,15 @@ impl PeerNetwork { } Ok(()) } - NetworkRequest::AdvertizeBlocks(blocks) => { + NetworkRequest::AdvertizeBlocks(blocks, block_data) => { if !(cfg!(test) && self.connection_opts.disable_block_advertisement) { - self.advertize_blocks(blocks)?; + self.advertize_blocks(blocks, block_data)?; } Ok(()) } - NetworkRequest::AdvertizeMicroblocks(mblocks) => { + NetworkRequest::AdvertizeMicroblocks(mblocks, mblock_data) => { if !(cfg!(test) && self.connection_opts.disable_block_advertisement) { - self.advertize_microblocks(mblocks)?; + self.advertize_microblocks(mblocks, mblock_data)?; } Ok(()) } @@ -2087,7 +2131,7 @@ impl PeerNetwork { /// Update the state of our neighbor walk. /// Return true if we finish, and true if we're throttled - fn do_network_neighbor_walk(&mut self) -> Result { + fn do_network_neighbor_walk(&mut self, ibd: bool) -> Result { if cfg!(test) && self.connection_opts.disable_neighbor_walk { test_debug!("neighbor walk is disabled"); return Ok(true); @@ -2096,7 +2140,7 @@ impl PeerNetwork { debug!("{:?}: walk peer graph", &self.local_peer); // walk the peer graph and deal with new/dropped connections - let (done, walk_result_opt) = self.walk_peer_graph(); + let (done, walk_result_opt) = self.walk_peer_graph(ibd); match walk_result_opt { None => {} Some(walk_result) => { @@ -2107,6 +2151,52 @@ impl PeerNetwork { Ok(done) } + /// Do a mempool sync. Return any transactions we might receive. + fn do_network_mempool_sync( + &mut self, + dns_client_opt: &mut Option<&mut DNSClient>, + mempool: &MemPoolDB, + chainstate: &mut StacksChainState, + ibd: bool, + ) -> Result>, net_error> { + if ibd { + return Ok(None); + } + + match self.do_mempool_sync(dns_client_opt, mempool, chainstate)? { + (true, txs_opt) => { + // did we run to completion? 
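The code around this point unpacks `do_mempool_sync()`'s `(bool, Option<Vec<StacksTransaction>>)` return value. A small sketch of that contract and how a caller is meant to consume one step per network pass (the consumer function is hypothetical):

```rust
use chainstate::stacks::StacksTransaction;

// Sketch of the (done, txs_opt) contract: `done == true` means the sync ran
// to completion (or was reset); any Some(txs) -- even from a partial page --
// must still be handed to the relayer thread for mempool admission.
fn consume_sync_step(
    result: (bool, Option<Vec<StacksTransaction>>),
    synced: &mut Vec<StacksTransaction>,
) -> bool {
    let (done, txs_opt) = result;
    if let Some(mut txs) = txs_opt {
        synced.append(&mut txs);
    }
    // on `done`, the caller also pushes mempool_sync_deadline forward by
    // connection_opts.mempool_sync_interval before starting the next sync
    done
}
```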
+ if let Some(txs) = txs_opt { + debug!( + "{:?}: Mempool sync obtained {} transactions from mempool sync, and done receiving", + &self.local_peer, + txs.len() + ); + + self.mempool_sync_deadline = + get_epoch_time_secs() + self.connection_opts.mempool_sync_interval; + return Ok(Some(txs)); + } else { + return Ok(None); + } + } + (false, txs_opt) => { + // did we get some transactions, but have more to get? + if let Some(txs) = txs_opt { + debug!( + "{:?}: Mempool sync obtained {} transactions from mempool sync, but have more", + &self.local_peer, + txs.len() + ); + + return Ok(Some(txs)); + } else { + return Ok(None); + } + } + } + } + /// Begin the process of learning this peer's public IP address. /// Return Ok(finished with this step) /// Return Err(..) on failure @@ -2421,6 +2511,7 @@ impl PeerNetwork { fn do_network_block_download( &mut self, sortdb: &SortitionDB, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, dns_client: &mut DNSClient, ibd: bool, @@ -2443,7 +2534,7 @@ impl PeerNetwork { mut microblocks, mut broken_http_peers, mut broken_p2p_peers, - ) = match self.download_blocks(sortdb, chainstate, dns_client, ibd) { + ) = match self.download_blocks(sortdb, mempool, chainstate, dns_client, ibd) { Ok(x) => x, Err(net_error::NotConnected) => { // there was simply nothing to do @@ -2691,8 +2782,8 @@ impl PeerNetwork { } /// Push any blocks and microblock streams that we're holding onto out to our neighbors. - /// Push all but the last arrived Stacks block (the block-push and blocks-available protocols - /// should handle this, and we don't want the network to DDoS itself to death). + /// Start with the most-recently-arrived data, since this node is likely to have already + /// fetched older data via the block-downloader. fn try_push_local_data( &mut self, sortdb: &SortitionDB, @@ -2743,16 +2834,19 @@ impl PeerNetwork { .map(|inv_state| inv_state.block_stats.keys().map(|nk| nk.clone()).collect()) .unwrap_or(vec![]); - if self.antientropy_start_reward_cycle >= self.pox_id.num_inventory_reward_cycles() as u64 { - debug!("AntiEntropy: wrap around back to reward cycle 0"); - self.antientropy_start_reward_cycle = 0; + if self.antientropy_start_reward_cycle == 0 { + debug!( + "AntiEntropy: wrap around back to reward cycle {}", + self.pox_id.num_inventory_reward_cycles().saturating_sub(1) + ); + self.antientropy_start_reward_cycle = + self.pox_id.num_inventory_reward_cycles().saturating_sub(1) as u64; } let reward_cycle_start = self.antientropy_start_reward_cycle; - let reward_cycle_finish = cmp::min( - self.antientropy_start_reward_cycle + self.connection_opts.inv_reward_cycles, - self.pox_id.num_inventory_reward_cycles() as u64, - ); + let reward_cycle_finish = + self.antientropy_start_reward_cycle + .saturating_sub(self.connection_opts.inv_reward_cycles) as u64; self.antientropy_start_reward_cycle = reward_cycle_finish; @@ -2768,7 +2862,8 @@ impl PeerNetwork { reward_cycle_finish ); - for reward_cycle in reward_cycle_start..reward_cycle_finish { + // go from latest to earliest reward cycle + for reward_cycle in (reward_cycle_finish..reward_cycle_start).rev() { let local_blocks_inv = match self.get_local_blocks_inv(sortdb, chainstate, reward_cycle) { Ok(inv) => inv, @@ -3028,11 +3123,453 @@ impl PeerNetwork { Ok(()) } + /// Extract an IP address from a UrlString if it exists + pub fn try_get_url_ip(url_str: &UrlString) -> Result, net_error> { + let url = url_str.parse_to_block_url()?; + let port = match url.port_or_known_default() { + Some(p) => p, + None => { + 
warn!("Unsupported URL {:?}: unknown port", &url); + return Ok(None); + } + }; + match url.host() { + Some(url::Host::Domain(d)) => { + if d == "localhost" { + Ok(Some(SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + port, + ))) + } else { + // can't use this + Ok(None) + } + } + Some(url::Host::Ipv4(addr)) => Ok(Some(SocketAddr::new(IpAddr::V4(addr), port))), + Some(url::Host::Ipv6(addr)) => Ok(Some(SocketAddr::new(IpAddr::V6(addr), port))), + None => { + warn!("Unsupported URL {:?}", &url_str); + Ok(None) + } + } + } + + /// Reset a mempool sync + fn mempool_sync_reset(&mut self) { + self.mempool_state = MempoolSyncState::PickOutboundPeer; + self.mempool_sync_timeout = 0; + } + + /// Pick a peer to mempool sync with. + /// Returns Ok(None) if we're done syncing the mempool. + /// Returns Ok(Some(..)) if we're not done, and can proceed + /// Returns the new sync state -- either ResolveURL if we need to resolve a data URL, + /// or SendQuery if we got the IP address and can just issue the query. + fn mempool_sync_pick_outbound_peer( + &mut self, + dns_client_opt: &mut Option<&mut DNSClient>, + page_id: &Txid, + ) -> Result, net_error> { + if self.peers.len() == 0 { + debug!("No peers connected; cannot do mempool sync"); + return Ok(None); + } + + let mut idx = thread_rng().gen::() % self.peers.len(); + let mut mempool_sync_data_url = None; + for _ in 0..self.peers.len() + 1 { + let event_id = match self.peers.keys().skip(idx).next() { + Some(eid) => *eid, + None => { + idx = 0; + continue; + } + }; + idx = (idx + 1) % self.peers.len(); + + if let Some(convo) = self.peers.get(&event_id) { + if !convo.is_authenticated() || !convo.is_outbound() { + continue; + } + if !ConversationP2P::supports_mempool_query(convo.peer_services) { + continue; + } + if convo.data_url.len() == 0 { + continue; + } + let url = convo.data_url.clone(); + if dns_client_opt.is_none() { + if let Ok(Some(_)) = PeerNetwork::try_get_url_ip(&url) { + } else { + // need a DNS client for this one + continue; + } + } + + mempool_sync_data_url = Some(url); + break; + } + } + + if let Some(url) = mempool_sync_data_url { + self.mempool_sync_begin_resolve_data_url(url, dns_client_opt, page_id) + } else { + debug!("No peer has a data URL, so no mempool sync can happen"); + Ok(None) + } + } + + /// Begin resolving the DNS host of a data URL for mempool sync. + /// Returns Ok(None) if we're done syncing the mempool. + /// Returns Ok(Some(..)) if we're not done, and can proceed + /// Returns the new sync state -- either ResolveURL if we need to resolve a data URL, + /// or SendQuery if we got the IP address and can just issue the query. + fn mempool_sync_begin_resolve_data_url( + &self, + url_str: UrlString, + dns_client_opt: &mut Option<&mut DNSClient>, + page_id: &Txid, + ) -> Result, net_error> { + // start resolving + let url = url_str.parse_to_block_url()?; + let port = match url.port_or_known_default() { + Some(p) => p, + None => { + warn!("Unsupported URL {:?}: unknown port", &url); + return Ok(None); + } + }; + + // bare IP address? + if let Some(addr) = PeerNetwork::try_get_url_ip(&url_str)? 
{ + return Ok(Some(MempoolSyncState::SendQuery( + url_str, + addr, + page_id.clone(), + ))); + } else if let Some(url::Host::Domain(domain)) = url.host() { + if let Some(ref mut dns_client) = dns_client_opt { + // begin DNS query + match dns_client.queue_lookup( + domain.clone(), + port, + get_epoch_time_ms() + self.connection_opts.dns_timeout, + ) { + Ok(_) => {} + Err(_) => { + warn!("Failed to queue DNS lookup on {}", &url_str); + return Ok(None); + } + } + return Ok(Some(MempoolSyncState::ResolveURL( + url_str, + DNSRequest::new(domain.to_string(), port, 0), + page_id.clone(), + ))); + } else { + // can't proceed -- no DNS client + return Ok(None); + } + } else { + // can't proceed + return Ok(None); + } + } + + /// Resolve our picked mempool sync peer's data URL. + /// Returns Ok(true, ..) if we're done syncing the mempool. + /// Returns Ok(false, ..) if there's more to do + /// Returns the socket addr if we ever succeed in resolving it. + fn mempool_sync_resolve_data_url( + &mut self, + url_str: &UrlString, + request: &DNSRequest, + dns_client_opt: &mut Option<&mut DNSClient>, + ) -> Result<(bool, Option), net_error> { + if let Ok(Some(addr)) = PeerNetwork::try_get_url_ip(url_str) { + // URL contains an IP address -- go with that + Ok((false, Some(addr))) + } else if let Some(dns_client) = dns_client_opt { + // keep trying to resolve + match dns_client.poll_lookup(&request.host, request.port) { + Ok(Some(dns_response)) => match dns_response.result { + Ok(mut addrs) => { + if let Some(addr) = addrs.pop() { + // resolved! + return Ok((false, Some(addr))); + } else { + warn!("DNS returned no results for {}", url_str); + return Ok((true, None)); + } + } + Err(msg) => { + warn!("DNS failed to look up {:?}: {}", &url_str, msg); + return Ok((true, None)); + } + }, + Ok(None) => { + // still in-flight + return Ok((false, None)); + } + Err(e) => { + warn!("DNS lookup failed on {:?}: {:?}", url_str, &e); + return Ok((true, None)); + } + } + } else { + // can't do anything + debug!("No DNS client, and URL contains a domain, so no mempool sync can happen"); + return Ok((true, None)); + } + } + + /// Ask the remote peer for its mempool, connecting to it in the process if need be. + /// Returns Ok((true, ..)) if we're done mempool syncing + /// Returns Ok((false, ..)) if there's more to do + /// Returns the event ID on success + fn mempool_sync_send_query( + &mut self, + url: &UrlString, + addr: &SocketAddr, + mempool: &MemPoolDB, + chainstate: &mut StacksChainState, + page_id: Txid, + ) -> Result<(bool, Option), net_error> { + let sync_data = mempool.make_mempool_sync_data()?; + let request = HttpRequestType::MemPoolQuery( + HttpRequestMetadata::from_host(PeerHost::from_socketaddr(addr)), + sync_data, + Some(page_id), + ); + + let event_id = self.connect_or_send_http_request( + url.clone(), + addr.clone(), + request, + mempool, + chainstate, + )?; + return Ok((false, Some(event_id))); + } + + /// Receive the mempool sync response. + /// Return Ok(true, ..) if we're done with the mempool sync. + /// Return Ok(false, ..) if we have more work to do. 
+ /// Returns the page ID of the next request to make, and the list of transactions we got + fn mempool_sync_recv_response( + &mut self, + event_id: usize, + ) -> Result<(bool, Option, Option>), net_error> { + PeerNetwork::with_http(self, |network, http| { + match http.get_conversation(event_id) { + None => { + if http.is_connecting(event_id) { + debug!( + "{:?}: Mempool sync event {} is not connected yet", + &network.local_peer, event_id + ); + return Ok((false, None, None)); + } else { + // conversation died + debug!("{:?}: Mempool sync peer hung up", &network.local_peer); + return Ok((true, None, None)); + } + } + Some(ref mut convo) => { + match convo.try_get_response() { + None => { + // still waiting + debug!( + "{:?}: Mempool sync event {} still waiting for a response", + &network.local_peer, event_id + ); + return Ok((false, None, None)); + } + Some(http_response) => match http_response { + HttpResponseType::MemPoolTxs(_, page_id_opt, txs) => { + debug!("{:?}: Mempool sync received response for {} txs, next page {:?}", &network.local_peer, txs.len(), &page_id_opt); + return Ok((true, page_id_opt, Some(txs))); + } + _ => { + warn!( + "{:?}: Mempool sync request received {:?}", + &network.local_peer, &http_response + ); + return Ok((true, None, None)); + } + }, + } + } + } + }) + } + + /// Do a mempool sync + /// Return true if we're done and can advance to the next state. + /// Returns the transactions as well if the sync ran to completion. + fn do_mempool_sync( + &mut self, + dns_client_opt: &mut Option<&mut DNSClient>, + mempool: &MemPoolDB, + chainstate: &mut StacksChainState, + ) -> Result<(bool, Option>), net_error> { + if get_epoch_time_secs() <= self.mempool_sync_deadline { + debug!( + "{:?}: Wait until {} to do a mempool sync", + &self.local_peer, self.mempool_sync_deadline + ); + return Ok((true, None)); + } + + if self.mempool_sync_timeout == 0 { + // begin new sync + self.mempool_sync_timeout = + get_epoch_time_secs() + self.connection_opts.mempool_sync_timeout; + } else { + if get_epoch_time_secs() > self.mempool_sync_timeout { + debug!( + "{:?}: Mempool sync took too long; terminating", + &self.local_peer + ); + self.mempool_sync_reset(); + return Ok((true, None)); + } + } + + // try advancing states until we get blocked. + // Once we get blocked, return. + loop { + let cur_state = self.mempool_state.clone(); + debug!( + "{:?}: Mempool sync state is {:?}", + &self.local_peer, &cur_state + ); + match cur_state { + MempoolSyncState::PickOutboundPeer => { + // 1. pick a random outbound conversation. + if let Some(next_state) = + self.mempool_sync_pick_outbound_peer(dns_client_opt, &Txid([0u8; 32]))? + { + // success! can advance to either resolve a URL or to send a query + self.mempool_state = next_state; + } else { + // done + self.mempool_sync_reset(); + return Ok((true, None)); + } + } + MempoolSyncState::ResolveURL(ref url_str, ref dns_request, ref page_id) => { + // 2. resolve its data URL + match self.mempool_sync_resolve_data_url( + url_str, + dns_request, + dns_client_opt, + )? { + (false, Some(addr)) => { + // success! advance + self.mempool_state = + MempoolSyncState::SendQuery(url_str.clone(), addr, page_id.clone()); + } + (false, None) => { + // try again later + return Ok((false, None)); + } + (true, _) => { + // done + self.mempool_sync_reset(); + return Ok((true, None)); + } + } + } + MempoolSyncState::SendQuery(ref url, ref addr, ref page_id) => { + // 3. 
ask for the remote peer's mempool's novel txs + debug!( + "{:?}: Mempool sync will query {} for mempool transactions at {}", + &self.local_peer, url, page_id + ); + match self.mempool_sync_send_query( + url, + addr, + mempool, + chainstate, + page_id.clone(), + )? { + (false, Some(event_id)) => { + // success! advance + debug!("{:?}: Mempool sync query {} for mempool transactions at {} on event {}", &self.local_peer, url, page_id, event_id); + self.mempool_state = + MempoolSyncState::RecvResponse(url.clone(), addr.clone(), event_id); + } + (false, None) => { + // try again later + return Ok((false, None)); + } + (true, _) => { + // done + self.mempool_sync_reset(); + return Ok((true, None)); + } + } + } + MempoolSyncState::RecvResponse(ref url, ref addr, ref event_id) => { + match self.mempool_sync_recv_response(*event_id)? { + (true, next_page_id_opt, Some(txs)) => { + debug!( + "{:?}: Mempool sync received {} transactions; next page is {:?}", + &self.local_peer, + txs.len(), + &next_page_id_opt + ); + + // done! got data + let ret = match next_page_id_opt { + Some(next_page_id) => { + // get the next page + self.mempool_state = MempoolSyncState::SendQuery( + url.clone(), + addr.clone(), + next_page_id, + ); + false + } + None => { + // done + self.mempool_sync_reset(); + true + } + }; + return Ok((ret, Some(txs))); + } + (true, _, None) => { + // done! did not get data + self.mempool_sync_reset(); + return Ok((true, None)); + } + (false, _, None) => { + // still receiving; try again later + return Ok((false, None)); + } + (false, _, Some(_)) => { + // should never happen + if cfg!(test) { + panic!("Reached invalid state in {:?}, aborting...", &cur_state); + } + warn!("Reached invalid state in {:?}, resetting...", &cur_state); + self.mempool_sync_reset(); + return Ok((true, None)); + } + } + } + } + } + } + /// Do the actual work in the state machine. /// Return true if we need to prune connections. fn do_network_work( &mut self, sortdb: &SortitionDB, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, dns_client_opt: &mut Option<&mut DNSClient>, download_backpressure: bool, @@ -3044,6 +3581,15 @@ impl PeerNetwork { let mut did_cycle = false; while !did_cycle { + // Make the p2p state machine more aggressive about going and fetching newly-discovered + // blocks that it gets notified about. That is, interrupt the state machine and go + // process the associated block download first. + if self.have_data_to_download && self.work_state == PeerNetworkWorkState::BlockInvSync { + self.have_data_to_download = false; + // forcibly advance + self.work_state = PeerNetworkWorkState::BlockDownload; + } + debug!( "{:?}: network work state is {:?}", &self.local_peer, &self.work_state @@ -3092,31 +3638,67 @@ impl PeerNetwork { .map(|neighbor| neighbor.addr) .collect(); - let mut have_always_allowed = false; + // have we finished a full pass of the inventory state machine on an + // always-allowed peer? + let mut finished_always_allowed_inv_sync = false; if always_allowed.len() == 0 { - have_always_allowed = true; + // vacuously, we have done so + finished_always_allowed_inv_sync = true; } else { + // do we have an always-allowed peer that we have not fully synced + // with? 
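Taken together, the four `MempoolSyncState` arms above chain into the following cycle (an illustrative summary, not part of the diff):

```rust
// PickOutboundPeer -> SendQuery          (peer's data URL is a bare IP/localhost)
// PickOutboundPeer -> ResolveURL         (peer's data URL needs a DNS lookup)
// ResolveURL       -> SendQuery          (DNS answer arrived)
// SendQuery        -> RecvResponse       (HTTP mempool query issued)
// RecvResponse     -> SendQuery          (got a page plus a next page ID)
// RecvResponse     -> PickOutboundPeer   (final page; mempool_sync_reset())
//
// Any step that cannot make progress returns (false, ..) so the caller
// retries on a later network pass; timeouts and errors reset the machine.
```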
+ let mut have_unsynced = false; if let Some(ref inv_state) = self.inv_state { for (nk, stats) in inv_state.block_stats.iter() { + if self.is_bound(&nk) { + // this is the same address we're bound to + continue; + } + if Some((nk.addrbytes.clone(), nk.port)) + == self.local_peer.public_ip_address + { + // this is a peer at our address + continue; + } if !always_allowed.contains(&nk) { + // this peer isn't in the always-allowed set continue; } if stats.inv.num_reward_cycles >= self.pox_id.num_inventory_reward_cycles() as u64 { + // we have fully sync'ed with an always-allowed peer debug!( "{:?}: Fully-sync'ed PoX inventory from {}", &self.local_peer, nk ); - have_always_allowed = true; + finished_always_allowed_inv_sync = true; + } else { + // there exists an always-allowed peer that we have not + // fully sync'ed with + debug!( + "{:?}: Have not fully sync'ed with {}", + &self.local_peer, &nk + ); + have_unsynced = true; } } } + + if !have_unsynced { + // There exists one or more always-allowed peers in + // the inv state machine (per the peer DB), but all such peers + // report either our bind address or our public IP address. + // If this is the case (i.e. a configuration error, a weird + // case where nodes share an IP, etc), then we declare this inv + // sync pass as finished. + finished_always_allowed_inv_sync = true; + } } - if have_always_allowed { + if finished_always_allowed_inv_sync { debug!("{:?}: synchronized inventories with at least one always-allowed peer", &self.local_peer); self.num_inv_sync_passes += 1; } else { @@ -3177,10 +3759,12 @@ impl PeerNetwork { downloader.hint_block_sortition_height_available( start_download_sortition, ibd, + false, ); downloader.hint_microblock_sortition_height_available( start_download_sortition, ibd, + false, ); } else { warn!( @@ -3197,6 +3781,7 @@ impl PeerNetwork { Some(ref mut dns_client) => { if self.do_network_block_download( sortdb, + mempool, chainstate, *dns_client, ibd, @@ -3264,6 +3849,7 @@ impl PeerNetwork { fn do_attachment_downloads( &mut self, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, mut dns_client_opt: Option<&mut DNSClient>, network_result: &mut NetworkResult, @@ -3283,7 +3869,7 @@ impl PeerNetwork { self, |network, attachments_downloader| { let mut dead_events = vec![]; - match attachments_downloader.run(dns_client, chainstate, network) { + match attachments_downloader.run(dns_client, mempool, chainstate, network) { Ok((ref mut attachments, ref mut events_to_deregister)) => { network_result.attachments.append(attachments); dead_events.append(events_to_deregister); @@ -3597,6 +4183,31 @@ impl PeerNetwork { } } + /// Do we need a block or microblock stream, given its sortition's consensus hash? + fn need_block_or_microblock_stream( + sortdb: &SortitionDB, + chainstate: &StacksChainState, + consensus_hash: &ConsensusHash, + is_microblock: bool, + ) -> Result { + let sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &consensus_hash)? + .ok_or(chainstate_error::NoSuchBlockError)?; + let block_hash_opt = if sn.sortition { + Some(sn.winning_stacks_block_hash) + } else { + None + }; + + let inv = chainstate.get_blocks_inventory(&[(consensus_hash.clone(), block_hash_opt)])?; + if is_microblock { + // checking for microblock absence + Ok(inv.microblocks_bitvec[0] == 0) + } else { + // checking for block absence + Ok(inv.block_bitvec[0] == 0) + } + } + /// Handle unsolicited BlocksAvailable. /// Update our inv for this peer. /// Mask errors. 
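The `need_block_or_microblock_stream()` helper above first consults the sortition (to learn the winning block hash, if any) and then the chainstate's inventory bitvecs. The availability handlers that follow use it to ignore announcements for data the node already has; a condensed sketch of that gating (hypothetical wrapper around the diff's own downloader-hint call):

```rust
// Condensed restatement of the gating below: an unsolicited BlocksAvailable
// only produces a downloader hint when the node verifiably lacks the block.
fn maybe_hint_block_download(
    network: &mut PeerNetwork,
    sortdb: &SortitionDB,
    chainstate: &StacksChainState,
    consensus_hash: &ConsensusHash,
    sortition_height: u64,
    ibd: bool,
) -> Result<(), net_error> {
    let need_block = PeerNetwork::need_block_or_microblock_stream(
        sortdb,
        chainstate,
        consensus_hash,
        false, // false = anchored block; true = confirmed microblock stream
    )?;
    if need_block {
        if let Some(ref mut downloader) = network.block_downloader {
            downloader.hint_block_sortition_height_available(sortition_height, ibd, true);
        }
        // wake the work-state machine so the download starts promptly
        network.have_data_to_download = true;
    }
    Ok(())
}
```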
@@ -3604,8 +4215,10 @@ impl PeerNetwork { fn handle_unsolicited_BlocksAvailable( &mut self, sortdb: &SortitionDB, + chainstate: &StacksChainState, event_id: usize, new_blocks: &BlocksAvailableData, + ibd: bool, buffer: bool, ) -> bool { let outbound_neighbor_key = match self.find_outbound_neighbor(event_id) { @@ -3651,12 +4264,45 @@ impl PeerNetwork { } }; - // have the downloader request this block if it's new - match self.block_downloader { - Some(ref mut downloader) => { - downloader.hint_block_sortition_height_available(block_sortition_height, false); + let need_block = match PeerNetwork::need_block_or_microblock_stream( + sortdb, + chainstate, + &consensus_hash, + false, + ) { + Ok(x) => x, + Err(e) => { + warn!( + "Failed to determine if we need block for consensus hash {}: {:?}", + &consensus_hash, &e + ); + false + } + }; + + debug!( + "Need block {}/{}? {}", + &consensus_hash, &block_hash, need_block + ); + + if need_block { + // have the downloader request this block if it's new and we don't have it + match self.block_downloader { + Some(ref mut downloader) => { + downloader.hint_block_sortition_height_available( + block_sortition_height, + ibd, + need_block, + ); + + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.local_peer, block_sortition_height); + } + self.have_data_to_download = true; + } + None => {} } - None => {} } } @@ -3670,8 +4316,10 @@ impl PeerNetwork { fn handle_unsolicited_MicroblocksAvailable( &mut self, sortdb: &SortitionDB, + chainstate: &StacksChainState, event_id: usize, new_mblocks: &BlocksAvailableData, + ibd: bool, buffer: bool, ) -> bool { let outbound_neighbor_key = match self.find_outbound_neighbor(event_id) { @@ -3689,7 +4337,6 @@ impl PeerNetwork { ); let mut to_buffer = false; - for (consensus_hash, block_hash) in new_mblocks.available.iter() { let mblock_sortition_height = match self.handle_unsolicited_inv_update( sortdb, @@ -3718,13 +4365,42 @@ impl PeerNetwork { } }; - // have the downloader request this block if it's new - match self.block_downloader { - Some(ref mut downloader) => { - downloader - .hint_microblock_sortition_height_available(mblock_sortition_height, false); + let need_microblock_stream = match PeerNetwork::need_block_or_microblock_stream( + sortdb, + chainstate, + &consensus_hash, + true, + ) { + Ok(x) => x, + Err(e) => { + warn!("Failed to determine if we need microblock stream for consensus hash {}: {:?}", &consensus_hash, &e); + false + } + }; + + debug!( + "Need microblock stream {}/{}? 
{}", + &consensus_hash, &block_hash, need_microblock_stream + ); + + if need_microblock_stream { + // have the downloader request this microblock stream if it's new to us + match self.block_downloader { + Some(ref mut downloader) => { + downloader.hint_microblock_sortition_height_available( + mblock_sortition_height, + ibd, + need_microblock_stream, + ); + + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of microblock stream {}", &self.local_peer, mblock_sortition_height); + } + self.have_data_to_download = true; + } + None => {} } - None => {} } } to_buffer @@ -3939,6 +4615,7 @@ impl PeerNetwork { chainstate: &StacksChainState, event_id: usize, payload: &StacksMessageType, + ibd: bool, buffer: bool, ) -> (bool, bool) { match payload { @@ -3949,15 +4626,18 @@ impl PeerNetwork { // conversation and use _that_ conversation's neighbor key to identify // which inventory we need to update. StacksMessageType::BlocksAvailable(ref new_blocks) => { - let to_buffer = - self.handle_unsolicited_BlocksAvailable(sortdb, event_id, new_blocks, buffer); + let to_buffer = self.handle_unsolicited_BlocksAvailable( + sortdb, chainstate, event_id, new_blocks, ibd, buffer, + ); (to_buffer, false) } StacksMessageType::MicroblocksAvailable(ref new_mblocks) => { let to_buffer = self.handle_unsolicited_MicroblocksAvailable( sortdb, + chainstate, event_id, new_mblocks, + ibd, buffer, ); (to_buffer, false) @@ -3994,6 +4674,7 @@ impl PeerNetwork { sortdb: &SortitionDB, chainstate: &StacksChainState, unsolicited: HashMap>, + ibd: bool, buffer: bool, ) -> Result>, net_error> { let mut unhandled: HashMap> = HashMap::new(); @@ -4031,6 +4712,7 @@ impl PeerNetwork { chainstate, event_id, &message.payload, + ibd, buffer, ); if buffer && to_buffer { @@ -4206,11 +4888,18 @@ impl PeerNetwork { Ok(()) } - /// Refresh view of burnchain, if needed + /// Refresh view of burnchain, if needed. 
+ /// If the burnchain view changes, then take the following additional steps: + /// * hint to the inventory sync state-machine to restart, since we potentially have a new + /// block to go fetch + /// * hint to the download state machine to start looking for the new block at the new + /// stable sortition height + /// * hint to the antientropy protocol to reset to the latest reward cycle pub fn refresh_burnchain_view( &mut self, sortdb: &SortitionDB, chainstate: &StacksChainState, + ibd: bool, ) -> Result>, net_error> { // update burnchain snapshot if we need to (careful -- it's expensive) let sn = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn())?; @@ -4243,6 +4932,12 @@ impl PeerNetwork { false, ); + // set up the antientropy protocol to try pushing the latest block + // (helps if you're a miner who gets temporarily disconnected) + self.antientropy_last_push_ts = get_epoch_time_secs(); + self.antientropy_start_reward_cycle = + self.pox_id.num_inventory_reward_cycles().saturating_sub(1) as u64; + // update cached burnchain view for /v2/info self.chain_view = new_chain_view; self.chain_view_stable_consensus_hash = new_chain_view_stable_consensus_hash; @@ -4251,7 +4946,13 @@ impl PeerNetwork { if sn.burn_header_hash != self.burnchain_tip.burn_header_hash { // try processing previously-buffered messages (best-effort) let buffered_messages = mem::replace(&mut self.pending_messages, HashMap::new()); - ret = self.handle_unsolicited_messages(sortdb, chainstate, buffered_messages, false)?; + ret = self.handle_unsolicited_messages( + sortdb, + chainstate, + buffered_messages, + ibd, + false, + )?; } // update cached stacks chain view for /v2/info @@ -4269,6 +4970,7 @@ impl PeerNetwork { &mut self, network_result: &mut NetworkResult, sortdb: &SortitionDB, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, mut dns_client_opt: Option<&mut DNSClient>, download_backpressure: bool, @@ -4300,7 +5002,7 @@ impl PeerNetwork { self.deregister_peer(error_event); } let unhandled_messages = - self.handle_unsolicited_messages(sortdb, chainstate, unsolicited_messages, true)?; + self.handle_unsolicited_messages(sortdb, chainstate, unsolicited_messages, ibd, true)?; network_result.consume_unsolicited(unhandled_messages); // schedule now-authenticated inbound convos for pingback @@ -4311,6 +5013,7 @@ impl PeerNetwork { // an already-used network ID. let do_prune = self.do_network_work( sortdb, + mempool, chainstate, &mut dns_client_opt, download_backpressure, @@ -4330,17 +5033,21 @@ impl PeerNetwork { self.deregister_peer(dead); } self.prune_connections(); - let outbound_neighbors = PeerNetwork::count_outbound_conversations(&self.peers); - let inbound_neighbors = self.peers.len() - outbound_neighbors as usize; - update_outbound_neighbors(outbound_neighbors as i64); - update_inbound_neighbors(inbound_neighbors as i64); } - // In parallel, do a neighbor walk, but only if we're not doing the initial block download - self.do_network_neighbor_walk()?; + // In parallel, do a neighbor walk + self.do_network_neighbor_walk(ibd)?; + + // In parallel, do a mempool sync. + // Remember any txs we get, so we can feed them to the relayer thread. + if let Some(mut txs) = + self.do_network_mempool_sync(&mut dns_client_opt, mempool, chainstate, ibd)? 
+ { + network_result.synced_transactions.append(&mut txs); + } // download attachments - self.do_attachment_downloads(chainstate, dns_client_opt, network_result)?; + self.do_attachment_downloads(mempool, chainstate, dns_client_opt, network_result)?; // remove timed-out requests from other threads for (_, convo) in self.peers.iter_mut() { @@ -4382,6 +5089,11 @@ impl PeerNetwork { // do this after processing new sockets, so we don't accidentally re-use an event ID. self.dispatch_requests(); + let outbound_neighbors = PeerNetwork::count_outbound_conversations(&self.peers); + let inbound_neighbors = self.peers.len() - outbound_neighbors as usize; + update_outbound_neighbors(outbound_neighbors as i64); + update_inbound_neighbors(inbound_neighbors as i64); + // fault injection -- periodically disconnect from everyone if cfg!(test) { if let Some(disconnect_interval) = self.connection_opts.force_disconnect_interval { @@ -4485,6 +5197,18 @@ impl PeerNetwork { } // (HTTP-uploaded transactions are already in the mempool) + // Mempool-synced transactions (don't re-relay these) + for tx in network_result.synced_transactions.drain(..) { + PeerNetwork::store_transaction( + mempool, + sortdb, + chainstate, + &canonical_consensus_hash, + &canonical_block_hash, + tx, + event_observer, + ); + } network_result.pushed_transactions.extend(ret); Ok(()) @@ -4538,7 +5262,7 @@ impl PeerNetwork { self.refresh_local_peer()?; // update burnchain view, before handling any HTTP connections - let unsolicited_buffered_messages = self.refresh_burnchain_view(sortdb, chainstate)?; + let unsolicited_buffered_messages = self.refresh_burnchain_view(sortdb, chainstate, ibd)?; network_result.consume_unsolicited(unsolicited_buffered_messages); // update PoX view, before handling any HTTP connections @@ -4581,6 +5305,7 @@ impl PeerNetwork { self.dispatch_network( &mut network_result, sortdb, + mempool, chainstate, dns_client_opt, download_backpressure, @@ -4608,11 +5333,16 @@ mod test { use net::atlas::*; use net::codec::*; use net::db::*; + use net::test::*; use net::*; use util::log; use util::sleep_ms; use util::test::*; + use chainstate::stacks::{ + C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + }; + use crate::types::chainstate::BurnchainHeaderHash; use super::*; @@ -4964,4 +5694,459 @@ mod test { test_debug!("fake endpoint thread joined"); }) } + + #[test] + #[ignore] + fn test_mempool_sync_2_peers() { + with_timeout(600, || { + // peer 1 gets some transactions; verify peer 2 gets the recent ones and not the old + // ones + let mut peer_1_config = TestPeerConfig::new("test_mempool_sync_2_peers", 2210, 2211); + let mut peer_2_config = TestPeerConfig::new("test_mempool_sync_2_peers", 2212, 2213); + + peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + + peer_1_config.connection_opts.mempool_sync_interval = 1; + peer_2_config.connection_opts.mempool_sync_interval = 1; + + let num_txs = 10; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + peer_1_config.initial_balances = initial_balances.clone(); + peer_2_config.initial_balances = initial_balances.clone(); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + let num_blocks = 10; + let first_stacks_block_height = { + let sn 
= SortitionDB::get_canonical_burn_chain_tip( + &peer_1.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + sn.block_height + 1 + }; + + for i in 0..(num_blocks / 2) { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + // old transactions + let num_txs = 10; + let mut old_txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + old_txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &ConsensusHash([0x1 + (num_blocks as u8); 20]), + &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), + txid.clone(), + tx_bytes, + tx_fee, + (num_blocks / 2) as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + // keep mining to make these txs old + for i in (num_blocks / 2)..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer_1.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + sn.block_height + 1 + }; + + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(1); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + 
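+                // sign with the origin key; MemPoolDB::try_add_tx() below needs the
+                // signed tx's txid, serialized bytes, fee, and nonces for admission checks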
tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &ConsensusHash([0x1 + (num_blocks as u8); 20]), + &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), + txid.clone(), + tx_bytes, + tx_fee, + num_blocks as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + let mut peer_2_mempool_txs = 0; + + while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { + if let Ok(mut result) = peer_1.step() { + let lp = peer_1.network.local_peer.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step() { + let lp = peer_2.network.local_peer.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + let mp = peer_2.mempool.take().unwrap(); + peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_2.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, peer_2_mempool_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + // peer 2 has all the recent txs + // peer 2 has none of the old ones + for tx in peer_2_mempool_txs { + assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); + assert!(old_txs.get(&tx.tx.txid()).is_none()); + } + }); + } + + #[test] + #[ignore] + fn test_mempool_sync_2_peers_paginated() { + with_timeout(600, || { + // peer 1 gets some transactions; verify peer 2 gets them all + let mut peer_1_config = + TestPeerConfig::new("test_mempool_sync_2_peers_paginated", 2214, 2215); + let mut peer_2_config = + TestPeerConfig::new("test_mempool_sync_2_peers_paginated", 2216, 2217); + + peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + + peer_1_config.connection_opts.mempool_sync_interval = 1; + peer_2_config.connection_opts.mempool_sync_interval = 1; + + let num_txs = 1024; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + peer_1_config.initial_balances = initial_balances.clone(); + peer_2_config.initial_balances = initial_balances.clone(); + + let mut peer_1 = TestPeer::new(peer_1_config); + let 
mut peer_2 = TestPeer::new(peer_2_config); + + let num_blocks = 10; + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peer_1.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + sn.block_height + 1 + }; + + for i in 0..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + // fill peer 1 with lots of transactions + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &ConsensusHash([0x1 + (num_blocks as u8); 20]), + &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), + txid.clone(), + tx_bytes, + tx_fee, + num_blocks, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer_1.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + sn.block_height + 1 + }; + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + let mut peer_2_mempool_txs = 0; + + while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { + if let Ok(mut result) = peer_1.step() { + let lp = peer_1.network.local_peer.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step() { + let lp = peer_2.network.local_peer.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + let mp = peer_2.mempool.take().unwrap(); + peer_2_mempool_txs = 
MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_2.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, peer_2_mempool_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + for tx in peer_2_mempool_txs { + assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); + } + }); + } } diff --git a/src/net/relay.rs b/src/net/relay.rs index 963ed5c05d..bade06172b 100644 --- a/src/net/relay.rs +++ b/src/net/relay.rs @@ -55,6 +55,7 @@ use crate::chainstate::coordinator::BlockEventDispatcher; use crate::types::chainstate::{PoxId, SortitionId}; use chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use codec::MAX_PAYLOAD_LEN; +use monitoring::update_stacks_tip_height; use types::chainstate::BurnchainHeaderHash; pub type BlocksAvailableMap = HashMap<BurnchainHeaderHash, (u64, ConsensusHash)>; @@ -617,15 +618,15 @@ impl Relayer { } /// Preprocess all our downloaded blocks. - /// Return burn block hashes for the blocks that we got. /// Does not fail on invalid blocks; just logs a warning. - /// Returns the set of consensus hashes for the sortitions that selected these blocks + /// Returns the set of consensus hashes for the sortitions that selected these blocks, and the + /// blocks themselves fn preprocess_downloaded_blocks( sort_ic: &SortitionDBConn, network_result: &mut NetworkResult, chainstate: &mut StacksChainState, - ) -> HashSet<ConsensusHash> { - let mut new_blocks = HashSet::new(); + ) -> HashMap<ConsensusHash, StacksBlock> { + let mut new_blocks = HashMap::new(); for (consensus_hash, block, download_time) in network_result.blocks.iter() { match Relayer::process_new_anchored_block( @@ -637,7 +638,7 @@ ) { Ok(accepted) => { if accepted { - new_blocks.insert((*consensus_hash).clone()); + new_blocks.insert((*consensus_hash).clone(), block.clone()); } } Err(chainstate_error::InvalidStacksBlock(msg)) => { @@ -668,8 +669,8 @@ sort_ic: &SortitionDBConn, network_result: &mut NetworkResult, chainstate: &mut StacksChainState, - ) -> Result<(HashSet<ConsensusHash>, Vec<NeighborKey>), net_error> { - let mut new_blocks = HashSet::new(); + ) -> Result<(HashMap<ConsensusHash, StacksBlock>, Vec<NeighborKey>), net_error> { + let mut new_blocks = HashMap::new(); let mut bad_neighbors = vec![]; // process blocks pushed to us. @@ -725,7 +726,7 @@ "Accepted block {}/{} from {}", &consensus_hash, &bhh, &neighbor_key ); - new_blocks.insert(consensus_hash.clone()); + new_blocks.insert(consensus_hash.clone(), block.clone()); } } Err(chainstate_error::InvalidStacksBlock(msg)) => { @@ -759,8 +760,8 @@ fn preprocess_downloaded_microblocks( network_result: &mut NetworkResult, chainstate: &mut StacksChainState, - ) -> HashSet<ConsensusHash> { - let mut ret = HashSet::new(); + ) -> HashMap<ConsensusHash, (StacksBlockId, Vec<StacksMicroblock>)> { + let mut ret = HashMap::new(); for (consensus_hash, microblock_stream, _download_time) in network_result.confirmed_microblocks.iter() { @@ -788,7 +789,12 @@ } } - ret.insert((*consensus_hash).clone()); + let index_block_hash = + StacksBlockHeader::make_index_block_hash(consensus_hash, &anchored_block_hash); + ret.insert( + (*consensus_hash).clone(), + (index_block_hash, microblock_stream.clone()), + ); } ret } @@ -904,8 +910,8 @@ /// Process blocks and microblocks that we received, both downloaded (confirmed) and streamed /// (unconfirmed).
Returns: - /// * list of consensus hashes that elected the newly-discovered blocks, so we can turn them into BlocksAvailable messages - /// * list of confirmed microblock consensus hashes for newly-discovered microblock streams, so we can turn them into MicroblocksAvailable messages + /// * set of consensus hashes that elected the newly-discovered blocks, and the blocks, so we can turn them into BlocksAvailable / BlocksData messages + /// * set of confirmed microblock consensus hashes for newly-discovered microblock streams, and the streams, so we can turn them into MicroblocksAvailable / MicroblocksData messages /// * list of unconfirmed microblocks that got pushed to us, as well as their relayers (so we can forward them) /// * list of neighbors that served us invalid data (so we can ban them) pub fn process_new_blocks( @@ -915,15 +921,14 @@ coord_comms: Option<&CoordinatorChannels>, ) -> Result< ( - Vec<ConsensusHash>, - Vec<ConsensusHash>, + HashMap<ConsensusHash, StacksBlock>, + HashMap<ConsensusHash, (StacksBlockId, Vec<StacksMicroblock>)>, Vec<(Vec<RelayData>, MicroblocksData)>, Vec<NeighborKey>, ), net_error, > { - let mut new_blocks = HashSet::new(); - let mut new_confirmed_microblocks = HashSet::new(); + let mut new_blocks = HashMap::new(); let mut bad_neighbors = vec![]; { @@ -932,35 +937,48 @@ // process blocks we downloaded let new_dled_blocks = Relayer::preprocess_downloaded_blocks(&sort_ic, network_result, chainstate); - for new_dled_block in new_dled_blocks.into_iter() { - debug!("Received downloaded block for {}", &new_dled_block); - new_blocks.insert(new_dled_block); + for (new_dled_block_ch, block_data) in new_dled_blocks.into_iter() { + debug!( + "Received downloaded block for {}/{}", + &new_dled_block_ch, + &block_data.block_hash(); + "consensus_hash" => %new_dled_block_ch, + "block_hash" => %block_data.block_hash() + ); + new_blocks.insert(new_dled_block_ch, block_data); } // process blocks pushed to us let (new_pushed_blocks, mut new_bad_neighbors) = Relayer::preprocess_pushed_blocks(&sort_ic, network_result, chainstate)?; - for new_pushed_block in new_pushed_blocks.into_iter() { - debug!("Received p2p-pushed block for {}", &new_pushed_block); - new_blocks.insert(new_pushed_block); + for (new_pushed_block_ch, block_data) in new_pushed_blocks.into_iter() { + debug!( + "Received p2p-pushed block for {}/{}", + &new_pushed_block_ch, + &block_data.block_hash(); + "consensus_hash" => %new_pushed_block_ch, + "block_hash" => %block_data.block_hash() + ); + new_blocks.insert(new_pushed_block_ch, block_data); } bad_neighbors.append(&mut new_bad_neighbors); // process blocks uploaded to us. They've already been stored for block_data in network_result.uploaded_blocks.drain(..)
{ - for (consensus_hash, _) in block_data.blocks.into_iter() { - debug!("Received http-uploaded block for {}", &consensus_hash); - new_blocks.insert(consensus_hash); + for (consensus_hash, block) in block_data.blocks.into_iter() { + debug!( + "Received http-uploaded block for {}/{}", + &consensus_hash, + block.block_hash() + ); + new_blocks.insert(consensus_hash, block); } } } // process microblocks we downloaded - let mut new_dled_mblocks = + let new_confirmed_microblocks = Relayer::preprocess_downloaded_microblocks(network_result, chainstate); - for new_dled_mblock in new_dled_mblocks.drain() { - new_confirmed_microblocks.insert(new_dled_mblock); - } // process microblocks pushed to us let (new_microblocks, mut new_bad_neighbors) = @@ -983,8 +1001,8 @@ impl Relayer { } Ok(( - new_blocks.into_iter().collect(), - new_confirmed_microblocks.into_iter().collect(), + new_blocks, + new_confirmed_microblocks, new_microblocks, bad_neighbors, )) @@ -1066,12 +1084,17 @@ impl Relayer { MemPoolDB::garbage_collect(&mut mempool_tx, min_height, event_observer)?; mempool_tx.commit()?; } + update_stacks_tip_height(chain_height as i64); Ok(ret) } - pub fn advertize_blocks(&mut self, available: BlocksAvailableMap) -> Result<(), net_error> { - self.p2p.advertize_blocks(available) + pub fn advertize_blocks( + &mut self, + available: BlocksAvailableMap, + blocks: HashMap<ConsensusHash, StacksBlock>, + ) -> Result<(), net_error> { + self.p2p.advertize_blocks(available, blocks) } pub fn broadcast_block( @@ -1185,6 +1208,7 @@ impl Relayer { sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, mempool: &mut MemPoolDB, + ibd: bool, coord_comms: Option<&CoordinatorChannels>, event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result<ProcessedNetReceipts, net_error> { @@ -1204,25 +1228,49 @@ impl Relayer { } // have the p2p thread tell our neighbors about newly-discovered blocks - let available = Relayer::load_blocks_available_data(sortdb, new_blocks)?; + let new_block_chs = new_blocks.iter().map(|(ch, _)| ch.clone()).collect(); + let available = Relayer::load_blocks_available_data(sortdb, new_block_chs)?; if available.len() > 0 { - debug!("{:?}: Blocks available: {}", &_local_peer, available.len()); - if let Err(e) = self.p2p.advertize_blocks(available) { - warn!("Failed to advertize new blocks: {:?}", &e); + if !ibd { + debug!("{:?}: Blocks available: {}", &_local_peer, available.len()); + if let Err(e) = self.p2p.advertize_blocks(available, new_blocks) { + warn!("Failed to advertize new blocks: {:?}", &e); + } + } else { + debug!( + "{:?}: Blocks available, but will not advertize since in IBD: {}", + &_local_peer, + available.len() + ); } } // have the p2p thread tell our neighbors about newly-discovered confirmed microblock streams + let new_mblock_chs = new_confirmed_microblocks + .iter() + .map(|(ch, _)| ch.clone()) + .collect(); let mblocks_available = - Relayer::load_blocks_available_data(sortdb, new_confirmed_microblocks)?; + Relayer::load_blocks_available_data(sortdb, new_mblock_chs)?; if mblocks_available.len() > 0 { - debug!( - "{:?}: Confirmed microblock streams available: {}", - &_local_peer, - mblocks_available.len() - ); - if let Err(e) = self.p2p.advertize_microblocks(mblocks_available) { - warn!("Failed to advertize new confirmed microblocks: {:?}", &e); + if !ibd { + debug!( + "{:?}: Confirmed microblock streams available: {}", + &_local_peer, + mblocks_available.len() + ); + if let Err(e) = self + .p2p + .advertize_microblocks(mblocks_available, new_confirmed_microblocks) + { + warn!("Failed to advertize new confirmed microblocks: {:?}", &e); 
+ } + } else { + debug!( + "{:?}: Confirmed microblock streams available, but will not advertize since in IBD: {}", + &_local_peer, + mblocks_available.len() + ); } } @@ -1302,9 +1350,10 @@ impl Relayer { impl PeerNetwork { /// Find out which neighbors need at least one (micro)block from the availability set. - /// For outbound neighbors (i.e. ones we have inv data for), only send (Micro)BlocksAvailable messages - /// for (micro)blocks we have that they don't have. For inbound neighbors (i.e. ones we don't have - /// inv data for), pick a random set and send them the full (Micro)BlocksAvailable message. + /// For outbound neighbors (i.e. ones we have inv data for), send (Micro)BlocksData messages if + /// we can; fall back to (Micro)BlocksAvailable messages if we can't. + /// For inbound neighbors (i.e. ones we don't have inv data for), pick a random set and send them + /// the full (Micro)BlocksAvailable message. fn find_block_recipients( &mut self, available: &BlocksAvailableMap, @@ -1366,7 +1415,7 @@ impl PeerNetwork { fn advertize_to_peer( &mut self, recipient: &NeighborKey, - wanted: &Vec<(ConsensusHash, BurnchainHeaderHash)>, + wanted: &[(ConsensusHash, BurnchainHeaderHash)], mut msg_builder: S, ) -> () where @@ -1403,51 +1452,168 @@ impl PeerNetwork { } } + /// Try to push a block to a peer. + /// Absorb and log errors. + fn push_block_to_peer( + &mut self, + recipient: &NeighborKey, + consensus_hash: ConsensusHash, + block: StacksBlock, + ) -> () { + let blk_hash = block.block_hash(); + let ch = consensus_hash.clone(); + let payload = BlocksData { + blocks: vec![(consensus_hash, block)], + }; + let message = match self.sign_for_peer(recipient, StacksMessageType::Blocks(payload)) { + Ok(m) => m, + Err(e) => { + warn!( + "{:?}: Failed to sign for {:?}: {:?}", + &self.local_peer, recipient, &e + ); + return; + } + }; + + debug!( + "{:?}: Push block {}/{} to {:?}", + &self.local_peer, &ch, &blk_hash, recipient + ); + + // absorb errors + let _ = self.relay_signed_message(recipient, message).map_err(|e| { + warn!( + "{:?}: Failed to push block {}/{} to {:?}: {:?}", + &self.local_peer, &ch, &blk_hash, recipient, &e + ); + e + }); + } + + /// Try to push a confirmed microblock stream to a peer. + /// Absorb and log errors. + fn push_microblocks_to_peer( + &mut self, + recipient: &NeighborKey, + index_block_hash: StacksBlockId, + microblocks: Vec<StacksMicroblock>, + ) -> () { + let idx_bhh = index_block_hash.clone(); + let payload = MicroblocksData { + index_anchor_block: index_block_hash, + microblocks: microblocks, + }; + let message = match self.sign_for_peer(recipient, StacksMessageType::Microblocks(payload)) { + Ok(m) => m, + Err(e) => { + warn!( + "{:?}: Failed to sign for {:?}: {:?}", + &self.local_peer, recipient, &e + ); + return; + } + }; + + debug!( + "{:?}: Push microblocks for {} to {:?}", + &self.local_peer, &idx_bhh, recipient + ); + + // absorb errors + let _ = self.relay_signed_message(recipient, message).map_err(|e| { + warn!( + "{:?}: Failed to push microblocks for {} to {:?}: {:?}", + &self.local_peer, &idx_bhh, recipient, &e + ); + e + }); + } + /// Announce blocks that we have to an outbound peer that doesn't have them. - /// Only advertize blocks and microblocks we have that the outbound peer doesn't. - fn advertize_to_outbound_peer + /// If we were given the block, send the block itself. + /// Otherwise, send a BlocksAvailable. 
+ fn advertize_or_push_blocks_to_outbound_peer( &mut self, recipient: &NeighborKey, available: &BlocksAvailableMap, - microblocks: bool, + blocks: &HashMap<ConsensusHash, StacksBlock>, ) -> Result<(), net_error> { - let wanted = PeerNetwork::with_inv_state(self, |_network, inv_state| { - let mut wanted: Vec<(ConsensusHash, BurnchainHeaderHash)> = vec![]; + PeerNetwork::with_inv_state(self, |network, inv_state| { if let Some(stats) = inv_state.block_stats.get(recipient) { for (bhh, (block_height, ch)) in available.iter() { - let has_data = if microblocks { - stats.inv.has_ith_microblock_stream(*block_height) - } else { - stats.inv.has_ith_block(*block_height) - }; - - if !has_data { + if !stats.inv.has_ith_block(*block_height) { test_debug!( - "{:?}: Outbound neighbor {:?} wants {} data for {}", - &_network.local_peer, + "{:?}: Outbound neighbor {:?} wants block data for {}", + &network.local_peer, recipient, - if microblocks { "microblock" } else { "block" }, bhh ); - wanted.push(((*ch).clone(), (*bhh).clone())); + match blocks.get(ch) { + Some(block) => { + network.push_block_to_peer( + recipient, + (*ch).clone(), + (*block).clone(), + ); + } + None => { + network.advertize_to_peer( + recipient, + &[((*ch).clone(), (*bhh).clone())], + |payload| StacksMessageType::BlocksAvailable(payload), + ); + } + } } } } - Ok(wanted) - })?; + Ok(()) + }) } - if microblocks { - self.advertize_to_peer(recipient, &wanted, |payload| { - StacksMessageType::MicroblocksAvailable(payload) - }); - } else { - self.advertize_to_peer(recipient, &wanted, |payload| { - StacksMessageType::BlocksAvailable(payload) - }); - } + /// Announce microblocks that we have to an outbound peer that doesn't have them. + /// If we were given the microblock stream, send the stream itself. + /// Otherwise, send a MicroblocksAvailable. + fn advertize_or_push_microblocks_to_outbound_peer( + &mut self, + recipient: &NeighborKey, + available: &BlocksAvailableMap, + microblocks: &HashMap<ConsensusHash, (StacksBlockId, Vec<StacksMicroblock>)>, + ) -> Result<(), net_error> { + PeerNetwork::with_inv_state(self, |network, inv_state| { + if let Some(stats) = inv_state.block_stats.get(recipient) { + for (bhh, (block_height, ch)) in available.iter() { + if !stats.inv.has_ith_microblock_stream(*block_height) { + test_debug!( + "{:?}: Outbound neighbor {:?} wants microblock data for {}", + &network.local_peer, + recipient, + bhh + ); - Ok(()) + match microblocks.get(ch) { + Some((stacks_block_id, mblocks)) => { + network.push_microblocks_to_peer( + recipient, + stacks_block_id.clone(), + mblocks.clone(), + ); + } + None => { + network.advertize_to_peer( + recipient, + &[((*ch).clone(), (*bhh).clone())], + |payload| StacksMessageType::MicroblocksAvailable(payload), + ); + } + } + } + } + } + Ok(()) + }) } /// Announce blocks that we have to an inbound peer that might not have them. @@ -1473,13 +1639,16 @@ impl PeerNetwork { /// Announce blocks that we have to a subset of inbound and outbound peers. /// * Outbound peers receive announcements for blocks that we know they don't have, based on - /// the inv state we synchronized from them. + /// the inv state we synchronized from them. We send the blocks themselves, if we have them. /// * Inbound peers are chosen uniformly at random to receive a full announcement, since we - /// don't track their inventory state. + /// don't track their inventory state. We send blocks-available messages to them, since they + /// can turn around and ask us for the block data. 
+ /// Return the number of inbound and outbound neighbors that have received it pub fn advertize_blocks( &mut self, availability_data: BlocksAvailableMap, - ) -> Result<(), net_error> { + blocks: HashMap<ConsensusHash, StacksBlock>, + ) -> Result<(usize, usize), net_error> { let (mut outbound_recipients, mut inbound_recipients) = self.find_block_recipients(&availability_data)?; debug!( @@ -1490,6 +1659,9 @@ impl PeerNetwork { inbound_recipients.len() ); + let num_inbound = inbound_recipients.len(); + let num_outbound = outbound_recipients.len(); + for recipient in outbound_recipients.drain(..) { debug!( "{:?}: Advertize {} blocks to outbound peer {}", @@ -1497,7 +1669,11 @@ impl PeerNetwork { availability_data.len(), &recipient ); - self.advertize_to_outbound_peer(&recipient, &availability_data, false)?; + self.advertize_or_push_blocks_to_outbound_peer( + &recipient, + &availability_data, + &blocks, + )?; } for recipient in inbound_recipients.drain(..) { debug!( @@ -1510,7 +1686,7 @@ impl PeerNetwork { StacksMessageType::BlocksAvailable(payload) })?; } - Ok(()) + Ok((num_inbound, num_outbound)) } /// Announce confirmed microblocks that we have to a subset of inbound and outbound peers. @@ -1518,14 +1694,19 @@ impl PeerNetwork { /// the inv state we synchronized from them. /// * Inbound peers are chosen uniformly at random to receive a full announcement, since we /// don't track their inventory state. + /// Return the number of inbound and outbound neighbors that have received it pub fn advertize_microblocks( &mut self, availability_data: BlocksAvailableMap, - ) -> Result<(), net_error> { + microblocks: HashMap<ConsensusHash, (StacksBlockId, Vec<StacksMicroblock>)>, + ) -> Result<(usize, usize), net_error> { let (mut outbound_recipients, mut inbound_recipients) = self.find_block_recipients(&availability_data)?; debug!("{:?}: Advertize {} confirmed microblock streams to {} inbound peers, {} outbound peers", &self.local_peer, availability_data.len(), outbound_recipients.len(), inbound_recipients.len()); + let num_inbound = inbound_recipients.len(); + let num_outbound = outbound_recipients.len(); + for recipient in outbound_recipients.drain(..) { debug!( "{:?}: Advertize {} confirmed microblock streams to outbound peer {}", @@ -1533,7 +1714,11 @@ impl PeerNetwork { availability_data.len(), &recipient ); - self.advertize_to_outbound_peer(&recipient, &availability_data, true)?; + self.advertize_or_push_microblocks_to_outbound_peer( + &recipient, + &availability_data, + &microblocks, + )?; } for recipient in inbound_recipients.drain(..) { debug!( @@ -1546,7 +1731,7 @@ impl PeerNetwork { StacksMessageType::MicroblocksAvailable(payload) })?; } - Ok(()) + Ok((num_inbound, num_outbound)) } /// Update accounting information for relayed messages from a network result. 
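Taken together, these relay.rs hunks replace a one-size-fits-all announcement with a three-way decision: stay quiet during initial burnchain download (IBD), push the (micro)block body directly to an outbound neighbor whose inventory lacks it, and fall back to a hash-only availability message when the data is no longer on hand. A minimal, self-contained sketch of that decision flow — the types below are simplified stand-ins, not the real `PeerNetwork` API:

```rust
use std::collections::HashMap;

// Stand-ins for the real ConsensusHash / StacksBlock types.
#[derive(Clone, Hash, PartialEq, Eq)]
struct ConsensusHash([u8; 20]);
#[derive(Clone)]
struct StacksBlock;

enum Relay {
    Quiet,                    // in IBD: let the downloader catch the peer up
    Push(StacksBlock),        // BlocksData: send the block body itself
    Advertize(ConsensusHash), // BlocksAvailable: hash only; the peer fetches it
}

fn relay_decision(
    ibd: bool,
    peer_has_block: bool,
    ch: &ConsensusHash,
    blocks: &HashMap<ConsensusHash, StacksBlock>,
) -> Relay {
    if ibd || peer_has_block {
        return Relay::Quiet;
    }
    match blocks.get(ch) {
        Some(block) => Relay::Push(block.clone()),
        None => Relay::Advertize(ch.clone()),
    }
}
```

The new `(usize, usize)` return value of `advertize_blocks` / `advertize_microblocks` then simply reports how many inbound and outbound neighbors were reached.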
@@ -1998,7 +2183,19 @@ mod test { peer_configs[1].connection_opts.disable_natpunch = true; peer_configs[2].connection_opts.disable_natpunch = true; + // do not push blocks and microblocks; only announce them + peer_configs[0].connection_opts.disable_block_push = true; + peer_configs[1].connection_opts.disable_block_push = true; + peer_configs[2].connection_opts.disable_block_push = true; + + peer_configs[0].connection_opts.disable_microblock_push = true; + peer_configs[1].connection_opts.disable_microblock_push = true; + peer_configs[2].connection_opts.disable_microblock_push = true; + // generous timeouts + peer_configs[0].connection_opts.connect_timeout = 180; + peer_configs[1].connection_opts.connect_timeout = 180; + peer_configs[2].connection_opts.connect_timeout = 180; peer_configs[0].connection_opts.timeout = 180; peer_configs[1].connection_opts.timeout = 180; peer_configs[2].connection_opts.timeout = 180; @@ -2009,8 +2206,6 @@ mod test { peer_configs[0].add_neighbor(&peer_1); peer_configs[1].add_neighbor(&peer_0); - - // peer_configs[1].add_neighbor(&peer_2); peer_configs[2].add_neighbor(&peer_1); }, |num_blocks, ref mut peers| { @@ -2075,7 +2270,37 @@ mod test { }, |ref mut peers| { // make sure peer 2's inv has an entry for peer 1, even - // though it's not doing an inv sync + // though it's not doing an inv sync. This is required for the downloader to + // work, and for (Micro)BlocksAvailable messages to be accepted + let peer_1_nk = peers[1].to_neighbor().addr; + let peer_2_nk = peers[2].to_neighbor().addr; + let bc = peers[1].config.burnchain.clone(); + match peers[2].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_1_nk).is_none() { + test_debug!("initialize inv statistics for peer 1 in peer 2"); + inv_state.add_peer(peer_1_nk.clone(), true); + if let Some(ref mut stats) = inv_state.get_stats_mut(&peer_1_nk) { + stats.scans = 1; + stats.inv.merge_pox_inv(&bc, 0, 6, vec![0xff], false); + stats.inv.merge_blocks_inv( + 0, + 30, + vec![0, 0, 0, 0, 0], + vec![0, 0, 0, 0, 0], + false, + ); + } else { + panic!("Unable to instantiate inv stats for {:?}", &peer_1_nk); + } + } else { + test_debug!("peer 2 has inv state for peer 1"); + } + } + None => { + test_debug!("No inv state for peer 1"); + } + } let tip = SortitionDB::get_canonical_burn_chain_tip( &peers[0].sortdb.as_ref().unwrap().conn(), @@ -2448,7 +2673,8 @@ mod test { let mut request = HttpRequestMetadata::new("127.0.0.1".to_string(), http_port); request.keep_alive = false; let tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); - let post_microblock = HttpRequestType::PostMicroblock(request, mblock.clone(), Some(tip)); + let post_microblock = + HttpRequestType::PostMicroblock(request, mblock.clone(), TipRequest::SpecificTip(tip)); let response = http_rpc(http_port, post_microblock).unwrap(); if let HttpResponseType::MicroblockHash(..) = response { return true; @@ -2457,7 +2683,10 @@ mod test { } } - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(outbound_test: bool) { + fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( + outbound_test: bool, + disable_push: bool, + ) { with_timeout(600, move || { let original_blocks_and_microblocks = RefCell::new(vec![]); let blocks_and_microblocks = RefCell::new(vec![]); @@ -2487,6 +2716,14 @@ mod test { peer_configs[0].connection_opts.disable_natpunch = true; peer_configs[1].connection_opts.disable_natpunch = true; + // force usage of blocksavailable/microblocksavailable? 
+ if disable_push { + peer_configs[0].connection_opts.disable_block_push = true; + peer_configs[0].connection_opts.disable_microblock_push = true; + peer_configs[1].connection_opts.disable_block_push = true; + peer_configs[1].connection_opts.disable_microblock_push = true; + } + let peer_0 = peer_configs[0].to_neighbor(); let peer_1 = peer_configs[1].to_neighbor(); @@ -2561,15 +2798,24 @@ mod test { block_data }, |ref mut peers| { - // make sure peer 2's inv has an entry for peer 1, even - // though it's not doing an inv sync + if !disable_push { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = + BurnchainHeaderHash([0u8; 32]); + } + } + + // make sure peer 1's inv has an entry for peer 0, even + // though it's not doing an inv sync. This is required for the downloader to + // work let peer_0_nk = peers[0].to_neighbor().addr; let peer_1_nk = peers[1].to_neighbor().addr; match peers[1].network.inv_state { Some(ref mut inv_state) => { if inv_state.get_stats(&peer_0_nk).is_none() { test_debug!("initialize inv statistics for peer 0 in peer 1"); - inv_state.add_peer(peer_0_nk, true); + inv_state.add_peer(peer_0_nk.clone(), true); } else { test_debug!("peer 1 has inv state for peer 0"); } @@ -2688,15 +2934,33 @@ mod test { #[test] #[ignore] fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound() { - // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true) + // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. + // nodes rely on blocksavailable/microblocksavailable to discover blocks + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, true) } #[test] #[ignore] fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound() { // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false) + // nodes rely on blocksavailable/microblocksavailable to discover blocks + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, true) + } + + #[test] + #[ignore] + fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound_direct() { + // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. 
+ // nodes may push blocks and microblocks directly to each other + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, false) + } + + #[test] + #[ignore] + fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound_direct() { + // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT + // nodes may push blocks and microblocks directly to each other + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, false) } #[test] @@ -3696,10 +3960,6 @@ mod test { peer_configs[0].connection_opts.antientropy_retry = 1; peer_configs[1].connection_opts.antientropy_retry = 1; - // full rescan by default - peer_configs[0].connection_opts.full_inv_sync_interval = 1; - peer_configs[1].connection_opts.full_inv_sync_interval = 1; - // make peer 0 go slowly peer_configs[0].connection_opts.max_block_push = 2; peer_configs[0].connection_opts.max_microblock_push = 2; diff --git a/src/net/rpc.rs b/src/net/rpc.rs index 0e90d78ca6..5003a9ad88 100644 --- a/src/net/rpc.rs +++ b/src/net/rpc.rs @@ -44,7 +44,7 @@ use chainstate::burn::db::sortdb::SortitionDB; use chainstate::burn::ConsensusHash; use chainstate::stacks::db::blocks::CheckError; use chainstate::stacks::db::{ - blocks::MINIMUM_TX_FEE_RATE_PER_BYTE, BlockStreamData, StacksChainState, + blocks::MINIMUM_TX_FEE_RATE_PER_BYTE, StacksChainState, StreamCursor, }; use chainstate::stacks::Error as chain_error; use chainstate::stacks::*; @@ -60,12 +60,12 @@ use net::http::*; use net::p2p::PeerMap; use net::p2p::PeerNetwork; use net::relay::Relayer; -use net::ClientError; use net::Error as net_error; use net::HttpRequestMetadata; use net::HttpRequestType; use net::HttpResponseMetadata; use net::HttpResponseType; +use net::MemPoolSyncData; use net::MicroblocksData; use net::NeighborAddress; use net::NeighborsData; @@ -79,12 +79,14 @@ use net::UnconfirmedTransactionResponse; use net::UnconfirmedTransactionStatus; use net::UrlString; use net::HTTP_REQUEST_ID_RESERVED; +use net::MAX_HEADERS; use net::MAX_NEIGHBORS_DATA_LEN; use net::{ AccountEntryResponse, AttachmentPage, CallReadOnlyResponse, ContractSrcResponse, - GetAttachmentResponse, GetAttachmentsInvResponse, MapEntryResponse, + DataVarResponse, GetAttachmentResponse, GetAttachmentsInvResponse, MapEntryResponse, }; use net::{BlocksData, GetIsTraitImplementedResponse}; +use net::{ClientError, TipRequest}; use net::{RPCNeighbor, RPCNeighborsInfo}; use net::{RPCPeerInfoData, RPCPoxInfoData}; use util::db::DBConn; @@ -99,7 +101,7 @@ use vm::{ costs::{ExecutionCost, LimitedCostTracker}, database::{ clarity_store::ContractCommitment, BurnStateDB, ClarityDatabase, ClaritySerializable, - STXBalance, + STXBalance, StoreType, }, errors::Error as ClarityRuntimeError, errors::Error::Unchecked, @@ -149,7 +151,7 @@ pub struct ConversationHttp { // ongoing block streams reply_streams: VecDeque<( ReplyHandleHttp, - Option<(HttpChunkedTransferWriterState, BlockStreamData)>, + Option<(HttpChunkedTransferWriterState, StreamCursor)>, bool, )>, @@ -163,9 +165,10 @@ impl fmt::Display for ConversationHttp { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, - "http:id={},request={:?}", + "http:id={},request={:?},peer={:?}", self.conn_id, - self.pending_request.is_some() + self.pending_request.is_some(), + &self.peer_addr ) } } @@ -174,9 +177,10 @@ impl fmt::Debug for ConversationHttp { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, - "http:id={},request={:?}", + "http:id={},request={:?},peer={:?}", self.conn_id, - 
self.pending_request.is_some() + self.pending_request.is_some(), + &self.peer_addr ) } } @@ -209,14 +213,14 @@ impl RPCPeerInfoData { Some(ref unconfirmed) => { if unconfirmed.num_mined_txs() > 0 { ( - unconfirmed.unconfirmed_chain_tip.clone(), - unconfirmed.last_mblock_seq, + Some(unconfirmed.unconfirmed_chain_tip.clone()), + Some(unconfirmed.last_mblock_seq), ) } else { - (StacksBlockId([0x00; 32]), 0) + (None, None) } } - None => (StacksBlockId([0x00; 32]), 0), + None => (None, None), }; RPCPeerInfoData { @@ -620,7 +624,6 @@ impl ConversationHttp { &handler_args.genesis_chainstate_hash, ); let response = HttpResponseType::PeerInfo(response_metadata, pi); - // timer.observe_duration(); response.send(http, fd) } @@ -686,7 +689,7 @@ impl ConversationHttp { MAX_ATTACHMENT_INV_PAGES_PER_REQUEST ); warn!("{}", msg); - let response = HttpResponseType::NotFound(response_metadata, msg); + let response = HttpResponseType::BadRequest(response_metadata, msg); response.send(http, fd)?; return Ok(()); } @@ -779,7 +782,7 @@ impl ConversationHttp { fd: &mut W, response_metadata: HttpResponseMetadata, msg: String, - ) -> Result<Option<BlockStreamData>, net_error> { + ) -> Result<Option<StreamCursor>, net_error> { let response = HttpResponseType::NotFound(response_metadata, msg); return response.send(http, fd).and_then(|_| Ok(None)); } @@ -790,17 +793,67 @@ impl ConversationHttp { fd: &mut W, response_metadata: HttpResponseMetadata, msg: String, - ) -> Result<Option<BlockStreamData>, net_error> { + ) -> Result<Option<StreamCursor>, net_error> { // oops warn!("{}", &msg); let response = HttpResponseType::ServerError(response_metadata, msg); return response.send(http, fd).and_then(|_| Ok(None)); } + /// Handle a GET headers. Start streaming the reply. + /// The response's preamble (but not the headers list) will be synchronously written to the fd + /// (so use a fd that can buffer!) + /// Return a StreamCursor struct for the reward cycle we're sending, so we can continue to + /// make progress sending it + fn handle_getheaders<W: Write>( + http: &mut StacksHttp, + fd: &mut W, + req: &HttpRequestType, + tip: &StacksBlockId, + quantity: u64, + chainstate: &StacksChainState, + ) -> Result<Option<StreamCursor>, net_error> { + let response_metadata = HttpResponseMetadata::from(req); + if quantity > (MAX_HEADERS as u64) { + // bad request + let response = HttpResponseType::BadRequestJSON( + response_metadata, + serde_json::Value::String(format!( + "Invalid request: requested more than {} headers", + MAX_HEADERS + )), + ); + response.send(http, fd).and_then(|_| Ok(None)) + } else { + let stream = match StreamCursor::new_headers(chainstate, tip, quantity as u32) { + Ok(stream) => stream, + Err(chain_error::NoSuchBlockError) => { + return ConversationHttp::handle_notfound( + http, + fd, + response_metadata, + format!("No such block {:?}", &tip), + ); + } + Err(e) => { + // nope -- error trying to check + warn!("Failed to load block header {:?}: {:?}", req, &e); + let response = HttpResponseType::ServerError( + response_metadata, + format!("Failed to query block header {}", tip.to_hex()), + ); + return response.send(http, fd).and_then(|_| Ok(None)); + } + }; + let response = HttpResponseType::HeaderStream(response_metadata); + response.send(http, fd).and_then(|_| Ok(Some(stream))) + } + } + /// Handle a GET block. Start streaming the reply. /// The response's preamble (but not the block data) will be synchronously written to the fd /// (so use a fd that can buffer!) 
- /// Return a BlockStreamData struct for the block that we're sending, so we can continue to + /// Return a StreamCursor struct for the block that we're sending, so we can continue to /// make progress sending it. fn handle_getblock<W: Write>( http: &mut StacksHttp, fd: &mut W, req: &HttpRequestType, index_block_hash: &StacksBlockId, chainstate: &StacksChainState, - ) -> Result<Option<BlockStreamData>, net_error> { + ) -> Result<Option<StreamCursor>, net_error> { monitoring::increment_stx_blocks_served_counter(); let response_metadata = HttpResponseMetadata::from(req); @@ -833,7 +886,7 @@ impl ConversationHttp { } Ok(true) => { // yup! start streaming it back - let stream = BlockStreamData::new_block(index_block_hash.clone()); + let stream = StreamCursor::new_block(index_block_hash.clone()); let response = HttpResponseType::BlockStream(response_metadata); response.send(http, fd).and_then(|_| Ok(Some(stream))) } @@ -843,7 +896,7 @@ impl ConversationHttp { /// Handle a GET confirmed microblock stream, by _anchor block hash_. Start streaming the reply. /// The response's preamble (but not the block data) will be synchronously written to the fd /// (so use a fd that can buffer!) - /// Return a BlockStreamData struct for the block that we're sending, so we can continue to + /// Return a StreamCursor struct for the block that we're sending, so we can continue to /// make progress sending it. fn handle_getmicroblocks_confirmed<W: Write>( http: &mut StacksHttp, fd: &mut W, req: &HttpRequestType, index_anchor_block_hash: &StacksBlockId, chainstate: &StacksChainState, - ) -> Result<Option<BlockStreamData>, net_error> { + ) -> Result<Option<StreamCursor>, net_error> { monitoring::increment_stx_confirmed_micro_blocks_served_counter(); let response_metadata = HttpResponseMetadata::from(req); @@ -905,7 +958,7 @@ impl ConversationHttp { ); } Ok(Some(tail_index_microblock_hash)) => { - let (response, stream_opt) = match BlockStreamData::new_microblock_confirmed( + let (response, stream_opt) = match StreamCursor::new_microblock_confirmed( chainstate, tail_index_microblock_hash.clone(), ) { @@ -948,7 +1001,7 @@ impl ConversationHttp { /// Handle a GET confirmed microblock stream, by last _index microblock hash_ in the stream. Start streaming the reply. /// The response's preamble (but not the block data) will be synchronously written to the fd /// (so use a fd that can buffer!) - /// Return a BlockStreamData struct for the block that we're sending, so we can continue to + /// Return a StreamCursor struct for the block that we're sending, so we can continue to /// make progress sending it. fn handle_getmicroblocks_indexed<W: Write>( http: &mut StacksHttp, fd: &mut W, req: &HttpRequestType, tail_index_microblock_hash: &StacksBlockId, chainstate: &StacksChainState, - ) -> Result<Option<BlockStreamData>, net_error> { + ) -> Result<Option<StreamCursor>, net_error> { monitoring::increment_stx_micro_blocks_served_counter(); let response_metadata = HttpResponseMetadata::from(req); @@ -991,7 +1044,7 @@ impl ConversationHttp { } Ok(true) => { // yup! 
start streaming it back - let (response, stream_opt) = match BlockStreamData::new_microblock_confirmed( + let (response, stream_opt) = match StreamCursor::new_microblock_confirmed( chainstate, tail_index_microblock_hash.clone(), ) { @@ -1067,21 +1120,30 @@ impl ConversationHttp { clarity_tx.with_clarity_db_readonly(|clarity_db| { let key = ClarityDatabase::make_key_for_account_balance(&account); let burn_block_height = clarity_db.get_current_burnchain_block_height() as u64; - let (balance, balance_proof) = clarity_db - .get_with_proof::<STXBalance>(&key) - .map(|(a, b)| (a, format!("0x{}", b.to_hex()))) - .unwrap_or_else(|| (STXBalance::zero(), "".into())); - let balance_proof = if with_proof { - Some(balance_proof) + let (balance, balance_proof) = if with_proof { + clarity_db + .get_with_proof::<STXBalance>(&key) + .map(|(a, b)| (a, Some(format!("0x{}", b.to_hex())))) + .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) } else { - None + clarity_db + .get::<STXBalance>(&key) + .map(|a| (a, None)) + .unwrap_or_else(|| (STXBalance::zero(), None)) }; + let key = ClarityDatabase::make_key_for_account_nonce(&account); - let (nonce, nonce_proof) = clarity_db - .get_with_proof(&key) - .map(|(a, b)| (a, format!("0x{}", b.to_hex()))) - .unwrap_or_else(|| (0, "".into())); - let nonce_proof = if with_proof { Some(nonce_proof) } else { None }; + let (nonce, nonce_proof) = if with_proof { + clarity_db + .get_with_proof(&key) + .map(|(a, b)| (a, Some(format!("0x{}", b.to_hex())))) + .unwrap_or_else(|| (0, Some("".into()))) + } else { + clarity_db + .get(&key) + .map(|a| (a, None)) + .unwrap_or_else(|| (0, None)) + }; let unlocked = balance.get_available_balance_at_burn_block(burn_block_height); let (locked, unlock_height) = @@ -1109,6 +1171,57 @@ impl ConversationHttp { response.send(http, fd).map(|_| ()) } + /// Handle a GET on a smart contract's data var, given the current chain tip. Optionally + /// supplies a MARF proof for the value. + fn handle_get_data_var<W: Write>( + http: &mut StacksHttp, + fd: &mut W, + req: &HttpRequestType, + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + tip: &StacksBlockId, + contract_addr: &StacksAddress, + contract_name: &ContractName, + var_name: &ClarityName, + with_proof: bool, + ) -> Result<(), net_error> { + let response_metadata = HttpResponseMetadata::from(req); + let contract_identifier = + QualifiedContractIdentifier::new(contract_addr.clone().into(), contract_name.clone()); + + let response = + match chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let key = ClarityDatabase::make_key_for_trip( + &contract_identifier, + StoreType::Variable, + var_name, + ); + + let (value, marf_proof) = if with_proof { + clarity_db + .get_with_proof::<Value>(&key) + .map(|(a, b)| (a, Some(format!("0x{}", b.to_hex()))))? + } else { + clarity_db.get::<Value>(&key).map(|a| (a, None))? + }; + + let data = format!("0x{}", value.serialize()); + Some(DataVarResponse { data, marf_proof }) + }) + }) { + Ok(Some(Some(data))) => HttpResponseType::GetDataVar(response_metadata, data), + Ok(Some(None)) => { + HttpResponseType::NotFound(response_metadata, "Data var not found".into()) + } + Ok(None) | Err(_) => { + HttpResponseType::NotFound(response_metadata, "Chain tip not found".into()) + } + }; + + response.send(http, fd).map(|_| ()) + } + /// Handle a GET on a smart contract's data map, given the current chain tip. Optionally /// supplies a MARF proof for the value. 
fn handle_get_map_entry<W: Write>( @@ -1136,22 +1249,22 @@ impl ConversationHttp { map_name, key, ); - let (value, marf_proof) = clarity_db - .get_with_proof::<Value>(&key) - .map(|(a, b)| (a, format!("0x{}", b.to_hex()))) - .unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (Value::none(), "".into()) - }); - let marf_proof = if with_proof { - test_debug!( - "Return a MARF proof of '{}' of {} bytes", - &key, - marf_proof.as_bytes().len() - ); - Some(marf_proof) + let (value, marf_proof) = if with_proof { + clarity_db + .get_with_proof::<Value>(&key) + .map(|(a, b)| (a, Some(format!("0x{}", b.to_hex())))) + .unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (Value::none(), Some("".into())) + }) } else { - None + clarity_db + .get::<Value>(&key) + .map(|a| (a, None)) + .unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (Value::none(), None) + }) }; let data = format!("0x{}", value.serialize()); @@ -1278,19 +1391,21 @@ impl ConversationHttp { clarity_tx.with_clarity_db_readonly(|db| { let source = db.get_contract_src(&contract_identifier)?; let contract_commit_key = make_contract_hash_key(&contract_identifier); - let (contract_commit, proof) = db - .get_with_proof::<ContractCommitment>(&contract_commit_key) - .expect("BUG: obtained source, but couldn't get MARF proof."); - let marf_proof = if with_proof { - Some(proof.to_hex()) + let (contract_commit, proof) = if with_proof { + db.get_with_proof::<ContractCommitment>(&contract_commit_key) + .map(|(a, b)| (a, Some(format!("0x{}", &b.to_hex())))) + .expect("BUG: obtained source, but couldn't get contract commit") } else { - None + db.get::<ContractCommitment>(&contract_commit_key) + .map(|a| (a, None)) + .expect("BUG: obtained source, but couldn't get contract commit") }; + let publish_height = contract_commit.block_height; Some(ContractSrcResponse { source, publish_height, - marf_proof, + marf_proof: proof, }) }) }) { @@ -1400,7 +1515,7 @@ impl ConversationHttp { /// Handle a GET unconfirmed microblock stream. Start streaming the reply. /// The response's preamble (but not the block data) will be synchronously written to the fd /// (so use a fd that can buffer!) - /// Return a BlockStreamData struct for the block that we're sending, so we can continue to + /// Return a StreamCursor struct for the block that we're sending, so we can continue to /// make progress sending it. fn handle_getmicroblocks_unconfirmed<W: Write>( http: &mut StacksHttp, fd: &mut W, req: &HttpRequestType, index_anchor_block_hash: &StacksBlockId, min_seq: u16, chainstate: &StacksChainState, - ) -> Result<Option<BlockStreamData>, net_error> { + ) -> Result<Option<StreamCursor>, net_error> { let response_metadata = HttpResponseMetadata::from(req); // do we have this unconfirmed microblock stream? @@ -1444,7 +1559,7 @@ impl ConversationHttp { } Ok(true) => { // yup! start streaming it back - let (response, stream_opt) = match BlockStreamData::new_microblock_unconfirmed( + let (response, stream_opt) = match StreamCursor::new_microblock_unconfirmed( chainstate, index_anchor_block_hash.clone(), min_seq, @@ -1539,23 +1654,64 @@ impl ConversationHttp { /// Load up the canonical Stacks chain tip. Note that this is subject to both burn chain block /// Stacks block availability -- different nodes with different partial replicas of the Stacks chain state /// will return different values here. - /// tip_opt is given by the HTTP request as the optional query parameter for the chain tip - /// hash. It will be None if there was no parameter given. 
- /// The order of chain tips this method prefers is as follows: - /// * tip_opt, if it's Some(..), - /// * the unconfirmed canonical stacks chain tip, if initialized - /// * the confirmed canonical stacks chain tip + /// + /// # Warn + /// - There is a potential race condition. If this function is loading the latest unconfirmed + /// tip, that tip may get invalidated by the time it is used in `maybe_read_only_clarity_tx`, + /// which is used to load clarity state at a particular tip (which would lead to a 404 error). + /// If this race condition occurs frequently, we can modify `maybe_read_only_clarity_tx` to + /// re-load the unconfirmed chain tip. Refer to issue #2997. + /// + /// # Inputs + /// - `tip_req` is given by the HTTP request as the optional query parameter for the chain tip + /// hash. It will be UseLatestAnchoredTip if there was no parameter given. If it is set to + /// `latest`, the parameter will be set to UseLatestUnconfirmedTip. fn handle_load_stacks_chain_tip<W: Write>( http: &mut StacksHttp, fd: &mut W, req: &HttpRequestType, - tip_opt: Option<&StacksBlockId>, + tip_req: &TipRequest, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ) -> Result<Option<StacksBlockId>, net_error> { - match tip_opt { - Some(tip) => Ok(Some(*tip).clone()), - None => match chainstate.get_stacks_chain_tip(sortdb)? { + match tip_req { + TipRequest::UseLatestUnconfirmedTip => { + let unconfirmed_chain_tip_opt = match &mut chainstate.unconfirmed_state { + Some(unconfirmed_state) => { + match unconfirmed_state.get_unconfirmed_state_if_exists() { + Ok(res) => res, + Err(msg) => { + let response_metadata = HttpResponseMetadata::from(req); + let response = HttpResponseType::NotFound(response_metadata, msg); + return response.send(http, fd).and_then(|_| Ok(None)); + } + } + } + None => None, + }; + + if let Some(unconfirmed_chain_tip) = unconfirmed_chain_tip_opt { + Ok(Some(unconfirmed_chain_tip)) + } else { + match chainstate.get_stacks_chain_tip(sortdb)? { + Some(tip) => Ok(Some(StacksBlockHeader::make_index_block_hash( + &tip.consensus_hash, + &tip.anchored_block_hash, + ))), + None => { + let response_metadata = HttpResponseMetadata::from(req); + warn!("Failed to load Stacks chain tip"); + let response = HttpResponseType::NotFound( + response_metadata, + format!("Failed to load Stacks chain tip"), + ); + response.send(http, fd).and_then(|_| Ok(None)) + } + } + } + } + TipRequest::SpecificTip(tip) => Ok(Some(*tip).clone()), + TipRequest::UseLatestAnchoredTip => match chainstate.get_stacks_chain_tip(sortdb)? { Some(tip) => Ok(Some(StacksBlockHeader::make_index_block_hash( &tip.consensus_hash, &tip.anchored_block_hash, @@ -1577,24 +1733,16 @@ impl ConversationHttp { http: &mut StacksHttp, fd: &mut W, req: &HttpRequestType, - tip_opt: Option<&StacksBlockId>, - sortdb: &SortitionDB, + tip: StacksBlockId, chainstate: &StacksChainState, ) -> Result<Option<(ConsensusHash, BlockHeaderHash)>, net_error> { - match tip_opt { - Some(tip) => match chainstate.get_block_header_hashes(&tip)? { - Some((ch, bl)) => { - return Ok(Some((ch, bl))); - } - None => {} - }, - None => match chainstate.get_stacks_chain_tip(sortdb)? { - Some(tip) => { - return Ok(Some((tip.consensus_hash, tip.anchored_block_hash))); - } - None => {} - }, + match chainstate.get_block_header_hashes(&tip)? 
{ + Some((ch, bl)) => { + return Ok(Some((ch, bl))); + } + None => {} } + let response_metadata = HttpResponseMetadata::from(req); warn!("Failed to load Stacks chain tip"); let response = HttpResponseType::ServerError( @@ -1932,6 +2080,35 @@ impl ConversationHttp { response.send(http, fd).and_then(|_| Ok(accepted)) } + /// Handle a request for mempool transactions in bulk + fn handle_mempool_query<W: Write>( + http: &mut StacksHttp, + fd: &mut W, + req: &HttpRequestType, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + query: MemPoolSyncData, + max_txs: u64, + page_id: Option<Txid>, + ) -> Result<StreamCursor, net_error> { + let response_metadata = HttpResponseMetadata::from(req); + let response = HttpResponseType::MemPoolTxStream(response_metadata); + let height = chainstate + .get_stacks_chain_tip(sortdb)? + .map(|blk| blk.height) + .unwrap_or(0); + + debug!( + "Begin mempool query"; + "page_id" => %page_id.map(|txid| format!("{}", &txid)).unwrap_or("(none".to_string()), + "block_height" => height, + "max_txs" => max_txs + ); + + let stream = StreamCursor::new_tx_stream(query, max_txs, height, page_id); + response.send(http, fd).and_then(|_| Ok(stream)) + } + /// Handle an external HTTP request. /// Some requests, such as those for blocks, will create new reply streams. This method adds /// those new streams into the `reply_streams` set. @@ -1962,12 +2139,12 @@ impl ConversationHttp { )?; None } - HttpRequestType::GetPoxInfo(ref _md, ref tip_opt) => { + HttpRequestType::GetPoxInfo(ref _md, ref tip_req) => { if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( &mut self.connection.protocol, &mut reply, &req, - tip_opt.as_ref(), + tip_req, sortdb, chainstate, )? { @@ -1992,6 +2169,27 @@ impl ConversationHttp { )?; None } + HttpRequestType::GetHeaders(ref _md, ref quantity, ref tip_req) => { + if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( + &mut self.connection.protocol, + &mut reply, + &req, + tip_req, + sortdb, + chainstate, + )? { + ConversationHttp::handle_getheaders( + &mut self.connection.protocol, + &mut reply, + &req, + &tip, + *quantity, + chainstate, + )? + } else { + None + } + } HttpRequestType::GetBlock(ref _md, ref index_block_hash) => { ConversationHttp::handle_getblock( &mut self.connection.protocol, @@ -2042,12 +2240,12 @@ impl ConversationHttp { )?; None } - HttpRequestType::GetAccount(ref _md, ref principal, ref tip_opt, ref with_proof) => { + HttpRequestType::GetAccount(ref _md, ref principal, ref tip_req, ref with_proof) => { if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( &mut self.connection.protocol, &mut reply, &req, - tip_opt.as_ref(), + tip_req, sortdb, chainstate, )? { @@ -2064,20 +2262,51 @@ impl ConversationHttp { } None } + HttpRequestType::GetDataVar( + ref _md, + ref contract_addr, + ref contract_name, + ref var_name, + ref tip_req, + ref with_proof, + ) => { + if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( + &mut self.connection.protocol, + &mut reply, + &req, + tip_req, + sortdb, + chainstate, + )? 
{ + ConversationHttp::handle_get_data_var( + &mut self.connection.protocol, + &mut reply, + &req, + sortdb, + chainstate, + &tip, + contract_addr, + contract_name, + var_name, + *with_proof, + )?; + } + None + } HttpRequestType::GetMapEntry( ref _md, ref contract_addr, ref contract_name, ref map_name, ref key, - ref tip_opt, + ref tip_req, ref with_proof, ) => { if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( &mut self.connection.protocol, &mut reply, &req, - tip_opt.as_ref(), + tip_req, sortdb, chainstate, )? { @@ -2109,13 +2338,13 @@ impl ConversationHttp { ref _md, ref contract_addr, ref contract_name, - ref tip_opt, + ref tip_req, ) => { if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( &mut self.connection.protocol, &mut reply, &req, - tip_opt.as_ref(), + tip_req, sortdb, chainstate, )? { @@ -2151,13 +2380,13 @@ impl ConversationHttp { ref as_sender, ref func_name, ref args, - ref tip_opt, + ref tip_req, ) => { if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( &mut self.connection.protocol, &mut reply, &req, - tip_opt.as_ref(), + tip_req, sortdb, chainstate, )? { @@ -2182,14 +2411,14 @@ impl ConversationHttp { ref _md, ref contract_addr, ref contract_name, - ref tip_opt, + ref tip_req, ref with_proof, ) => { if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( &mut self.connection.protocol, &mut reply, &req, - tip_opt.as_ref(), + tip_req, sortdb, chainstate, )? { @@ -2285,38 +2514,60 @@ impl ConversationHttp { } None } - HttpRequestType::PostMicroblock(ref _md, ref mblock, ref tip_opt) => { - if let Some((consensus_hash, block_hash)) = - ConversationHttp::handle_load_stacks_chain_tip_hashes( - &mut self.connection.protocol, - &mut reply, - &req, - tip_opt.as_ref(), - sortdb, - chainstate, - )? - { - let accepted = ConversationHttp::handle_post_microblock( - &mut self.connection.protocol, - &mut reply, - &req, - &consensus_hash, - &block_hash, - chainstate, - mblock, - )?; - if accepted { - // forward to peer network - let tip = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash); - ret = Some(StacksMessageType::Microblocks(MicroblocksData { - index_anchor_block: tip, - microblocks: vec![(*mblock).clone()], - })); + HttpRequestType::PostMicroblock(ref _md, ref mblock, ref tip_req) => { + if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( + &mut self.connection.protocol, + &mut reply, + &req, + tip_req, + sortdb, + chainstate, + )? { + if let Some((consensus_hash, block_hash)) = + ConversationHttp::handle_load_stacks_chain_tip_hashes( + &mut self.connection.protocol, + &mut reply, + &req, + tip, + chainstate, + )? + { + let accepted = ConversationHttp::handle_post_microblock( + &mut self.connection.protocol, + &mut reply, + &req, + &consensus_hash, + &block_hash, + chainstate, + mblock, + )?; + if accepted { + // forward to peer network + let tip = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &block_hash, + ); + ret = Some(StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: tip, + microblocks: vec![(*mblock).clone()], + })); + } } } None } + HttpRequestType::MemPoolQuery(ref _md, ref query, ref page_id_opt) => { + Some(ConversationHttp::handle_mempool_query( + &mut self.connection.protocol, + &mut reply, + &req, + sortdb, + chainstate, + query.clone(), + network.connection_opts.mempool_max_tx_query, + page_id_opt.clone(), + )?) 
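The `MemPoolQuery` arm above is the server half of the new mempool anti-entropy sync: the reply is a paged stream, capped at `mempool_max_tx_query` transactions, and the client re-issues the query with the last page ID to resume. A hedged sketch of the client-side paging loop; `fetch_page` is a hypothetical transport stub standing in for `new_mempool_query` plus the HTTP round trip:

```rust
// Stand-in for the real Txid type.
#[derive(Clone, Copy)]
struct Txid([u8; 32]);

struct MempoolPage {
    txs: Vec<Vec<u8>>,         // serialized transactions in this page
    next_page_id: Option<Txid>, // None once the server has been exhausted
}

// Hypothetical transport stub: issue one MemPoolQuery request.
fn fetch_page(_page_id: Option<Txid>) -> MempoolPage {
    MempoolPage { txs: vec![], next_page_id: None }
}

fn sync_mempool() -> Vec<Vec<u8>> {
    let mut all_txs = vec![];
    let mut page_id = None;
    loop {
        // hand the previous page's ID back so the server resumes the scan
        let page = fetch_page(page_id);
        all_txs.extend(page.txs);
        match page.next_page_id {
            Some(next) => page_id = Some(next),
            None => break, // no more matching transactions
        }
    }
    all_txs
}
```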
+ } HttpRequestType::OptionsPreflight(ref _md, ref _path) => { let response_metadata = HttpResponseMetadata::from(&req); let response = HttpResponseType::OptionsPreflight(response_metadata); @@ -2330,13 +2581,13 @@ impl ConversationHttp { ref contract_addr, ref contract_name, ref trait_id, - ref tip_opt, + ref tip_req, ) => { if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( &mut self.connection.protocol, &mut reply, &req, - tip_opt.as_ref(), + tip_req, sortdb, chainstate, )? { @@ -2392,11 +2643,9 @@ impl ConversationHttp { } /// Make progress on outbound requests. - /// Return true if the connection should be kept alive after all messages are drained. - /// If we process a request with "Connection: close", then return false (indicating that the - /// connection should be severed once the conversation is drained) fn send_outbound_responses( &mut self, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, ) -> Result<(), net_error> { // send out streamed responses in the order they were requested @@ -2410,6 +2659,8 @@ impl ConversationHttp { &self, self.reply_streams.len() ); + let _self_str = format!("{}", &self); + match self.reply_streams.front_mut() { Some((ref mut reply, ref mut stream_opt, ref keep_alive)) => { do_keep_alive = *keep_alive; @@ -2419,32 +2670,44 @@ impl ConversationHttp { Some((ref mut http_chunk_state, ref mut stream)) => { let mut encoder = HttpChunkedTransferWriter::from_writer_state(reply, http_chunk_state); - match stream.stream_to(chainstate, &mut encoder, STREAM_CHUNK_SIZE) { + match stream.stream_to(mempool, chainstate, &mut encoder, STREAM_CHUNK_SIZE) + { Ok(nw) => { - test_debug!("streamed {} bytes", nw); + test_debug!("{}: Streamed {} bytes", &_self_str, nw); if nw == 0 { // EOF -- finish chunk and stop sending. if !encoder.corked() { encoder.flush().map_err(|e| { - test_debug!("Write error on encoder flush: {:?}", &e); + test_debug!( + "{}: Write error on encoder flush: {:?}", + &_self_str, + &e + ); net_error::WriteError(e) })?; encoder.cork(); - test_debug!("stream indicates EOF"); + test_debug!("{}: Stream indicates EOF", &_self_str); } // try moving some data to the connection only once we're done // streaming match reply.try_flush() { Ok(res) => { - test_debug!("Streamed reply is drained"); + test_debug!( + "{}: Streamed reply is drained?: {}", + &_self_str, + res + ); drained_handle = res; } Err(e) => { // dead - warn!("Broken HTTP connection: {:?}", &e); + warn!( + "{}: Broken HTTP connection: {:?}", + &_self_str, &e + ); broken = true; } } @@ -2456,7 +2719,10 @@ impl ConversationHttp { // For example, if we're streaming an unconfirmed block or // microblock, the data can get moved to the chunk store out from // under the stream. - warn!("Failed to send to HTTP connection: {:?}", &e); + warn!( + "{}: Failed to send to HTTP connection: {:?}", + &_self_str, &e + ); broken = true; } } @@ -2468,12 +2734,12 @@ impl ConversationHttp { // try moving some data to the connection match reply.try_flush() { Ok(res) => { - test_debug!("Reply is drained"); + test_debug!("{}: Reply is drained", &_self_str); drained_handle = res; } Err(e) => { // dead - warn!("Broken HTTP connection: {:?}", &e); + warn!("{}: Broken HTTP connection: {:?}", &_self_str, &e); broken = true; } } @@ -2500,7 +2766,6 @@ impl ConversationHttp { self.keep_alive = false; } } - Ok(()) } @@ -2568,8 +2833,12 @@ impl ConversationHttp { } /// Make progress on in-flight messages. 
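The `&MemPoolDB` argument threaded through `send_outbound_responses` here (and through `try_flush` and `send` just below) follows from the streaming design: a reply drains through a cursor chunk by chunk, and a mempool-query reply has to read its next chunk out of the mempool database rather than the chainstate. A simplified sketch of the drain loop, with a made-up `Cursor` trait standing in for `StreamCursor`:

```rust
use std::io::Write;

// Hypothetical stand-in for StreamCursor: yields the next chunk of a reply,
// returning Ok(0) at end-of-stream, just as the loop above expects.
trait Cursor {
    fn stream_to(&mut self, out: &mut dyn Write, chunk_size: usize) -> std::io::Result<usize>;
}

// Drain one cursor until EOF; the real code then corks the chunked encoder
// and tries to flush the connection before moving to the next reply stream.
fn drain(cursor: &mut dyn Cursor, out: &mut dyn Write) -> std::io::Result<()> {
    loop {
        let nw = cursor.stream_to(out, 4096)?;
        if nw == 0 {
            break; // EOF: nothing further buffered for this reply
        }
    }
    out.flush()
}
```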
- pub fn try_flush(&mut self, chainstate: &mut StacksChainState) -> Result<(), net_error> { - self.send_outbound_responses(chainstate)?; + pub fn try_flush( + &mut self, + mempool: &MemPoolDB, + chainstate: &mut StacksChainState, + ) -> Result<(), net_error> { + self.send_outbound_responses(mempool, chainstate)?; self.recv_inbound_response()?; Ok(()) } @@ -2649,7 +2918,7 @@ impl ConversationHttp { self.handle_request(req, network, sortdb, chainstate, mempool, handler_args) })?; - debug!("Processed HTTPRequest"; "path" => %path, "processing_time_ms" => start_time.elapsed().as_millis()); + debug!("Processed HTTPRequest"; "path" => %path, "processing_time_ms" => start_time.elapsed().as_millis(), "conn_id" => self.conn_id, "peer_addr" => &self.peer_addr); if let Some(msg) = msg_opt { ret.push(msg); @@ -2709,12 +2978,13 @@ impl ConversationHttp { pub fn send( &mut self, w: &mut W, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, ) -> Result { let mut total_sz = 0; loop { // prime the Write - self.try_flush(chainstate)?; + self.try_flush(mempool, chainstate)?; let sz = match self.connection.send_data(w) { Ok(sz) => sz, @@ -2741,10 +3011,10 @@ impl ConversationHttp { } /// Make a new getinfo request to this endpoint - pub fn new_getpoxinfo(&self, tip_opt: Option) -> HttpRequestType { + pub fn new_getpoxinfo(&self, tip_req: TipRequest) -> HttpRequestType { HttpRequestType::GetPoxInfo( HttpRequestMetadata::from_host(self.peer_host.clone()), - tip_opt, + tip_req, ) } @@ -2753,6 +3023,15 @@ impl ConversationHttp { HttpRequestType::GetNeighbors(HttpRequestMetadata::from_host(self.peer_host.clone())) } + /// Make a new getheaders request to this endpoint + pub fn new_getheaders(&self, quantity: u64, tip_req: TipRequest) -> HttpRequestType { + HttpRequestType::GetHeaders( + HttpRequestMetadata::from_host(self.peer_host.clone()), + quantity, + tip_req, + ) + } + /// Make a new getblock request to this endpoint pub fn new_getblock(&self, index_block_hash: StacksBlockId) -> HttpRequestType { HttpRequestType::GetBlock( @@ -2826,12 +3105,12 @@ impl ConversationHttp { pub fn new_post_microblock( &self, mblock: StacksMicroblock, - tip_opt: Option, + tip_req: TipRequest, ) -> HttpRequestType { HttpRequestType::PostMicroblock( HttpRequestMetadata::from_host(self.peer_host.clone()), mblock, - tip_opt, + tip_req, ) } @@ -2839,13 +3118,32 @@ impl ConversationHttp { pub fn new_getaccount( &self, principal: PrincipalData, - tip_opt: Option, + tip_req: TipRequest, with_proof: bool, ) -> HttpRequestType { HttpRequestType::GetAccount( HttpRequestMetadata::from_host(self.peer_host.clone()), principal, - tip_opt, + tip_req, + with_proof, + ) + } + + /// Make a new request for a data var + pub fn new_getdatavar( + &self, + contract_addr: StacksAddress, + contract_name: ContractName, + var_name: ClarityName, + tip_req: TipRequest, + with_proof: bool, + ) -> HttpRequestType { + HttpRequestType::GetDataVar( + HttpRequestMetadata::from_host(self.peer_host.clone()), + contract_addr, + contract_name, + var_name, + tip_req, with_proof, ) } @@ -2857,7 +3155,7 @@ impl ConversationHttp { contract_name: ContractName, map_name: ClarityName, key: Value, - tip_opt: Option, + tip_req: TipRequest, with_proof: bool, ) -> HttpRequestType { HttpRequestType::GetMapEntry( @@ -2866,7 +3164,7 @@ impl ConversationHttp { contract_name, map_name, key, - tip_opt, + tip_req, with_proof, ) } @@ -2876,14 +3174,14 @@ impl ConversationHttp { &self, contract_addr: StacksAddress, contract_name: ContractName, - tip_opt: Option, + tip_req: 
TipRequest, with_proof: bool, ) -> HttpRequestType { HttpRequestType::GetContractSrc( HttpRequestMetadata::from_host(self.peer_host.clone()), contract_addr, contract_name, - tip_opt, + tip_req, with_proof, ) } @@ -2893,13 +3191,13 @@ impl ConversationHttp { &self, contract_addr: StacksAddress, contract_name: ContractName, - tip_opt: Option, + tip_req: TipRequest, ) -> HttpRequestType { HttpRequestType::GetContractABI( HttpRequestMetadata::from_host(self.peer_host.clone()), contract_addr, contract_name, - tip_opt, + tip_req, ) } @@ -2911,7 +3209,7 @@ impl ConversationHttp { sender: PrincipalData, function_name: ClarityName, function_args: Vec, - tip_opt: Option, + tip_req: TipRequest, ) -> HttpRequestType { HttpRequestType::CallReadOnlyFunction( HttpRequestMetadata::from_host(self.peer_host.clone()), @@ -2920,7 +3218,7 @@ impl ConversationHttp { sender, function_name, function_args, - tip_opt, + tip_req, ) } @@ -2936,6 +3234,19 @@ impl ConversationHttp { pages_indexes, ) } + + /// Make a new request for mempool contents + pub fn new_mempool_query( + &self, + query: MemPoolSyncData, + page_id_opt: Option, + ) -> HttpRequestType { + HttpRequestType::MemPoolQuery( + HttpRequestMetadata::from_host(self.peer_host.clone()), + query, + page_id_opt, + ) + } } #[cfg(test)] @@ -2950,8 +3261,8 @@ mod test { use burnchains::*; use chainstate::burn::ConsensusHash; use chainstate::stacks::db::blocks::test::*; - use chainstate::stacks::db::BlockStreamData; use chainstate::stacks::db::StacksChainState; + use chainstate::stacks::db::StreamCursor; use chainstate::stacks::miner::*; use chainstate::stacks::test::*; use chainstate::stacks::Error as chain_error; @@ -2969,6 +3280,8 @@ mod test { use crate::types::chainstate::BurnchainHeaderHash; use chainstate::stacks::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; + use core::mempool::{BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS}; + use super::*; const TEST_CONTRACT: &'static str = " @@ -2980,14 +3293,19 @@ mod test { (define-public (add-unit) (begin (map-set unit-map { account: tx-sender } { units: 1 } ) + (var-set bar 1) (ok 1))) (begin (map-set unit-map { account: 'ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R } { units: 123 }))"; + const TEST_CONTRACT_UNCONFIRMED: &'static str = "(define-read-only (ro-test) (ok 1))"; + fn convo_send_recv( sender: &mut ConversationHttp, + sender_mempool: &MemPoolDB, sender_chainstate: &mut StacksChainState, receiver: &mut ConversationHttp, + receiver_mempool: &MemPoolDB, receiver_chainstate: &mut StacksChainState, ) -> () { let (mut pipe_read, mut pipe_write) = Pipe::new(); @@ -2996,15 +3314,19 @@ mod test { loop { let res = true; - sender.try_flush(sender_chainstate).unwrap(); - receiver.try_flush(receiver_chainstate).unwrap(); + sender.try_flush(sender_mempool, sender_chainstate).unwrap(); + receiver + .try_flush(sender_mempool, receiver_chainstate) + .unwrap(); pipe_write.try_flush().unwrap(); let all_relays_flushed = receiver.num_pending_outbound() == 0 && sender.num_pending_outbound() == 0; - let nw = sender.send(&mut pipe_write, sender_chainstate).unwrap(); + let nw = sender + .send(&mut pipe_write, sender_mempool, sender_chainstate) + .unwrap(); let nr = receiver.recv(&mut pipe_read).unwrap(); test_debug!( @@ -3023,12 +3345,19 @@ mod test { } } + /// General testing function to test RPC calls. + /// This function sets up two peers, a client and a server. 
+ /// It takes in a function of type F that generates the request to be sent to the server + /// It takes in another function of type C that verifies that the result from + /// the server is as expected. + /// The parameter `include_microblocks` determines whether a microblock stream is mined or not. fn test_rpc<F, C>( test_name: &str, peer_1_p2p: u16, peer_1_http: u16, peer_2_p2p: u16, peer_2_http: u16, + include_microblocks: bool, make_request: F, check_result: C, ) -> () where @@ -3145,7 +3474,7 @@ }; // make an unconfirmed contract - let unconfirmed_contract = "(define-read-only (ro-test) (ok 1))"; + let unconfirmed_contract = TEST_CONTRACT_UNCONFIRMED.clone(); let mut tx_unconfirmed_contract = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&privk1).unwrap(), @@ -3232,68 +3561,132 @@ } peer_1.process_stacks_epoch_at_tip(&stacks_block, &vec![]); peer_2.process_stacks_epoch_at_tip(&stacks_block, &vec![]); - // build 1-block microblock stream with the contract-call and the unconfirmed contract - let microblock = { - let sortdb = peer_1.sortdb.take().unwrap(); - Relayer::setup_unconfirmed_state(peer_1.chainstate(), &sortdb).unwrap(); - let mblock = { - let sort_iconn = sortdb.index_conn(); - let mut microblock_builder = StacksMicroblockBuilder::new( - stacks_block.block_hash(), - consensus_hash.clone(), - peer_1.chainstate(), - &sort_iconn, - BlockBuilderSettings::max_value(), - ) - .unwrap(); - let microblock = microblock_builder - .mine_next_microblock_from_txs( - vec![ - (tx_cc_signed, tx_cc_len), - (tx_unconfirmed_contract_signed, tx_unconfirmed_contract_len), - ], - &microblock_privkey, - ) - .unwrap(); - microblock - }; - peer_1.sortdb = Some(sortdb); - mblock }; + // begin microblock section + if include_microblocks { + // build 1-block microblock stream with the contract-call and the unconfirmed contract + let microblock = { + let sortdb = peer_1.sortdb.take().unwrap(); + Relayer::setup_unconfirmed_state(peer_1.chainstate(), &sortdb).unwrap(); + let mblock = { + let sort_iconn = sortdb.index_conn(); + let mut microblock_builder = StacksMicroblockBuilder::new( + stacks_block.block_hash(), + consensus_hash.clone(), + peer_1.chainstate(), + &sort_iconn, + BlockBuilderSettings::max_value(), ) .unwrap(); + let microblock = microblock_builder + .mine_next_microblock_from_txs( + vec![ + (tx_cc_signed, tx_cc_len), + (tx_unconfirmed_contract_signed, tx_unconfirmed_contract_len), + ], + &microblock_privkey, + ) + .unwrap(); + microblock + }; + peer_1.sortdb = Some(sortdb); + mblock }; - // store microblock stream - peer_1 - .chainstate() - .preprocess_streamed_microblock( + // store microblock stream + peer_1 + .chainstate() + .preprocess_streamed_microblock( + &consensus_hash, + &stacks_block.block_hash(), + &microblock, + ) + .unwrap(); + peer_2 + .chainstate() + .preprocess_streamed_microblock( + &consensus_hash, + &stacks_block.block_hash(), + &microblock, + ) + .unwrap(); + + // process microblock stream to generate unconfirmed state + let canonical_tip = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + ); + let sortdb1 = peer_1.sortdb.take().unwrap(); + let sortdb2 = peer_2.sortdb.take().unwrap(); + peer_1 + .chainstate() + .reload_unconfirmed_state(&sortdb1.index_conn(), canonical_tip.clone()) + .unwrap(); + peer_2 + .chainstate() + .reload_unconfirmed_state(&sortdb2.index_conn(), canonical_tip.clone()) + .unwrap(); + peer_1.sortdb = Some(sortdb1); + peer_2.sortdb = Some(sortdb2); + } + // end microblock section + + // stuff some 
transactions into peer_2's mempool + // (relates to mempool query tests) + let mut mempool = peer_2.mempool.take().unwrap(); + let mut mempool_tx = mempool.tx_begin().unwrap(); + for i in 0..10 { + let pk = StacksPrivateKey::new(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&StacksPrivateKey::new())], ) .unwrap(); - peer_2 - .chainstate() - .preprocess_streamed_microblock( + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), &consensus_hash, &stacks_block.block_hash(), - &microblock, + txid.clone(), + tx_bytes, + tx_fee, + stacks_block.header.total_work.work, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, ) .unwrap(); - - // process microblock stream to generate unconfirmed state - let canonical_tip = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash()); - let sortdb1 = peer_1.sortdb.take().unwrap(); - let sortdb2 = peer_2.sortdb.take().unwrap(); - peer_1 - .chainstate() - .reload_unconfirmed_state(&sortdb1.index_conn(), canonical_tip.clone()) - .unwrap(); - peer_2 - .chainstate() - .reload_unconfirmed_state(&sortdb2.index_conn(), canonical_tip.clone()) - .unwrap(); - peer_1.sortdb = Some(sortdb1); - peer_2.sortdb = Some(sortdb2); + } + mempool_tx.commit().unwrap(); + peer_2.mempool.replace(mempool); let view_1 = peer_1.get_burnchain_view().unwrap(); let view_2 = peer_2.get_burnchain_view().unwrap(); @@ -3321,19 +3714,22 @@ mod test { let req = make_request(&mut peer_1, &mut convo_1, &mut peer_2, &mut convo_2); convo_1.send_request(req.clone()).unwrap(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let peer_2_mempool = peer_2.mempool.take().unwrap(); test_debug!("convo1 sends to convo2"); convo_send_recv( &mut convo_1, + &peer_1_mempool, peer_1.chainstate(), &mut convo_2, + &peer_2_mempool, peer_2.chainstate(), ); // hack around the borrow-checker let mut peer_1_sortdb = peer_1.sortdb.take().unwrap(); let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); Relayer::setup_unconfirmed_state(&mut peer_1_stacks_node.chainstate, &peer_1_sortdb) .unwrap(); @@ -3351,6 +3747,7 @@ mod test { peer_1.sortdb = Some(peer_1_sortdb); peer_1.stacks_node = Some(peer_1_stacks_node); peer_1.mempool = Some(peer_1_mempool); + peer_2.mempool = Some(peer_2_mempool); test_debug!("convo2 sends to convo1"); @@ -3374,12 +3771,14 @@ mod test { peer_2.sortdb = Some(peer_2_sortdb); peer_2.stacks_node = Some(peer_2_stacks_node); - peer_2.mempool = Some(peer_2_mempool); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); convo_send_recv( &mut convo_2, + 
&peer_2_mempool, peer_2.chainstate(), &mut convo_1, + &peer_1_mempool, peer_1.chainstate(), ); @@ -3388,14 +3787,17 @@ mod test { // hack around the borrow-checker convo_send_recv( &mut convo_1, + &peer_1_mempool, peer_1.chainstate(), &mut convo_2, + &peer_2_mempool, peer_2.chainstate(), ); + peer_2.mempool = Some(peer_2_mempool); + let mut peer_1_sortdb = peer_1.sortdb.take().unwrap(); let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); Relayer::setup_unconfirmed_state(&mut peer_1_stacks_node.chainstate, &peer_1_sortdb) .unwrap(); @@ -3410,12 +3812,14 @@ mod test { ) .unwrap(); + convo_1 + .try_flush(&peer_1_mempool, &mut peer_1_stacks_node.chainstate) + .unwrap(); + peer_1.sortdb = Some(peer_1_sortdb); peer_1.stacks_node = Some(peer_1_stacks_node); peer_1.mempool = Some(peer_1_mempool); - convo_1.try_flush(peer_1.chainstate()).unwrap(); - // should have gotten a reply let resp_opt = convo_1.try_get_response(); assert!(resp_opt.is_some()); @@ -3434,6 +3838,7 @@ mod test { 40001, 50000, 50001, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -3468,13 +3873,17 @@ mod test { #[test] #[ignore] fn test_rpc_getpoxinfo() { + // Test v2/pox (aka GetPoxInfo) endpoint. + // In this test, `tip_req` is set to UseLatestAnchoredTip. + // Thus, the query for pox info will be against the canonical Stacks tip, which we expect to succeed. let pox_server_info = RefCell::new(None); test_rpc( "test_rpc_getpoxinfo", - 40000, - 40001, - 50000, - 50001, + 40002, + 40003, + 50002, + 50003, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -3496,7 +3905,7 @@ mod test { ) .unwrap(); *pox_server_info.borrow_mut() = Some(pox_info); - convo_client.new_getpoxinfo(None) + convo_client.new_getpoxinfo(TipRequest::UseLatestAnchoredTip) }, |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { let req_md = http_request.metadata().clone(); @@ -3516,13 +3925,66 @@ mod test { #[test] #[ignore] - fn test_rpc_getneighbors() { + fn test_rpc_getpoxinfo_use_latest_tip() { + // Test v2/pox (aka GetPoxInfo) endpoint. + // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying for pox + // info against the unconfirmed state will succeed. 
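+ // As a rough guide to the TipRequest values these tests exercise: + // UseLatestAnchoredTip runs the query against the canonical anchored Stacks tip; + // UseLatestUnconfirmedTip runs it against the unconfirmed microblock state (the + // `tip=latest` behavior), falling back to the anchored tip when no unconfirmed + // state exists; and SpecificTip(block_id) pins the query to an explicit block, + // replacing the old Option-based tip argument.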
+ let pox_server_info = RefCell::new(None); test_rpc( - "test_rpc_getneighbors", + "test_rpc_getpoxinfo_use_latest_tip", + 40004, + 40005, + 50004, + 50005, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let mut sortdb = peer_server.sortdb.as_mut().unwrap(); + let chainstate = &mut peer_server.stacks_node.as_mut().unwrap().chainstate; + let stacks_block_id = chainstate + .unconfirmed_state + .as_ref() + .unwrap() + .unconfirmed_chain_tip + .clone(); + let pox_info = RPCPoxInfoData::from_db( + &mut sortdb, + chainstate, + &stacks_block_id, + &peer_client.config.burnchain, + ) + .unwrap(); + *pox_server_info.borrow_mut() = Some(pox_info); + convo_client.new_getpoxinfo(TipRequest::UseLatestUnconfirmedTip) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::PoxInfo(response_md, pox_data) => { + assert_eq!(Some((*pox_data).clone()), *pox_server_info.borrow()); + true + } + _ => { + error!("Invalid response: {:?}", &http_response); + false + } + } + }, + ); + } + + #[test] + #[ignore] + fn test_rpc_getneighbors() { + test_rpc( + "test_rpc_getneighbors", 40010, 40011, 50010, 50011, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, ref mut convo_server| { @@ -3544,6 +4006,94 @@ mod test { ); } + #[test] + #[ignore] + fn test_rpc_getheaders() { + let server_blocks_cell = RefCell::new(None); + + test_rpc( + "test_rpc_getheaders", + 40012, + 40013, + 50012, + 50013, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + // have "server" peer store a few contiguous blocks to staging + let mut blocks: Vec = vec![]; + let mut index_block_hashes = vec![]; + for i in 0..25 { + let mut peer_server_block = make_codec_test_block(25); + + peer_server_block.header.total_work.work = (i + 1) as u64; + peer_server_block.header.total_work.burn = (i + 1) as u64; + peer_server_block.header.parent_block = blocks + .last() + .map(|blk| blk.block_hash()) + .unwrap_or(BlockHeaderHash([0u8; 32])); + + let peer_server_consensus_hash = ConsensusHash([(i + 1) as u8; 20]); + let index_block_hash = StacksBlockHeader::make_index_block_hash( + &peer_server_consensus_hash, + &peer_server_block.block_hash(), + ); + + test_debug!("Store peer server index block {:?}", &index_block_hash); + store_staging_block( + peer_server.chainstate(), + &peer_server_consensus_hash, + &peer_server_block, + &ConsensusHash([i as u8; 20]), + 456, + 123, + ); + set_block_processed( + peer_server.chainstate(), + &peer_server_consensus_hash, + &peer_server_block.block_hash(), + true, + ); + + index_block_hashes.push(index_block_hash); + blocks.push(peer_server_block); + } + + let rev_blocks: Vec<_> = blocks.into_iter().rev().collect(); + let rev_ibhs: Vec<_> = index_block_hashes.into_iter().rev().collect(); + + let tip = rev_ibhs[0].clone(); + *server_blocks_cell.borrow_mut() = Some((rev_blocks, rev_ibhs)); + + // now ask for it + convo_client.new_getheaders(25, TipRequest::SpecificTip(tip)) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::Headers(response_md, headers) => { + assert_eq!(headers.len(), 25); + let expected = server_blocks_cell.borrow().clone().unwrap(); + for (i, h) in headers.iter().enumerate() { + assert_eq!(h.header, expected.0[i].header); + assert_eq!(h.consensus_hash, 
ConsensusHash([(25 - i) as u8; 20])); + if i + 1 < headers.len() { + assert_eq!(h.parent_block_id, expected.1[i + 1]); + } + } + true + } + _ => { + error!("Invalid response: {:?}", &http_response); + false + } + } + }, + ); + } + #[test] #[ignore] fn test_rpc_unconfirmed_getblock() { @@ -3555,6 +4105,7 @@ mod test { 40021, 50020, 50021, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -3612,6 +4163,7 @@ mod test { 40031, 50030, 50031, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -3675,6 +4227,7 @@ mod test { 40041, 50040, 50041, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -3787,6 +4340,7 @@ mod test { 40043, 50042, 50043, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -3895,6 +4449,7 @@ mod test { 40051, 50050, 50051, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -3958,6 +4513,7 @@ mod test { 40053, 50052, 50053, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -4030,6 +4586,7 @@ mod test { 40061, 50060, 50061, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -4066,6 +4623,7 @@ mod test { 40071, 50070, 50071, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -4098,10 +4656,11 @@ mod test { fn test_rpc_missing_confirmed_getmicroblocks() { test_rpc( "test_rpc_missing_confirmed_getmicroblocks", - 40070, - 40071, - 50070, - 50071, + 40072, + 40073, + 50072, + 50073, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -4140,6 +4699,7 @@ mod test { 40081, 50080, 50081, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -4187,12 +4747,18 @@ mod test { #[test] #[ignore] fn test_rpc_get_contract_src() { + // Test v2/contracts/source (aka GetContractSrc) endpoint. + // In this test, we don't set any tip parameters, and allow the endpoint to execute against + // the canonical Stacks tip. + // The contract source we are querying for exists in the anchored state, so we expect the + // query to succeed. test_rpc( "test_rpc_get_contract_src", 40090, 40091, 50090, 50091, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -4201,7 +4767,7 @@ mod test { StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") .unwrap(), "hello-world".try_into().unwrap(), - None, + TipRequest::UseLatestAnchoredTip, false, ) }, @@ -4223,13 +4789,61 @@ mod test { #[test] #[ignore] - fn test_rpc_get_contract_src_unconfirmed() { + fn test_rpc_get_contract_src_unconfirmed_with_canonical_tip() { + // Test v2/contracts/source (aka GetContractSrc) endpoint. + // In this test, we don't set any tip parameters, and allow the endpoint to execute against + // the canonical Stacks tip. + // The contract source we are querying for only exists in the unconfirmed state, so we + // expect the query to fail. 
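+ // The unconfirmed contract is only deployed in the microblock stream mined by + // the test harness, so an anchored-tip lookup cannot see it and the server + // responds with a 404 ("No contract source data found").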
test_rpc( - "test_rpc_get_contract_src_unconfirmed", + "test_rpc_get_contract_src_unconfirmed_with_canonical_tip", 40100, 40101, 50100, 50101, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + convo_client.new_getcontractsrc( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + false, + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::NotFound(_, error_str) => { + assert_eq!(error_str, "No contract source data found"); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); + } + + #[test] + #[ignore] + fn test_rpc_get_contract_src_with_unconfirmed_tip() { + // Test v2/contracts/source (aka GetContractSrc) endpoint. + // In this test, we set `tip_req` to be the unconfirmed chain tip. + // The contract source we are querying for exists in the unconfirmed state, so we expect + // the query to succeed. + test_rpc( + "test_rpc_get_contract_src_with_unconfirmed_tip", + 40102, + 40103, + 50102, + 50103, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, @@ -4244,8 +4858,8 @@ mod test { convo_client.new_getcontractsrc( StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") .unwrap(), - "hello-world".try_into().unwrap(), - Some(unconfirmed_tip), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::SpecificTip(unconfirmed_tip), false, ) }, @@ -4253,7 +4867,49 @@ mod test { let req_md = http_request.metadata().clone(); match http_response { HttpResponseType::GetContractSrc(response_md, data) => { - assert_eq!(data.source, TEST_CONTRACT); + assert_eq!(data.source, TEST_CONTRACT_UNCONFIRMED); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); + } + + #[test] + #[ignore] + fn test_rpc_get_contract_src_use_latest_tip() { + // Test v2/contracts/source (aka GetContractSrc) endpoint. + // In this test, we set `tip_req` to UseLatestUnconfirmedTip. + // The contract source we are querying for exists in the unconfirmed state, so we expect + // the query to succeed. 
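+ // Unlike the SpecificTip variant above, no explicit block id is supplied here; + // the server resolves the latest unconfirmed tip on its own.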
+ test_rpc( + "test_rpc_get_contract_src_use_latest_tip", + 40104, + 40105, + 50104, + 50105, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + convo_client.new_getcontractsrc( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::UseLatestUnconfirmedTip, + false, + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::GetContractSrc(response_md, data) => { + assert_eq!(data.source, TEST_CONTRACT_UNCONFIRMED); true } _ => { @@ -4274,6 +4930,94 @@ mod test { 40111, 50110, 50111, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + convo_client.new_getaccount( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestAnchoredTip, + false, + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::GetAccount(response_md, data) => { + assert_eq!(data.nonce, 2); + let balance = u128::from_str_radix(&data.balance[2..], 16).unwrap(); + assert_eq!(balance, 1000000000); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); + } + + /// In this test, the query parameter `tip_req` is set to UseLatestUnconfirmedTip, and so we expect the + /// tip used for the query to be the latest microblock. + /// We check that the account state matches the state in the most recent microblock. + #[test] + #[ignore] + fn test_rpc_get_account_use_latest_tip() { + test_rpc( + "test_rpc_get_account_use_latest_tip", + 40112, + 40113, + 50112, + 50113, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + convo_client.new_getaccount( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestUnconfirmedTip, + false, + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::GetAccount(response_md, data) => { + assert_eq!(data.nonce, 4); + let balance = u128::from_str_radix(&data.balance[2..], 16).unwrap(); + assert_eq!(balance, 999999877); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); + } + + /// In this test, the query parameter `tip_req` is set to UseLatestUnconfirmedTip, but we did not generate + /// microblocks in the rpc test. Thus, we expect the tip used for the query to be the previous + /// anchor block (which is the latest tip). + /// We check that the account state matches the state in the previous anchor block. 
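+ /// For contrast with the two tests above: the anchored view reports nonce 2 and + /// balance 1000000000, while the unconfirmed view reflects the microblock + /// transactions (nonce 4, balance 999999877); with no microblocks mined, the + /// unconfirmed-tip query should simply return the anchored values.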
+ #[test] + #[ignore] + fn test_rpc_get_account_use_latest_tip_no_microblocks() { + test_rpc( + "test_rpc_get_account", + 40114, + 40115, + 50114, + 50115, + false, |ref mut peer_client, ref mut convo_client, ref mut peer_server, ref mut convo_server| { convo_client.new_getaccount( StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") .unwrap() .to_account_principal(), - None, + TipRequest::UseLatestUnconfirmedTip, false, ) }, @@ -4313,6 +5057,7 @@ mod test { 40121, 50120, 50121, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, ref mut convo_server| { @@ -4328,7 +5073,7 @@ mod test { StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") .unwrap() .to_account_principal(), - Some(unconfirmed_tip), + TipRequest::SpecificTip(unconfirmed_tip), false, ) }, @@ -4350,15 +5095,149 @@ mod test { ); } + #[test] + #[ignore] + fn test_rpc_get_data_var() { + test_rpc( + "test_rpc_get_data_var", + 40122, + 40123, + 50122, + 50123, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + convo_client.new_getdatavar( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world".try_into().unwrap(), + "bar".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + false, + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::GetDataVar(response_md, data) => { + assert_eq!( + Value::try_deserialize_hex_untyped(&data.data).unwrap(), + Value::Int(0) + ); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); + } + + #[test] + #[ignore] + fn test_rpc_get_data_var_unconfirmed() { + test_rpc( + "test_rpc_get_data_var_unconfirmed", + 40124, + 40125, + 50124, + 50125, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let unconfirmed_tip = peer_client + .chainstate() + .unconfirmed_state + .as_ref() + .unwrap() + .unconfirmed_chain_tip + .clone(); + convo_client.new_getdatavar( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world".try_into().unwrap(), + "bar".try_into().unwrap(), + TipRequest::SpecificTip(unconfirmed_tip), + false, + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::GetDataVar(response_md, data) => { + assert_eq!( + Value::try_deserialize_hex_untyped(&data.data).unwrap(), + Value::Int(1) + ); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); + } + + #[test] + #[ignore] + fn test_rpc_get_data_var_nonexistant() { + test_rpc( + "test_rpc_get_data_var_nonexistant", + 40125, + 40126, + 50125, + 50126, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + convo_client.new_getdatavar( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world".try_into().unwrap(), + "bar-nonexistant".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + false, + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::NotFound(_, msg) => { + assert_eq!(msg, "Data var not found"); + true + } + _ => { + error!("Invalid response; {:?}", 
&http_response); + false + } + } + }, + ); + } + #[test] + #[ignore] + fn test_rpc_get_map_entry() { + // Test v2/map_entry (aka GetMapEntry) endpoint. + // In this test, we don't set any tip parameters, and we expect that querying for map data + // against the canonical Stacks tip will succeed. test_rpc( "test_rpc_get_map_entry", 40130, 40131, 50130, 50131, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, ref mut convo_server| { @@ -4376,7 +5255,7 @@ mod test { TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) .unwrap(), ), - None, + TipRequest::UseLatestAnchoredTip, false, ) }, @@ -4406,12 +5285,16 @@ mod test { #[test] #[ignore] fn test_rpc_get_map_entry_unconfirmed() { + // Test v2/map_entry (aka GetMapEntry) endpoint. + // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying for map data + // against the unconfirmed state will succeed. test_rpc( "test_rpc_get_map_entry_unconfirmed", 40140, 40141, 50140, 50141, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, ref mut convo_server| { @@ -4436,7 +5319,61 @@ mod test { TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) .unwrap(), ), - Some(unconfirmed_tip), + TipRequest::SpecificTip(unconfirmed_tip), + false, + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::GetMapEntry(response_md, data) => { + assert_eq!( + Value::try_deserialize_hex_untyped(&data.data).unwrap(), + Value::some(Value::Tuple( + TupleData::from_data(vec![("units".into(), Value::Int(1))]) + .unwrap() + )) + .unwrap() + ); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); + } + + #[test] + #[ignore] + fn test_rpc_get_map_entry_use_latest_tip() { + test_rpc( + "test_rpc_get_map_entry_use_latest_tip", + 40142, + 40143, + 50142, + 50143, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let principal = + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(); + convo_client.new_getmapentry( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world".try_into().unwrap(), + "unit-map".try_into().unwrap(), + Value::Tuple( + TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) + .unwrap(), + ), + TipRequest::UseLatestUnconfirmedTip, false, ) }, @@ -4466,12 +5403,16 @@ mod test { #[test] #[ignore] fn test_rpc_get_contract_abi() { + // Test /v2/contracts/interface (aka GetContractABI) endpoint. + // In this test, we don't set any tip parameters, and we expect that querying + // against the canonical Stacks tip will succeed. test_rpc( "test_rpc_get_contract_abi", 40150, 40151, 50150, 50151, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, ref mut convo_server| { @@ -4480,7 +5421,7 @@ mod test { StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") .unwrap(), "hello-world-unconfirmed".try_into().unwrap(), - None, + TipRequest::UseLatestAnchoredTip, ) }, |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { @@ -4502,12 +5443,16 @@ mod test { #[test] #[ignore] fn test_rpc_get_contract_abi_unconfirmed() { + // Test /v2/contracts/interface (aka GetContractABI) endpoint. + // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying + // against the unconfirmed state will succeed. 
test_rpc( "test_rpc_get_contract_abi_unconfirmed", - 40160, - 40161, - 50160, - 50161, + 40152, + 40153, + 50152, + 50153, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, ref mut convo_server| { @@ -4523,7 +5468,41 @@ mod test { StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") .unwrap(), "hello-world-unconfirmed".try_into().unwrap(), - Some(unconfirmed_tip), + TipRequest::SpecificTip(unconfirmed_tip), + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::GetContractABI(response_md, data) => true, + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); + } + + #[test] + #[ignore] + fn test_rpc_get_contract_abi_use_latest_tip() { + test_rpc( + "test_rpc_get_contract_abi_use_latest_tip", + 40154, + 40155, + 50154, + 50155, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + convo_client.new_getcontractabi( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::UseLatestUnconfirmedTip, ) }, |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { @@ -4542,12 +5521,16 @@ mod test { #[test] #[ignore] fn test_rpc_call_read_only() { + // Test /v2/contracts/call-read (aka CallReadOnlyFunction) endpoint. + // In this test, we don't set any tip parameters, and we expect that querying + // against the canonical Stacks tip will succeed. test_rpc( "test_rpc_call_read_only", 40170, 40171, 50170, 50171, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, ref mut convo_server| { @@ -4561,7 +5544,7 @@ mod test { .to_account_principal(), "ro-test".try_into().unwrap(), vec![], - None, + TipRequest::UseLatestAnchoredTip, ) }, |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { @@ -4583,6 +5566,57 @@ mod test { ); } + #[test] + #[ignore] + fn test_rpc_call_read_only_use_latest_tip() { + // Test /v2/contracts/call-read (aka CallReadOnlyFunction) endpoint. + // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying + // against the unconfirmed state will succeed. 
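+ // `ro-test` is defined by TEST_CONTRACT_UNCONFIRMED and simply returns (ok 1), + // so the checker below expects Value::okay(Value::Int(1)).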
+ test_rpc( + "test_rpc_call_read_only_use_latest_tip", + 40172, + 40173, + 50172, + 50173, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + convo_client.new_callreadonlyfunction( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + "ro-test".try_into().unwrap(), + vec![], + TipRequest::UseLatestUnconfirmedTip, + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + match http_response { + HttpResponseType::CallReadOnlyFunction(response_md, data) => { + assert!(data.okay); + assert_eq!( + Value::try_deserialize_hex_untyped(&data.result.clone().unwrap()) + .unwrap(), + Value::okay(Value::Int(1)).unwrap() + ); + assert!(data.cause.is_none()); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); + } + #[test] #[ignore] fn test_rpc_call_read_only_unconfirmed() { @@ -4592,6 +5626,7 @@ mod test { 40181, 50180, 50181, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, ref mut convo_server| { @@ -4612,7 +5647,7 @@ mod test { .to_account_principal(), "ro-test".try_into().unwrap(), vec![], - Some(unconfirmed_tip), + TipRequest::SpecificTip(unconfirmed_tip), ) }, |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { @@ -4642,10 +5677,11 @@ mod test { fn test_rpc_getattachmentsinv_limit_reached() { test_rpc( "test_rpc_getattachmentsinv", - 40000, - 40001, - 50000, - 50001, + 40190, + 40191, + 50190, + 50191, + true, |ref mut peer_client, ref mut convo_client, ref mut peer_server, ref mut convo_server| { @@ -4657,7 +5693,7 @@ mod test { let req_md = http_request.metadata().clone(); println!("{:?}", http_response); match http_response { - HttpResponseType::ServerError(_, msg) => { + HttpResponseType::BadRequest(_, msg) => { assert_eq!( msg, "Number of attachment inv pages is limited by 8 per request" @@ -4669,4 +5705,77 @@ mod test { }, ); } + + #[test] + #[ignore] + fn test_rpc_mempool_query_txtags() { + test_rpc( + "test_rpc_mempool_query_txtags", + 40813, + 40814, + 50813, + 50814, + false, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + convo_client.new_mempool_query( + MemPoolSyncData::TxTags([0u8; 32], vec![]), + Some(Txid([0u8; 32])), + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + println!("{:?}", http_response); + match http_response { + HttpResponseType::MemPoolTxs(_, _, txs) => { + // got everything + assert_eq!(txs.len(), 10); + true + } + _ => false, + } + }, + ); + } + + #[test] + #[ignore] + fn test_rpc_mempool_query_bloom() { + test_rpc( + "test_rpc_mempool_query_bloom", + 40815, + 40816, + 50815, + 50816, + false, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + // empty bloom filter + convo_client.new_mempool_query( + MemPoolSyncData::BloomFilter(BloomFilter::new( + BLOOM_COUNTER_ERROR_RATE, + MAX_BLOOM_COUNTER_TXS, + BloomNodeHasher::new(&[0u8; 32]), + )), + Some(Txid([0u8; 32])), + ) + }, + |ref http_request, ref http_response, ref mut peer_client, ref mut peer_server| { + let req_md = http_request.metadata().clone(); + println!("{:?}", http_response); + match http_response { + HttpResponseType::MemPoolTxs(_, _, 
txs) => { + // got everything + assert_eq!(txs.len(), 10); + true + } + _ => false, + } + }, + ); + } } diff --git a/src/net/server.rs b/src/net/server.rs index cb219869f5..bdf62bab08 100644 --- a/src/net/server.rs +++ b/src/net/server.rs @@ -215,6 +215,7 @@ impl HttpPeer { fn register_http( &mut self, network_state: &mut NetworkState, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, event_id: usize, mut socket: mio_net::TcpStream, @@ -269,7 +270,7 @@ impl HttpPeer { } // prime the socket - match HttpPeer::saturate_http_socket(&mut socket, &mut new_convo, chainstate) { + match HttpPeer::saturate_http_socket(&mut socket, &mut new_convo, mempool, chainstate) { Ok(_) => {} Err(e) => { let _ = network_state.deregister(event_id, &socket); @@ -344,11 +345,12 @@ impl HttpPeer { pub fn saturate_http_socket( client_sock: &mut mio::net::TcpStream, convo: &mut ConversationHttp, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, ) -> Result<(), net_error> { // saturate the socket loop { - let send_res = convo.send(client_sock, chainstate); + let send_res = convo.send(client_sock, mempool, chainstate); match send_res { Err(e) => { debug!("Failed to send data to socket {:?}: {:?}", &client_sock, &e); @@ -370,6 +372,7 @@ impl HttpPeer { fn process_new_sockets( &mut self, network_state: &mut NetworkState, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, poll_state: &mut NetworkPollState, ) -> Result, net_error> { @@ -402,9 +405,15 @@ impl HttpPeer { continue; } - if let Err(_e) = - self.register_http(network_state, chainstate, event_id, client_sock, None, None) - { + if let Err(_e) = self.register_http( + network_state, + mempool, + chainstate, + event_id, + client_sock, + None, + None, + ) { // NOTE: register_http will deregister the socket for us continue; } @@ -453,8 +462,12 @@ impl HttpPeer { ), ) { Ok(_) => { - match HttpPeer::saturate_http_socket(client_sock, convo, chainstate) - { + match HttpPeer::saturate_http_socket( + client_sock, + convo, + mempool, + chainstate, + ) { Ok(_) => {} Err(e) => { debug!( @@ -504,7 +517,7 @@ impl HttpPeer { if !convo_dead { // (continue) sending out data in this conversation, if the conversation is still // ongoing - match HttpPeer::saturate_http_socket(client_sock, convo, chainstate) { + match HttpPeer::saturate_http_socket(client_sock, convo, mempool, chainstate) { Ok(_) => {} Err(e) => { debug!( @@ -528,6 +541,7 @@ impl HttpPeer { fn process_connecting_sockets( &mut self, network_state: &mut NetworkState, + mempool: &MemPoolDB, chainstate: &mut StacksChainState, poll_state: &mut NetworkPollState, ) -> () { @@ -535,10 +549,12 @@ impl HttpPeer { if self.connecting.contains_key(event_id) { let (socket, data_url, initial_request_opt, _) = self.connecting.remove(event_id).unwrap(); + debug!("HTTP event {} connected ({:?})", event_id, &data_url); if let Err(_e) = self.register_http( network_state, + mempool, chainstate, *event_id, socket, @@ -623,12 +639,16 @@ impl HttpPeer { /// Flush outgoing replies, but don't block. /// Drop broken handles. /// Return the list of conversation event IDs to close (i.e. 
they're broken, or the request is done) - fn flush_conversations(&mut self, chainstate: &mut StacksChainState) -> Vec { + fn flush_conversations( + &mut self, + mempool: &MemPoolDB, + chainstate: &mut StacksChainState, + ) -> Vec { let mut close = vec![]; // flush each outgoing conversation for (event_id, ref mut convo) in self.peers.iter_mut() { - match convo.try_flush(chainstate) { + match convo.try_flush(mempool, chainstate) { Ok(_) => {} Err(_e) => { info!("Broken HTTP connection {:?}: {:?}", convo, &_e); @@ -662,10 +682,10 @@ impl HttpPeer { handler_args: &RPCHandlerArgs, ) -> Result, net_error> { // set up new inbound conversations - self.process_new_sockets(network_state, chainstate, &mut poll_state)?; + self.process_new_sockets(network_state, mempool, chainstate, &mut poll_state)?; // set up connected sockets - self.process_connecting_sockets(network_state, chainstate, &mut poll_state); + self.process_connecting_sockets(network_state, mempool, chainstate, &mut poll_state); // run existing conversations, clear out broken ones, and get back messages forwarded to us let (stacks_msgs, error_events) = self.process_ready_sockets( @@ -682,7 +702,7 @@ impl HttpPeer { } // move conversations along - let close_events = self.flush_conversations(chainstate); + let close_events = self.flush_conversations(mempool, chainstate); for close_event in close_events { debug!("Close HTTP connection on event {}", close_event); self.deregister_http(network_state, close_event); @@ -717,7 +737,6 @@ mod test { use crate::types::chainstate::BlockHeaderHash; use burnchains::*; use chainstate::stacks::db::blocks::test::*; - use chainstate::stacks::db::BlockStreamData; use chainstate::stacks::db::StacksChainState; use chainstate::stacks::test::*; use chainstate::stacks::Error as chain_error; diff --git a/src/types/chainstate.rs b/src/types/chainstate.rs index 7e9a07e899..013d26a187 100644 --- a/src/types/chainstate.rs +++ b/src/types/chainstate.rs @@ -9,6 +9,11 @@ use util::hash::{to_hex, Hash160, Sha512Trunc256Sum, HASH160_ENCODED_SIZE}; use util::secp256k1::MessageSignature; use util::vrf::VRFProof; +use serde::de::Deserialize; +use serde::de::Error as de_Error; +use serde::ser::Error as ser_Error; +use serde::Serialize; + use types::proof::TrieHash; #[derive(Serialize, Deserialize)] diff --git a/src/types/proof.rs b/src/types/proof.rs index 07b09aba83..a7262c1d89 100644 --- a/src/types/proof.rs +++ b/src/types/proof.rs @@ -10,6 +10,12 @@ impl_array_hexstring_fmt!(TrieHash); impl_byte_array_newtype!(TrieHash, u8, 32); impl_byte_array_serde!(TrieHash); +impl Default for TrieHash { + fn default() -> TrieHash { + TrieHash([0x00; 32]) + } +} + pub const TRIEHASH_ENCODED_SIZE: usize = 32; #[derive(Debug)] diff --git a/src/util/bloom.rs b/src/util/bloom.rs new file mode 100644 index 0000000000..c485767850 --- /dev/null +++ b/src/util/bloom.rs @@ -0,0 +1,1016 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2021 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::cmp; +use std::collections::HashMap; +use std::hash::Hasher; +use std::io::{Read, Seek, SeekFrom, Write}; + +use util::hash::Sha512Trunc256Sum; + +use siphasher::sip::SipHasher; // this is SipHash-2-4 + +use codec::Error as codec_error; +use codec::StacksMessageCodec; +use codec::{read_next, write_next}; + +use util::db::query_expect_row; +use util::db::Error as db_error; +use util::db::{DBConn, DBTx}; + +use util::hash::to_hex; + +use rusqlite::blob::Blob; +use rusqlite::Error as sqlite_error; +use rusqlite::Row; +use rusqlite::ToSql; +use rusqlite::NO_PARAMS; + +use rand::prelude::*; +use rand::thread_rng; + +/// A field of bits of known length! +#[derive(Debug, Clone, PartialEq)] +struct BitField(Vec, u32); + +impl BitField { + /// Make a new bitfield with sz bits represented (rounded up to the nearest byte in space used) + pub fn new(sz: u32) -> BitField { + BitField(vec![0u8; BITVEC_LEN!(sz) as usize], sz) + } + + pub fn num_bits(&self) -> u32 { + self.1 + } + + pub fn test(&self, bit: u32) -> bool { + if bit >= self.1 { + panic!("Attempted to read beyond end of bitfield"); + } + self.0[(bit / 8) as usize] & (1u8 << ((bit % 8) as u8)) != 0 + } + + pub fn set(&mut self, bit: u32) { + if bit >= self.1 { + panic!("Attempted to write beyond end of bitfield"); + } + self.0[(bit / 8) as usize] |= 1u8 << ((bit % 8) as u8); + } + + pub fn clear(&mut self, bit: u32) { + if bit >= self.1 { + panic!("Attempted to write beyond end of bitfield"); + } + self.0[(bit / 8) as usize] &= !(1u8 << ((bit % 8) as u8)); + } +} + +/// Codec enum for how a bloom filter bitfield's fields are encoded +#[repr(u8)] +#[derive(Debug, Clone, PartialEq)] +enum BitFieldEncoding { + Sparse = 0x01, + Full = 0x02, +} + +/// Encode the inner count array, using a sparse representation if it would save space +fn encode_bitfield(fd: &mut W, bytes: &Vec) -> Result<(), codec_error> { + let mut num_filled = 0; + for bits in bytes.iter() { + if *bits > 0 { + num_filled += 1; + } + } + + if num_filled * 5 + 4 < bytes.len() { + // more efficient to encode as (4-byte-index, 1-byte-value) pairs, with an extra 4-byte header + write_next(fd, &(BitFieldEncoding::Sparse as u8))?; + write_next(fd, &(bytes.len() as u32))?; + write_next(fd, &(num_filled as u32))?; + for (i, bits) in bytes.iter().enumerate() { + if *bits > 0 { + write_next(fd, &(i as u32))?; + write_next(fd, bits)?; + } + } + } else { + // more efficient to encode as-is + // (note that the array has a 4-byte length prefix) + write_next(fd, &(BitFieldEncoding::Full as u8))?; + write_next(fd, bytes)?; + } + Ok(()) +} + +/// Decode the inner count array, depending on whether or not it's sparse +fn decode_bitfield(fd: &mut R) -> Result, codec_error> { + let encoding: u8 = read_next(fd)?; + match encoding { + x if x == BitFieldEncoding::Sparse as u8 => { + // sparse encoding + let vec_len: u32 = read_next(fd)?; + let num_filled: u32 = read_next(fd)?; + + let mut ret = vec![0u8; vec_len as usize]; + for _ in 0..num_filled { + let idx: u32 = read_next(fd)?; + if idx >= vec_len { + return Err(codec_error::DeserializeError(format!( + "Index overflow: {} >= {}", + idx, vec_len + ))); + } + let value: u8 = read_next(fd)?; + ret[idx as usize] = value; + } + + Ok(ret) + } + x if x == BitFieldEncoding::Full as u8 => { + // full encoding + let ret: Vec = read_next(fd)?; + Ok(ret) + } + _ => Err(codec_error::DeserializeError(format!( + "Unrecognized bloom count 
encoding: {}", + encoding + ))), + } +} + +impl StacksMessageCodec for BitField { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &self.1)?; + // no need to write the array length prefix -- we already know it, per the above + encode_bitfield(fd, &self.0)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let num_bits: u32 = read_next(fd)?; + let bits: Vec = decode_bitfield(fd)?; + Ok(BitField(bits, num_bits)) + } +} + +/// A node-specific collection of Bloom function hashes. +/// Works by using a node-local salt to ensure that the hash functions used to insert data into the +/// bloom structure will be unique (w.h.p.) to this node. +#[derive(Debug, Clone, PartialEq)] +pub struct BloomNodeHasher { + seed: [u8; 32], +} + +impl std::fmt::Display for BloomNodeHasher { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(f, "BloomNodeHasher({})", to_hex(&self.seed)) + } +} + +impl BloomNodeHasher { + pub fn new(node_seed: &[u8]) -> BloomNodeHasher { + let seed = Sha512Trunc256Sum::from_data(node_seed).0; + BloomNodeHasher { seed } + } + + pub fn new_random() -> BloomNodeHasher { + let mut seed = [0u8; 32]; + thread_rng().fill(&mut seed[..]); + BloomNodeHasher::new(&seed) + } +} + +impl StacksMessageCodec for BloomNodeHasher { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &(BloomHashID::BloomNodeHasher as u8))?; + write_next(fd, &self.seed)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let hasher_type_u8: u8 = read_next(fd)?; + match hasher_type_u8 as u8 { + x if x == BloomHashID::BloomNodeHasher as u8 => { + let seed: [u8; 32] = read_next(fd)?; + Ok(BloomNodeHasher { seed }) + } + _ => Err(codec_error::DeserializeError(format!( + "Not a supported bloom hasher type ID: {}", + hasher_type_u8 + ))), + } + } +} + +/// A trait for picking a bin that will be set in a bloom struct +pub trait BloomHash { + fn get_seed(&self) -> &[u8; 32]; + fn pick_bin(&self, count: u32, data: &[u8], num_bins: u32) -> u32; +} + +/// Basic bloom filter with a given hash implementation that can suitably provide a given number of +/// distinct hash functions. +#[derive(Debug, Clone, PartialEq)] +pub struct BloomFilter { + hasher: H, + bits: BitField, + num_hashes: u32, +} + +impl std::fmt::Display for BloomFilter { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!( + f, + "BloomFilter({},nbits={},bits={})", + &self.hasher, + self.bits.1, + Sha512Trunc256Sum::from_data(&self.bits.0) + ) + } +} + +/// Parameter calculation for bloom filters. 
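+/// Uses the standard sizing arithmetic for capacity n and target false-positive rate p: +/// bits m = ceil(-n * ln(p) / (ln 2)^2) and hash count k = round((m / n) * ln 2); for +/// example, n = 8192 and p = 0.001 give m = 117_782 and k = 10 (see test_bloom_hash_count +/// below).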
+/// Returns (number of bins, number of hash functions) +fn bloom_hash_count(error_rate: f64, max_items: u32) -> (u32, u32) { + // https://stackoverflow.com/questions/658439/how-many-hash-functions-does-my-bloom-filter-need + let num_slots = + (((-(max_items as f64)) * error_rate.ln()) / (2.0f64.ln() * 2.0f64.ln())).ceil() as u32; + let num_hashes = ((num_slots as f64) / (max_items as f64) * 2.0f64.ln()).round() as u32; + (num_slots, num_hashes) +} + +/// Codec enum for the types of hashers we support +#[repr(u8)] +#[derive(Debug, Clone, PartialEq)] +enum BloomHashID { + BloomNodeHasher = 0x01, +} + +impl BloomFilter { + /// Make a new bloom filter with a given error rate and expected maximum size + pub fn new(error_rate: f64, max_items: u32, hasher: H) -> BloomFilter { + let (num_bits, num_hashes) = bloom_hash_count(error_rate, max_items); + BloomFilter { + hasher, + bits: BitField::new(num_bits), + num_hashes, + } + } + + /// Add a raw item, represented as a byte array (e.g. a serialized struct, perhaps) + pub fn insert_raw(&mut self, item: &[u8]) -> bool { + let mut false_positive = true; + for i in 0..self.num_hashes { + let slot = self.hasher.pick_bin(i, item, self.bits.num_bits()); + assert!( + slot < self.bits.num_bits(), + "BUG: hasher selected a slot outside the bitfield: {}", + slot + ); + + if false_positive && !self.bits.test(slot) { + false_positive = false; + } + + self.bits.set(slot); + } + false_positive + } + + /// Test to see if a given item (a byte array) is likely present + pub fn contains_raw(&self, item: &[u8]) -> bool { + for i in 0..self.num_hashes { + let slot = self.hasher.pick_bin(i, item, self.bits.num_bits()); + assert!( + slot < self.bits.num_bits(), + "BUG: hasher selected a slot outside the bitfield: {}", + slot + ); + + if !self.bits.test(slot) { + // definitely not here + return false; + } + } + true + } +} + +impl StacksMessageCodec for BloomFilter { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &(BloomHashID::BloomNodeHasher as u8))?; + write_next(fd, &self.hasher.seed)?; + write_next(fd, &self.num_hashes)?; + write_next(fd, &self.bits)?; + Ok(()) + } + + fn consensus_deserialize( + fd: &mut R, + ) -> Result, codec_error> { + let hasher_type_u8: u8 = read_next(fd)?; + match hasher_type_u8 as u8 { + x if x == BloomHashID::BloomNodeHasher as u8 => { + let seed: [u8; 32] = read_next(fd)?; + let num_hashes: u32 = read_next(fd)?; + let bits: BitField = read_next(fd)?; + Ok(BloomFilter { + hasher: BloomNodeHasher { seed }, + bits, + num_hashes, + }) + } + _ => Err(codec_error::DeserializeError(format!( + "Not a supported bloom hasher type ID: {}", + hasher_type_u8 + ))), + } + } +} + +/// Disk-backed counting bloom filter with a given set of hash functions. Uses a sqlite3 blob of +/// 32-bit bins to count things. Meant to work alongside an existing database, in its own table +/// (e.g. the mempool). 
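+/// A rough usage sketch, assuming an open rusqlite connection `conn` (the table +/// name and `item` bytes are illustrative, not part of the API): +/// let mut tx = tx_begin_immediate(&mut conn)?; +/// let bloom = BloomCounter::new(&mut tx, "txid_bloom", 0.001, 8192, BloomNodeHasher::new_random())?; +/// bloom.insert_raw(&mut tx, &item)?; +/// assert!(bloom.count_raw(&tx, &item)? > 0); +/// tx.commit()?;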
+#[derive(Debug, Clone, PartialEq)] +pub struct BloomCounter { + hasher: H, + table_name: String, + num_bins: u32, + num_hashes: u32, + counts_rowid: u32, +} + +impl BloomCounter { + /// Make a new bloom counter with the given error rate and expected number of items + pub fn new( + tx: &mut DBTx, + table_name: &str, + error_rate: f64, + max_items: u32, + hasher: H, + ) -> Result, db_error> { + let sql = format!("CREATE TABLE IF NOT EXISTS {}(counts BLOB NOT NULL, num_bins INTEGER NOT NULL, num_hashes INTEGER NOT NULL, hasher BLOB NOT NULL);", table_name); + tx.execute(&sql, NO_PARAMS).map_err(db_error::SqliteError)?; + + let (num_bits, num_hashes) = bloom_hash_count(error_rate, max_items); + let counts_vec = vec![0u8; (num_bits * 4) as usize]; + let hasher_vec = hasher.serialize_to_vec(); + + let sql = format!( + "INSERT INTO {} (counts, num_bins, num_hashes, hasher) VALUES (?1, ?2, ?3, ?4)", + table_name + ); + let args: &[&dyn ToSql] = &[&counts_vec, &num_bits, &num_hashes, &hasher_vec]; + + tx.execute(&sql, args).map_err(db_error::SqliteError)?; + + let sql = format!("SELECT rowid FROM {}", table_name); + let counts_rowid: u64 = query_expect_row(&tx, &sql, NO_PARAMS)? + .expect("BUG: inserted bloom counter but can't find row ID"); + + Ok(BloomCounter { + hasher, + table_name: table_name.to_string(), + num_bins: num_bits, + num_hashes, + counts_rowid: counts_rowid as u32, + }) + } + + pub fn try_load(conn: &DBConn, table_name: &str) -> Result>, db_error> { + let sql = format!("SELECT rowid,* FROM {}", table_name); + let result = conn.query_row_and_then(&sql, NO_PARAMS, |row| { + let mut hasher_blob = row + .get_raw("hasher") + .as_blob() + .expect("Unable to read hasher as blob"); + let hasher = + H::consensus_deserialize(&mut hasher_blob).map_err(|_| db_error::ParseError)?; + let num_bins: u32 = row.get_unwrap("num_bins"); + let num_hashes: u32 = row.get_unwrap("num_hashes"); + let counts_rowid: u32 = row.get_unwrap("rowid"); + Ok(BloomCounter { + hasher, + table_name: table_name.to_string(), + num_bins, + num_hashes, + counts_rowid, + }) + }); + match result { + Ok(x) => Ok(Some(x)), + Err(db_error::SqliteError(sqlite_error::QueryReturnedNoRows)) => Ok(None), + Err(e) => Err(e), + } + } + + pub fn get_seed(&self) -> &[u8; 32] { + self.hasher.get_seed() + } + + /// Get a handle to the underlying bins list + fn open_counts_blob<'a>( + &self, + conn: &'a DBConn, + readwrite: bool, + ) -> Result, db_error> { + let blob = conn.blob_open( + rusqlite::DatabaseName::Main, + &self.table_name, + "counts", + self.counts_rowid.into(), + !readwrite, + )?; + Ok(blob) + } + + /// Get the 32-bit counter at a particular slot. It's loaded from a big-endian representation + /// within the readable handle, at offset 4*slot. + fn get_counts_bin(counts_blob: &mut R, slot: u32) -> u32 { + counts_blob + .seek(SeekFrom::Start((slot as u64) * 4)) + .expect("BUG: failed to seek on counts blob"); + + let mut bytes = [0u8; 4]; + counts_blob + .read_exact(&mut bytes[..]) + .expect("BUG: failed to read from counts blob"); + + u32::from_be_bytes(bytes) + } + + /// Write the 32-bit counter at a particular slot. It's stored in a big-endian representation + /// within the writable handle, at offset 4*slot. 
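+ /// (For illustration: writing a count of 5 to slot 2 stores the bytes 00 00 00 05 + /// at blob offset 8.)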
+ fn set_counts_bin(counts_blob: &mut W, slot: u32, count: u32) { + counts_blob + .seek(SeekFrom::Start((slot as u64) * 4)) + .expect("BUG: failed to seek on counts blob"); + + let bytes = count.to_be_bytes(); + counts_blob + .write_all(&bytes) + .expect("BUG: failed to write to counts blob"); + } + + /// Add a raw item to the bloom counter, and return the count it likely has (as an upper bound) + /// Returns 0 if this item is absolutely new. + /// Returns >0 if this item appears represented already. + pub fn insert_raw(&self, tx: &mut DBTx, item: &[u8]) -> Result { + let mut count = u32::MAX; + let mut fd = self.open_counts_blob(tx, true)?; + + for i in 0..self.num_hashes { + let slot = self.hasher.pick_bin(i, item, self.num_bins); + assert!( + slot < self.num_bins, + "BUG: hasher selected a slot outside the bloom counters" + ); + + let bin = BloomCounter::::get_counts_bin(&mut fd, slot); + count = cmp::min(bin, count); + BloomCounter::::set_counts_bin(&mut fd, slot, bin.saturating_add(1)); + } + + Ok(count) + } + + /// Return the upper bound on the number of times this item has been inserted. + /// It will be 0 if it was never inserted (or was inserted and removed). + pub fn count_raw(&self, conn: &DBConn, item: &[u8]) -> Result { + let mut count = u32::MAX; + let mut fd = self.open_counts_blob(conn, false)?; + + for i in 0..self.num_hashes { + let slot = self.hasher.pick_bin(i, item, self.num_bins); + assert!( + slot < self.num_bins, + "BUG: hasher selected a slot outside the bloom counters" + ); + + let bin = BloomCounter::::get_counts_bin(&mut fd, slot); + if bin == 0 { + return Ok(0); + } else { + count = cmp::min(bin, count); + } + } + Ok(count) + } + + /// Remove an item from the bloom filter. In order to use this correctly, you must ensure that + /// it was actually inserted via insert_raw() earlier. Returns the new lower bound on how many + /// times this item was inserted. + pub fn remove_raw(&self, tx: &mut DBTx, item: &[u8]) -> Result { + if self.count_raw(tx, item)? == 0 { + return Ok(0); + } + + let mut count = u32::MAX; + let mut fd = self.open_counts_blob(tx, true)?; + + for i in 0..self.num_hashes { + let slot = self.hasher.pick_bin(i, item, self.num_bins); + assert!( + slot < self.num_bins, + "BUG: hasher selected a slot outside the bloom counters" + ); + + let bin = BloomCounter::::get_counts_bin(&mut fd, slot); + if bin > 0 { + let new_bin = bin - 1; + BloomCounter::::set_counts_bin(&mut fd, slot, new_bin); + count = cmp::min(new_bin, count); + } else { + panic!("BUG: item is present in the bloom counter, but has a zero count (i = {}, slot = {})", i, slot); + } + } + + Ok(count) + } + + /// Extract a bloom filter from the bloom counter. + /// There will be a 1-bit if the counter is positive + pub fn to_bloom_filter(&self, conn: &DBConn) -> Result, db_error> { + let new_hasher = self.hasher.clone(); + let mut bf = BitField::new(self.num_bins); + + let mut counts_blob = vec![0u8; (self.num_bins as usize) * 4]; + let mut fd = self.open_counts_blob(conn, false)?; + + fd.read_exact(&mut counts_blob).map_err(db_error::IOError)?; + + for i in 0..(self.num_bins as usize) { + if counts_blob[4 * i] > 0 + || counts_blob[4 * i + 1] > 0 + || counts_blob[4 * i + 2] > 0 + || counts_blob[4 * i + 3] > 0 + { + bf.set(i as u32); + } + } + + Ok(BloomFilter { + hasher: new_hasher, + bits: bf, + num_hashes: self.num_hashes, + }) + } +} + +impl BloomHash for BloomNodeHasher { + /// Pick a bin using the node seed and the count. 
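+ /// The `count` argument is the index of the bloom function being evaluated; + /// callers pass 0..num_hashes, so one seeded SipHash family stands in for all of + /// the structure's hash functions.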
+ /// Uses SipHash-2-4, with the count and seed used to set up the hash's initial state (thereby + /// ensuring that a different initial state -- tantamount to a different hash function -- + /// will be used for each of the bloom struct's bins). + /// A cryptographic hash isn't helpful here (and would be considerably slower), since the + /// number of different bins is small enough that someone who's hell-bent on selecting items to + /// create false positives would be able to do so no matter what we do (so why pay a + /// performance penalty if it won't help?). + fn pick_bin(&self, count: u32, data: &[u8], num_bins: u32) -> u32 { + let mut initial_state = Vec::with_capacity(36 + data.len()); + initial_state.extend_from_slice(&count.to_be_bytes()); + initial_state.extend_from_slice(&self.seed); + initial_state.extend_from_slice(data); + + let mut hasher = SipHasher::new(); + hasher.write(&initial_state); + + // be sure to remove modulus bias + loop { + let result_64 = hasher.finish(); + let result = (result_64 & 0x00000000ffffffff) as u32; + if result < u32::MAX - (u32::MAX % num_bins) { + return result % num_bins; + } else { + hasher.write_u64(result_64); + } + } + } + + fn get_seed(&self) -> &[u8; 32] { + &self.seed + } +} + +#[cfg(test)] +pub mod test { + use super::*; + + use std::fs; + + use rand::prelude::*; + use rand::thread_rng; + + use rusqlite::OpenFlags; + + use util::db::{sql_pragma, tx_begin_immediate, tx_busy_handler, DBConn, DBTx}; + + pub fn setup_bloom_counter(db_name: &str) -> DBConn { + let db_path = format!("/tmp/test_bloom_filter_{}.db", db_name); + if fs::metadata(&db_path).is_ok() { + fs::remove_file(&db_path).unwrap(); + } + let open_flags = OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE; + + let conn = DBConn::open_with_flags(&db_path, open_flags).unwrap(); + + conn.busy_handler(Some(tx_busy_handler)).unwrap(); + sql_pragma(&conn, "journal_mode", &"WAL".to_string()).unwrap(); + conn + } + + #[test] + fn test_bloom_hash_count() { + // https://hur.st/bloomfilter/?n=8192&p=0.001&m=&k=8 + let (num_bits, num_hashes) = bloom_hash_count(0.001, 8192); + assert_eq!(num_bits, 117_782); + assert_eq!(num_hashes, 10); + + // https://hur.st/bloomfilter/?n=8192&p=1.0E-7&m=&k= + let (num_bits, num_hashes) = bloom_hash_count(0.0000001, 8192); + assert_eq!(num_bits, 274_823); + assert_eq!(num_hashes, 23); + } + + #[test] + fn test_bloom_filter_has_all_inserted_items_with_error_rate() { + let num_items = 8192; + let err_rate = 0.001; + + let hasher = BloomNodeHasher::new(&[0u8; 32]); + let mut bf = BloomFilter::new(err_rate, num_items, hasher); + + let mut fp_count = 0; // false positives + + for i in 0..num_items { + let mut random_data = [0u8; 32]; + thread_rng().fill(&mut random_data[..]); + + if bf.contains_raw(&random_data) { + fp_count += 1; + } + + bf.insert_raw(&random_data); + assert!(bf.contains_raw(&random_data)); + } + + let calculated_error_rate = (fp_count as f64) / (num_items as f64); + eprintln!( + "fp_count = {}, num_items = {}, err_rate = {}, calculated_error_rate = {}", + fp_count, num_items, err_rate, calculated_error_rate + ); + assert!(calculated_error_rate <= err_rate); + } + + #[test] + fn test_bloom_counter_has_all_inserted_items_with_error_rate() { + let num_items = 8192; + let err_rate = 0.001; + + let mut db = setup_bloom_counter("has_all_inserted_items_with_error_rate"); + let hasher = BloomNodeHasher::new(&[0u8; 32]); + + let bf = { + let mut tx = tx_begin_immediate(&mut db).unwrap(); + let bf = + BloomCounter::new(&mut tx, 
"bloom_counter", err_rate, num_items, hasher).unwrap(); + tx.commit().unwrap(); + bf + }; + + let mut fp_count = 0; // false positives + + let mut tx = tx_begin_immediate(&mut db).unwrap(); + for i in 0..num_items { + let mut random_data = [0u8; 32]; + thread_rng().fill(&mut random_data[..]); + + if bf.count_raw(&tx, &random_data).unwrap() > 0 { + fp_count += 1; + } + + bf.insert_raw(&mut tx, &random_data).unwrap(); + assert!(bf.count_raw(&tx, &random_data).unwrap() > 0); + } + tx.commit().unwrap(); + + let calculated_error_rate = (fp_count as f64) / (num_items as f64); + eprintln!( + "fp_count = {}, num_items = {}, err_rate = {}, calculated_error_rate = {}", + fp_count, num_items, err_rate, calculated_error_rate + ); + assert!(calculated_error_rate <= err_rate); + } + + #[test] + fn test_bloom_counter_is_invertible() { + let num_items = 8192; + let err_rate = 0.001; + + let mut db = setup_bloom_counter("counter_is_invertible"); + + let hasher = BloomNodeHasher::new(&[0u8; 32]); + + let bf = { + let mut tx = tx_begin_immediate(&mut db).unwrap(); + let bf = + BloomCounter::new(&mut tx, "bloom_counter", err_rate, num_items, hasher).unwrap(); + tx.commit().unwrap(); + bf + }; + + let mut data = vec![]; + let mut fp_count = 0; // false positives + + let mut tx = tx_begin_immediate(&mut db).unwrap(); + for i in 0..num_items { + let mut random_data = [0u8; 32]; + thread_rng().fill(&mut random_data[..]); + + if bf.count_raw(&tx, &random_data).unwrap() > 0 { + fp_count += 1; + } + + bf.insert_raw(&mut tx, &random_data).unwrap(); + assert!(bf.count_raw(&tx, &random_data).unwrap() > 0); + + data.push(random_data); + } + tx.commit().unwrap(); + + let calculated_error_rate = (fp_count as f64) / (num_items as f64); + eprintln!( + "fp_count = {}, num_items = {}, err_rate = {}, calculated_error_rate = {}", + fp_count, num_items, err_rate, calculated_error_rate + ); + assert!(calculated_error_rate <= err_rate); + + let mut tx = tx_begin_immediate(&mut db).unwrap(); + + fp_count = 0; + for random_data in data.iter() { + bf.remove_raw(&mut tx, random_data).unwrap(); + if bf.count_raw(&tx, random_data).unwrap() > 0 { + fp_count += 1; + } + } + tx.commit().unwrap(); + + let calculated_error_rate = (fp_count as f64) / (num_items as f64); + eprintln!( + "fp_count = {}, num_items = {}, err_rate = {}, calculated_error_rate = {}", + fp_count, num_items, err_rate, calculated_error_rate + ); + assert!(calculated_error_rate <= err_rate); + + // everything is removed + for random_data in data.iter() { + assert_eq!(bf.count_raw(&db, random_data).unwrap(), 0); + } + } + + #[test] + fn test_bloom_counter_is_invertible_over_iterations() { + let num_items = 8192; + let err_rate = 0.001; + + let mut db = setup_bloom_counter("counter_is_invertible_over_iterations"); + + let hasher = BloomNodeHasher::new(&[0u8; 32]); + + let bf = { + let mut tx = tx_begin_immediate(&mut db).unwrap(); + let bf = + BloomCounter::new(&mut tx, "bloom_counter", err_rate, num_items, hasher).unwrap(); + tx.commit().unwrap(); + bf + }; + + let mut data = vec![]; + let mut fp_count = 0; // false positives + let remove_delay = 2; + + for i in 0..(remove_delay * 10) { + eprintln!("Add {} items for pass {}", num_items / remove_delay, i); + let mut tx = tx_begin_immediate(&mut db).unwrap(); + for i in 0..(num_items / remove_delay) { + let mut random_data = [0u8; 32]; + thread_rng().fill(&mut random_data[..]); + + if bf.count_raw(&tx, &random_data).unwrap() > 0 { + fp_count += 1; + } + + bf.insert_raw(&mut tx, &random_data).unwrap(); + 
assert!(bf.count_raw(&tx, &random_data).unwrap() > 0); + + data.push(random_data); + } + tx.commit().unwrap(); + + let calculated_error_rate = (fp_count as f64) / (num_items as f64); + eprintln!( + "fp_count = {}, num_items = {}, err_rate = {}, calculated_error_rate = {}", + fp_count, num_items, err_rate, calculated_error_rate + ); + assert!(calculated_error_rate <= err_rate); + + let mut tx = tx_begin_immediate(&mut db).unwrap(); + + if i + 1 >= remove_delay { + let remove_start = ((num_items / remove_delay) * (i + 1 - remove_delay)) as usize; + let remove_end = remove_start + ((num_items / remove_delay) as usize); + + // this leaves $num_items in the bloom filter + assert_eq!(data.len() - remove_start, num_items as usize); + + let remove_data = &data[remove_start..remove_end]; + eprintln!( + "Remove {} items from pass {}", + remove_data.len(), + i + 1 - remove_delay + ); + fp_count = 0; + for random_data in remove_data.iter() { + bf.remove_raw(&mut tx, random_data).unwrap(); + if bf.count_raw(&tx, random_data).unwrap() > 0 { + fp_count += 1; + } + } + tx.commit().unwrap(); + + let calculated_error_rate = (fp_count as f64) / (num_items as f64); + eprintln!( + "fp_count = {}, num_items = {}, err_rate = {}, calculated_error_rate = {}", + fp_count, num_items, err_rate, calculated_error_rate + ); + assert!(calculated_error_rate <= err_rate); + + // everything is removed, up to fp_rate + let mut check_fp_count = 0; + for random_data in remove_data.iter() { + if bf.count_raw(&db, random_data).unwrap() > 0 { + check_fp_count += 1; + } + } + assert!(check_fp_count <= fp_count); + } + } + } + + #[test] + fn test_bloom_bitfield_codec() { + // aligned, full + let bitfield = BitField( + vec![ + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, + 0xee, 0xff, + ], + 128, + ); + let bytes = bitfield.serialize_to_vec(); + assert_eq!( + bytes, + vec![ + 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x11, 0x22, 0x33, 0x44, + 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff + ] + ); + + assert_eq!( + BitField::consensus_deserialize(&mut &bytes[..]).unwrap(), + bitfield + ); + + // unaligned, full + let bitfield = BitField( + vec![ + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, + 0xee, 0x01, + ], + 121, + ); + let bytes = bitfield.serialize_to_vec(); + assert_eq!( + bytes, + vec![ + 0x00, 0x00, 0x00, 0x79, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x11, 0x22, 0x33, 0x44, + 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0x01 + ] + ); + + assert_eq!( + BitField::consensus_deserialize(&mut &bytes[..]).unwrap(), + bitfield + ); + + // aligned, sparse + let bitfield = BitField( + vec![ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, + ], + 128, + ); + let bytes = bitfield.serialize_to_vec(); + assert_eq!( + bytes, + vec![ + 0x00, 0x00, 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x0f, 0x08 + ] + ); + + // unaligned, sparse + let bitfield = BitField( + vec![ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, + ], + 121, + ); + let bytes = bitfield.serialize_to_vec(); + assert_eq!( + bytes, + vec![ + 0x00, 0x00, 0x00, 0x79, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x0f, 0x08 + ] + ); + } + + #[test] + fn test_bloom_filter_codec() { + let num_items = 8192; + let err_rate = 0.001; + + let hasher = BloomNodeHasher::new(&[0u8; 32]); + let 
mut bf = BloomFilter::new(err_rate, num_items, hasher);
+
+        for i in 0..num_items {
+            let encoded_bf = bf.serialize_to_vec();
+            let decoded_bf =
+                BloomFilter::<BloomNodeHasher>::consensus_deserialize(&mut &encoded_bf[..])
+                    .unwrap();
+            assert_eq!(decoded_bf, bf);
+
+            let mut random_data = [0u8; 32];
+            thread_rng().fill(&mut random_data[..]);
+
+            bf.insert_raw(&random_data);
+            assert!(bf.contains_raw(&random_data));
+        }
+    }
+
+    #[test]
+    fn test_bloom_counter_to_filter() {
+        let num_items = 8192;
+        let err_rate = 0.001;
+
+        let mut db = setup_bloom_counter("has_all_inserted_items_with_error_rate");
+
+        let hasher = BloomNodeHasher::new(&[0u8; 32]);
+
+        let bc = {
+            let mut tx = tx_begin_immediate(&mut db).unwrap();
+            let bc =
+                BloomCounter::new(&mut tx, "bloom_counter", err_rate, num_items, hasher).unwrap();
+            tx.commit().unwrap();
+            bc
+        };
+
+        let mut tx = tx_begin_immediate(&mut db).unwrap();
+        let mut data = vec![];
+        for i in 0..num_items {
+            let mut random_data = [0u8; 32];
+            thread_rng().fill(&mut random_data[..]);
+
+            bc.insert_raw(&mut tx, &random_data).unwrap();
+            assert!(bc.count_raw(&tx, &random_data).unwrap() > 0);
+
+            data.push(random_data);
+
+            if i % 128 == 0 {
+                let bf = bc.to_bloom_filter(&tx).unwrap();
+
+                for random_data in data.iter() {
+                    assert!(bf.contains_raw(random_data));
+                }
+            }
+        }
+        tx.commit().unwrap();
+    }
+}
diff --git a/src/util/db.rs b/src/util/db.rs
index 740134b8f3..2f07299572 100644
--- a/src/util/db.rs
+++ b/src/util/db.rs
@@ -264,6 +264,51 @@ macro_rules! impl_byte_array_from_column {
     };
 }
 
+/// Load the path of the database from the connection
+#[cfg(test)]
+fn get_db_path(conn: &Connection) -> Result<String, Error> {
+    let sql = "PRAGMA database_list";
+    let path: Result<Option<String>, sqlite_error> =
+        conn.query_row_and_then(sql, NO_PARAMS, |row| row.get(2));
+    match path {
+        Ok(Some(path)) => Ok(path),
+        Ok(None) => Ok("".to_string()),
+        Err(e) => Err(Error::SqliteError(e)),
+    }
+}
+
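For context on `get_db_path` above: `PRAGMA database_list` returns one row per attached database with columns `(seq, name, file)`, which is why the helper reads column index 2; a database with no backing file reports nothing there, hence the `Option`. A self-contained sketch against the rusqlite 0.24 API pinned elsewhere in this patch (`/tmp/pragma_demo.db` is an arbitrary example path):

```rust
use rusqlite::{Connection, NO_PARAMS};

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open("/tmp/pragma_demo.db")?;
    // Column 2 of `PRAGMA database_list` is the on-disk path of the database.
    let path: Option<String> =
        conn.query_row_and_then("PRAGMA database_list", NO_PARAMS, |row| row.get(2))?;
    println!("main database file = {:?}", path);
    Ok(())
}
```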
+/// Generate debug output to be fed into an external script to examine query plans.
+/// TODO: it uses mocked arguments, which it assumes are strings. This does not always result in a
+/// valid query.
+#[cfg(test)]
+fn log_sql_eqp(conn: &Connection, sql_query: &str) {
+    if std::env::var("BLOCKSTACK_DB_TRACE") != Ok("1".to_string()) {
+        return;
+    }
+
+    let mut parts = sql_query.clone().split(" ");
+    let mut full_sql = if let Some(part) = parts.next() {
+        part.to_string()
+    } else {
+        sql_query.to_string()
+    };
+
+    while let Some(part) = parts.next() {
+        if part.starts_with("?") {
+            full_sql = format!("{} \"mock_arg\"", full_sql.trim());
+        } else {
+            full_sql = format!("{} {}", full_sql.trim(), part.trim());
+        }
+    }
+
+    let path = get_db_path(conn).unwrap_or("ERROR!".to_string());
+    let eqp_sql = format!("\"{}\" EXPLAIN QUERY PLAN {}", &path, full_sql.trim());
+    debug!("{}", &eqp_sql);
+}
+
+#[cfg(not(test))]
+fn log_sql_eqp(_conn: &Connection, _sql_query: &str) {}
+
 /// boilerplate code for querying rows
 pub fn query_rows<T, P>(conn: &Connection, sql_query: &str, sql_args: P) -> Result<Vec<T>, Error>
 where
@@ -271,6 +316,7 @@ where
     P::Item: ToSql,
     T: FromRow<T>,
 {
+    log_sql_eqp(conn, sql_query);
     let mut stmt = conn.prepare(sql_query)?;
     let result = stmt.query_and_then(sql_args, |row| T::from_row(row))?;
 
@@ -285,6 +331,7 @@ where
     P::Item: ToSql,
     T: FromRow<T>,
 {
+    log_sql_eqp(conn, sql_query);
     let query_result = conn.query_row_and_then(sql_query, sql_args, |row| T::from_row(row));
     match query_result {
         Ok(x) => Ok(Some(x)),
@@ -305,6 +352,7 @@ where
     P::Item: ToSql,
     T: FromRow<T>,
 {
+    log_sql_eqp(conn, sql_query);
     let mut stmt = conn.prepare(sql_query)?;
     let mut result = stmt.query_and_then(sql_args, |row| T::from_row(row))?;
     let mut return_value = None;
@@ -331,6 +379,7 @@ where
     T: FromRow<T>,
     F: FnOnce() -> String,
 {
+    log_sql_eqp(conn, sql_query);
     let mut stmt = conn.prepare(sql_query)?;
     let mut result = stmt.query_and_then(sql_args, |row| T::from_row(row))?;
     let mut return_value = None;
@@ -355,8 +404,8 @@ where
     P::Item: ToSql,
     T: FromColumn<T>,
 {
+    log_sql_eqp(conn, sql_query);
     let mut stmt = conn.prepare(sql_query)?;
-
     let mut rows = stmt.query(sql_args)?;
 
     // gather
@@ -375,10 +424,9 @@ where
     P: IntoIterator,
     P::Item: ToSql,
 {
+    log_sql_eqp(conn, sql_query);
     let mut stmt = conn.prepare(sql_query)?;
-
     let mut rows = stmt.query(sql_args)?;
-
     let mut row_data = vec![];
 
     while let Some(row) = rows.next().map_err(|e| Error::SqliteError(e))?
{ if row_data.len() > 0 { diff --git a/src/util/mod.rs b/src/util/mod.rs index e3264b52a8..d82b95e878 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -20,6 +20,8 @@ pub mod log; pub mod macros; #[macro_use] pub mod db; + +pub mod bloom; pub mod boot; pub mod hash; pub mod pair; diff --git a/src/util/retry.rs b/src/util/retry.rs index 915de7a836..677e5b902d 100644 --- a/src/util/retry.rs +++ b/src/util/retry.rs @@ -55,8 +55,8 @@ impl<'a, R: Read> RetryReader<'a, R> { fn read_and_buffer(&mut self, buf: &mut [u8]) -> io::Result { let nr = self.fd.read(buf)?; - self.buf.extend_from_slice(buf); - self.i += buf.len(); + self.buf.extend_from_slice(&buf[0..nr]); + self.i += nr; Ok(nr) } } diff --git a/src/vm/database/mod.rs b/src/vm/database/mod.rs index 5a277e0a46..6f637e6710 100644 --- a/src/vm/database/mod.rs +++ b/src/vm/database/mod.rs @@ -17,7 +17,7 @@ use std::collections::HashMap; pub use self::clarity_db::{ - BurnStateDB, ClarityDatabase, HeadersDB, NULL_BURN_STATE_DB, NULL_HEADER_DB, + BurnStateDB, ClarityDatabase, HeadersDB, StoreType, NULL_BURN_STATE_DB, NULL_HEADER_DB, STORE_CONTRACT_SRC_INTERFACE, }; pub use self::clarity_store::ClarityBackingStore; diff --git a/testnet/puppet-chain/Cargo.toml b/testnet/puppet-chain/Cargo.toml index db5c74213b..f35c426b96 100644 --- a/testnet/puppet-chain/Cargo.toml +++ b/testnet/puppet-chain/Cargo.toml @@ -5,12 +5,12 @@ authors = ["Ludo Galabru "] edition = "2018" [dependencies] -async-h1 = "=1.0" -async-std = { version = "<1.6", features = ["attributes"] } +async-h1 = "2.3.2" +async-std = { version = "1.6", features = ["attributes"] } base64 = "0.12.0" -http-types = "1.0" +http-types = "2.12" serde = "1" serde_derive = "1" serde_json = { version = "1.0", features = ["arbitrary_precision"] } toml = "0.5" -rand = "=0.7.2" +rand = "0.7.2" diff --git a/testnet/puppet-chain/src/main.rs b/testnet/puppet-chain/src/main.rs index df87599e9b..28fdaaf2cd 100644 --- a/testnet/puppet-chain/src/main.rs +++ b/testnet/puppet-chain/src/main.rs @@ -142,27 +142,26 @@ async fn main() -> http_types::Result<()> { let should_ignore_txs = config.should_ignore_transactions(effective_block_height - 1); let stream = stream?; - let addr = addr.clone(); if should_ignore_txs { // Returns ok println!("Buffering request from {}", stream.peer_addr()?); - async_h1::accept(&addr, stream.clone(), |_| async { + async_h1::accept(stream.clone(), |_| async { Ok(Response::new(StatusCode::Ok)) }) .await?; // Enqueue request - buffered_requests.push_back((addr, stream)); + buffered_requests.push_back(stream); } else { // Dequeue all the requests we've been buffering - while let Some((addr, stream)) = buffered_requests.pop_front() { + while let Some(stream) = buffered_requests.pop_front() { let config = config.clone(); task::spawn(async move { println!( "Dequeuing buffered request from {}", stream.peer_addr().unwrap() ); - if let Err(err) = accept(addr, stream, &config).await { + if let Err(err) = accept(stream, &config).await { eprintln!("{}", err); } }); @@ -171,7 +170,7 @@ async fn main() -> http_types::Result<()> { let config = config.clone(); task::spawn(async move { println!("Handling request from {}", stream.peer_addr().unwrap()); - if let Err(err) = accept(addr, stream, &config).await { + if let Err(err) = accept(stream, &config).await { eprintln!("{}", err); } }); @@ -181,8 +180,8 @@ async fn main() -> http_types::Result<()> { } // Take a TCP stream, and convert it into sequential HTTP request / response pairs. 
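Worth pausing on the `src/util/retry.rs` hunk above: `Read::read` is allowed to fill only part of `buf`, so the old code buffered stale bytes from `buf[nr..]` and advanced its cursor by `buf.len()`, corrupting replays after any short read; the fix retains exactly the `nr` bytes actually read. A minimal sketch of the corrected pattern (`BufferingReader` is illustrative, not the patch's `RetryReader`):

```rust
use std::io::{self, Read};

struct BufferingReader<R: Read> {
    inner: R,
    replay: Vec<u8>, // bytes seen so far, kept for later re-reads
    position: usize,
}

impl<R: Read> BufferingReader<R> {
    fn read_and_buffer(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let nr = self.inner.read(buf)?;
        // Only the bytes actually read may be retained; `buf[nr..]` is stale.
        self.replay.extend_from_slice(&buf[0..nr]);
        self.position += nr;
        Ok(nr)
    }
}

fn main() {
    let data = b"hello world";
    let mut reader = BufferingReader { inner: &data[..], replay: vec![], position: 0 };
    let mut chunk = [0u8; 64]; // bigger than the stream, forcing a short read
    let nr = reader.read_and_buffer(&mut chunk).unwrap();
    assert_eq!(nr, data.len());
    assert_eq!(reader.position, data.len());
    assert_eq!(&reader.replay[..], &data[..]); // no stale bytes buffered
}
```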
-async fn accept(addr: String, stream: TcpStream, config: &ConfigFile) -> http_types::Result<()> { - async_h1::accept(&addr, stream.clone(), |mut req| async { +async fn accept(stream: TcpStream, config: &ConfigFile) -> http_types::Result<()> { + async_h1::accept(stream.clone(), |mut req| async { match ( req.method(), req.url().path(), @@ -352,12 +351,9 @@ async fn generate_blocks(blocks_count: u64, address: String, config: &ConfigFile fn build_request(config: &ConfigFile, body: Vec) -> Request { let url = Url::parse(&format!("http://{}/", config.network.bitcoind_rpc_host)).unwrap(); let mut req = Request::new(Method::Post, url); - req.append_header("Authorization", config.network.authorization_token()) - .unwrap(); - req.append_header("Content-Type", "application/json") - .unwrap(); - req.append_header("Host", format!("{}", config.network.bitcoind_rpc_host)) - .unwrap(); + req.append_header("Authorization", config.network.authorization_token()); + req.append_header("Content-Type", "application/json"); + req.append_header("Host", format!("{}", config.network.bitcoind_rpc_host)); req.set_body(body); req } diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index d1b9e22be1..0959c18942 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -7,16 +7,16 @@ edition = "2018" [dependencies] lazy_static = "1.4.0" pico-args = "0.3.1" -rand = "=0.7.2" +rand = "0.7.3" serde = "1" serde_derive = "1" serde_json = { version = "1.0", features = ["arbitrary_precision", "raw_value"] } stacks = { package = "blockstack-core", path = "../../." } stx_genesis = { package = "stx-genesis", path = "../../stx-genesis/."} toml = "0.5.6" -async-h1 = "=1.0" -async-std = { version = "<1.6", features = ["attributes"] } -http-types = "1.0" +async-h1 = "2.3.2" +async-std = { version = "1.6", features = ["attributes"] } +http-types = "2.12" base64 = "0.12.0" backtrace = "0.3.50" libc = "0.2" @@ -24,9 +24,9 @@ slog = { version = "2.5.2", features = [ "max_level_trace" ] } [dev-dependencies] ring = "0.16.19" -warp = "0.2" -tokio = "0.2.21" -reqwest = { version = "0.10", features = ["blocking", "json", "rustls"] } +warp = "0.3" +tokio = "1.15" +reqwest = { version = "0.11", features = ["blocking", "json", "rustls"] } [dev-dependencies.rusqlite] version = "=0.24.2" diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 9bc21fbaa0..95f83b6683 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -67,7 +67,7 @@ const DUST_UTXO_LIMIT: u64 = 5500; pub struct BitcoinRegtestController { config: Config, - indexer_config: BitcoinIndexerConfig, + indexer: BitcoinIndexer, db: Option, burnchain_db: Option, chain_tip: Option, @@ -242,10 +242,17 @@ impl BitcoinRegtestController { } }; + let (_, network_type) = config.burnchain.get_bitcoin_network(); + let indexer_runtime = BitcoinIndexerRuntime::new(network_type); + let burnchain_indexer = BitcoinIndexer { + config: indexer_config.clone(), + runtime: indexer_runtime, + }; + Self { use_coordinator: coordinator_channel, config, - indexer_config, + indexer: burnchain_indexer, db: None, burnchain_db: None, chain_tip: None, @@ -279,10 +286,17 @@ impl BitcoinRegtestController { } }; + let (_, network_type) = config.burnchain.get_bitcoin_network(); + let indexer_runtime = BitcoinIndexerRuntime::new(network_type); + let burnchain_indexer = BitcoinIndexer 
{ + config: indexer_config.clone(), + runtime: indexer_runtime, + }; + Self { use_coordinator: None, config, - indexer_config, + indexer: burnchain_indexer, db: None, burnchain_db: None, chain_tip: None, @@ -321,21 +335,10 @@ impl BitcoinRegtestController { } } - fn setup_indexer_runtime(&self) -> (Burnchain, BitcoinIndexer) { - let (_, network_type) = self.config.burnchain.get_bitcoin_network(); - let indexer_runtime = BitcoinIndexerRuntime::new(network_type); - let burnchain_indexer = BitcoinIndexer { - config: self.indexer_config.clone(), - runtime: indexer_runtime, - }; - (self.get_burnchain(), burnchain_indexer) - } - fn receive_blocks_helium(&mut self) -> BurnchainTip { - let (mut burnchain, mut burnchain_indexer) = self.setup_indexer_runtime(); - + let mut burnchain = self.get_burnchain(); let (block_snapshot, state_transition) = loop { - match burnchain.sync_with_indexer_deprecated(&mut burnchain_indexer) { + match burnchain.sync_with_indexer_deprecated(&mut self.indexer) { Ok(x) => { break x; } @@ -405,13 +408,13 @@ impl BitcoinRegtestController { } }; - let (mut burnchain, mut burnchain_indexer) = self.setup_indexer_runtime(); + let mut burnchain = self.get_burnchain(); let (block_snapshot, burnchain_height, state_transition) = loop { if !self.should_keep_running() { return Err(BurnchainControllerError::CoordinatorClosed); } match burnchain.sync_with_indexer( - &mut burnchain_indexer, + &mut self.indexer, coordinator_comms.clone(), target_block_height_opt, Some(burnchain.pox_constants.reward_cycle_length as u64), @@ -439,7 +442,8 @@ impl BitcoinRegtestController { .expect("Sortition DB error.") .expect("BUG: no data for the canonical chain tip"); - let burnchain_height = burnchain_indexer + let burnchain_height = self + .indexer .get_highest_header_height() .map_err(BurnchainControllerError::IndexerError)?; break (snapshot, burnchain_height, state_transition); @@ -1470,20 +1474,35 @@ impl BurnchainController for BitcoinRegtestController { } } + fn get_headers_height(&self) -> u64 { + let (_, network_id) = self.config.burnchain.get_bitcoin_network(); + let spv_client = SpvClient::new( + &self.config.get_spv_headers_file_path(), + 0, + None, + network_id, + false, + false, + ) + .expect("Unable to open burnchain headers DB"); + spv_client + .get_headers_height() + .expect("Unable to query number of burnchain headers") + } + fn connect_dbs(&mut self) -> Result<(), BurnchainControllerError> { - let (burnchain, burnchain_indexer) = self.setup_indexer_runtime(); + let burnchain = self.get_burnchain(); burnchain.connect_db( - &burnchain_indexer, + &self.indexer, true, - burnchain_indexer.get_first_block_header_hash()?, - burnchain_indexer.get_first_block_header_timestamp()?, + self.indexer.get_first_block_header_hash()?, + self.indexer.get_first_block_header_timestamp()?, )?; Ok(()) } fn get_stacks_epochs(&self) -> Vec { - let (_, indexer) = self.setup_indexer_runtime(); - indexer.get_stacks_epochs() + self.indexer.get_stacks_epochs() } fn start( @@ -1755,8 +1774,7 @@ impl BitcoinRPCRequest { match (&config.burnchain.username, &config.burnchain.password) { (Some(username), Some(password)) => { let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); - req.append_header("Authorization", auth_token) - .expect("Unable to set header"); + req.append_header("Authorization", auth_token); } (_, _) => {} }; @@ -1996,9 +2014,7 @@ impl BitcoinRPCRequest { return Err(RPCError::Network(format!("RPC Error: {}", err))); } }; - request - .append_header("Content-Type", 
"application/json") - .expect("Unable to set header"); + request.append_header("Content-Type", "application/json"); request.set_body(body); let mut response = async_std::task::block_on(async move { diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index a5be3e550c..14b9da1c1f 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -86,6 +86,15 @@ impl BurnchainController for MocknetController { } } + fn get_headers_height(&self) -> u64 { + match &self.chain_tip { + Some(chain_tip) => chain_tip.block_snapshot.block_height, + None => { + unreachable!(); + } + } + } + fn get_stacks_epochs(&self) -> Vec { match &self.config.burnchain.epochs { Some(epochs) => epochs.clone(), diff --git a/testnet/stacks-node/src/burnchains/mod.rs b/testnet/stacks-node/src/burnchains/mod.rs index dade40ef88..50682fd0e2 100644 --- a/testnet/stacks-node/src/burnchains/mod.rs +++ b/testnet/stacks-node/src/burnchains/mod.rs @@ -51,6 +51,7 @@ pub trait BurnchainController { fn sortdb_ref(&self) -> &SortitionDB; fn sortdb_mut(&mut self) -> &mut SortitionDB; fn get_chain_tip(&self) -> BurnchainTip; + fn get_headers_height(&self) -> u64; /// Invoke connect() on underlying burnchain and sortition databases, to perform any migration /// or instantiation before other callers may use open() fn connect_dbs(&mut self) -> Result<(), Error>; diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index b42adabbc2..d80f7212fe 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -14,6 +14,8 @@ use stacks::core::StacksEpoch; use stacks::core::{ CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, }; +use stacks::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; +use stacks::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; use stacks::cost_estimates::fee_scalar::ScalarFeeRateEstimator; use stacks::cost_estimates::metrics::CostMetric; use stacks::cost_estimates::metrics::ProportionalDotProduct; @@ -505,6 +507,9 @@ impl Config { subsequent_attempt_time_ms: miner .subsequent_attempt_time_ms .unwrap_or(miner_default_config.subsequent_attempt_time_ms), + microblock_attempt_time_ms: miner + .microblock_attempt_time_ms + .unwrap_or(miner_default_config.microblock_attempt_time_ms), probability_pick_no_estimate_tx: miner .probability_pick_no_estimate_tx .unwrap_or(miner_default_config.probability_pick_no_estimate_tx), @@ -713,9 +718,6 @@ impl Config { inv_sync_interval: opts .inv_sync_interval .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.inv_sync_interval), - full_inv_sync_interval: opts.full_inv_sync_interval.unwrap_or_else(|| { - HELIUM_DEFAULT_CONNECTION_OPTIONS.full_inv_sync_interval - }), inv_reward_cycles: opts.inv_reward_cycles.unwrap_or_else(|| { if burnchain.mode == "mainnet" { HELIUM_DEFAULT_CONNECTION_OPTIONS.inv_reward_cycles @@ -774,6 +776,17 @@ impl Config { path } + /// Returns the path `{get_chainstate_path()}/estimates`, and ensures it exists. 
+ pub fn get_estimates_path(&self) -> PathBuf { + let mut path = self.get_chainstate_path(); + path.push("estimates"); + fs::create_dir_all(&path).expect(&format!( + "Failed to create `estimates` directory at {}", + path.to_string_lossy() + )); + path + } + pub fn get_chainstate_path_str(&self) -> String { self.get_chainstate_path() .to_str() @@ -848,9 +861,15 @@ impl Config { self.events_observers.len() > 0 } - pub fn make_block_builder_settings(&self, attempt: u64) -> BlockBuilderSettings { + pub fn make_block_builder_settings( + &self, + attempt: u64, + microblocks: bool, + ) -> BlockBuilderSettings { BlockBuilderSettings { - max_miner_time_ms: if attempt <= 1 { + max_miner_time_ms: if microblocks { + self.miner.microblock_attempt_time_ms + } else if attempt <= 1 { // first attempt to mine a block -- do so right away self.miner.first_attempt_time_ms } else { @@ -859,7 +878,9 @@ impl Config { }, mempool_settings: MemPoolWalkSettings { min_tx_fee: self.miner.min_tx_fee, - max_walk_time_ms: if attempt <= 1 { + max_walk_time_ms: if microblocks { + self.miner.microblock_attempt_time_ms + } else if attempt <= 1 { // first attempt to mine a block -- do so right away self.miner.first_attempt_time_ms } else { @@ -1040,6 +1061,7 @@ pub enum CostEstimatorName { #[derive(Clone, Debug)] pub enum FeeEstimatorName { ScalarFeeRate, + FuzzedWeightedMedianFeeRate, } #[derive(Clone, Debug)] @@ -1082,6 +1104,8 @@ impl FeeEstimatorName { fn panic_parse(s: String) -> FeeEstimatorName { if &s.to_lowercase() == "scalar_fee_rate" { FeeEstimatorName::ScalarFeeRate + } else if &s.to_lowercase() == "fuzzed_weighted_median_fee_rate" { + FeeEstimatorName::FuzzedWeightedMedianFeeRate } else { panic!( "Bad fee estimator name supplied in configuration file: {}", @@ -1107,6 +1131,12 @@ pub struct FeeEstimationConfig { pub fee_estimator: Option, pub cost_metric: Option, pub log_error: bool, + /// If using FeeRateFuzzer, the amount of random noise, as a percentage of the base value (in + /// [0, 1]) to add for fuzz. See comments on FeeRateFuzzer. + pub fee_rate_fuzzer_fraction: f64, + /// If using WeightedMedianFeeRateEstimator, the window size to use. See comments on + /// WeightedMedianFeeRateEstimator. + pub fee_rate_window_size: u64, } impl Default for FeeEstimationConfig { @@ -1116,6 +1146,8 @@ impl Default for FeeEstimationConfig { fee_estimator: Some(FeeEstimatorName::default()), cost_metric: Some(CostMetricName::default()), log_error: false, + fee_rate_fuzzer_fraction: 0.1f64, + fee_rate_window_size: 5u64, } } } @@ -1128,6 +1160,8 @@ impl From for FeeEstimationConfig { fee_estimator: None, cost_metric: None, log_error: false, + fee_rate_fuzzer_fraction: 0f64, + fee_rate_window_size: 0u64, }; } let cost_estimator = f @@ -1148,6 +1182,8 @@ impl From for FeeEstimationConfig { fee_estimator: Some(fee_estimator), cost_metric: Some(cost_metric), log_error, + fee_rate_fuzzer_fraction: f.fee_rate_fuzzer_fraction.unwrap_or(0.1f64), + fee_rate_window_size: f.fee_rate_window_size.unwrap_or(5u64), } } } @@ -1158,7 +1194,7 @@ impl Config { match self.estimation.cost_estimator.as_ref()? { CostEstimatorName::NaivePessimistic => Box::new( self.estimation - .make_pessimistic_cost_estimator(self.get_chainstate_path()), + .make_pessimistic_cost_estimator(self.get_estimates_path()), ), }; @@ -1178,10 +1214,12 @@ impl Config { pub fn make_fee_estimator(&self) -> Option> { let metric = self.make_cost_metric()?; let fee_estimator: Box = match self.estimation.fee_estimator.as_ref()? 
{ - FeeEstimatorName::ScalarFeeRate => Box::new( - self.estimation - .make_scalar_fee_estimator(self.get_chainstate_path(), metric), - ), + FeeEstimatorName::ScalarFeeRate => self + .estimation + .make_scalar_fee_estimator(self.get_estimates_path(), metric), + FeeEstimatorName::FuzzedWeightedMedianFeeRate => self + .estimation + .make_fuzzed_weighted_median_fee_estimator(self.get_estimates_path(), metric), }; Some(fee_estimator) @@ -1191,30 +1229,58 @@ impl Config { impl FeeEstimationConfig { pub fn make_pessimistic_cost_estimator( &self, - mut chainstate_path: PathBuf, + mut estimates_path: PathBuf, ) -> PessimisticEstimator { if let Some(CostEstimatorName::NaivePessimistic) = self.cost_estimator.as_ref() { - chainstate_path.push("cost_estimator_pessimistic.sqlite"); - PessimisticEstimator::open(&chainstate_path, self.log_error) + estimates_path.push("cost_estimator_pessimistic.sqlite"); + PessimisticEstimator::open(&estimates_path, self.log_error) .expect("Error opening cost estimator") } else { panic!("BUG: Expected to configure a naive pessimistic cost estimator"); } } - pub fn make_scalar_fee_estimator( + pub fn make_scalar_fee_estimator( &self, - mut chainstate_path: PathBuf, + mut estimates_path: PathBuf, metric: CM, - ) -> ScalarFeeRateEstimator { + ) -> Box { if let Some(FeeEstimatorName::ScalarFeeRate) = self.fee_estimator.as_ref() { - chainstate_path.push("fee_estimator_scalar_rate.sqlite"); - ScalarFeeRateEstimator::open(&chainstate_path, metric) - .expect("Error opening fee estimator") + estimates_path.push("fee_estimator_scalar_rate.sqlite"); + Box::new( + ScalarFeeRateEstimator::open(&estimates_path, metric) + .expect("Error opening fee estimator"), + ) } else { panic!("BUG: Expected to configure a scalar fee estimator"); } } + + // Creates a fuzzed WeightedMedianFeeRateEstimator with window_size 5. The fuzz + // is uniform with bounds [+/- 0.5]. 
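Tying the above together from an operator's perspective, the new estimator is selected in the node's TOML configuration. A sketch, assuming the `[fee_estimation]` section name; the key names and defaults are taken from `FeeEstimationConfigFile` and the `unwrap_or` defaults in this patch:

```toml
[fee_estimation]
fee_estimator = "fuzzed_weighted_median_fee_rate"
# Window of recent blocks consumed by WeightedMedianFeeRateEstimator.
fee_rate_window_size = 5
# Fuzz amplitude as a fraction of the base estimate (in [0, 1]).
fee_rate_fuzzer_fraction = 0.1
```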
+ pub fn make_fuzzed_weighted_median_fee_estimator( + &self, + mut estimates_path: PathBuf, + metric: CM, + ) -> Box { + if let Some(FeeEstimatorName::FuzzedWeightedMedianFeeRate) = self.fee_estimator.as_ref() { + estimates_path.push("fee_fuzzed_weighted_median.sqlite"); + let underlying_estimator = WeightedMedianFeeRateEstimator::open( + &estimates_path, + metric, + self.fee_rate_window_size + .try_into() + .expect("Configured fee rate window size out of bounds."), + ) + .expect("Error opening fee estimator"); + Box::new(FeeRateFuzzer::new( + underlying_estimator, + self.fee_rate_fuzzer_fraction, + )) + } else { + panic!("BUG: Expected to configure a weighted median fee estimator"); + } + } } impl NodeConfig { @@ -1340,6 +1406,7 @@ pub struct MinerConfig { pub min_tx_fee: u64, pub first_attempt_time_ms: u64, pub subsequent_attempt_time_ms: u64, + pub microblock_attempt_time_ms: u64, pub probability_pick_no_estimate_tx: u8, } @@ -1347,8 +1414,9 @@ impl MinerConfig { pub fn default() -> MinerConfig { MinerConfig { min_tx_fee: 1, - first_attempt_time_ms: 1_000, - subsequent_attempt_time_ms: 60_000, + first_attempt_time_ms: 5_000, + subsequent_attempt_time_ms: 180_000, + microblock_attempt_time_ms: 30_000, probability_pick_no_estimate_tx: 5, } } @@ -1427,6 +1495,8 @@ pub struct FeeEstimationConfigFile { pub cost_metric: Option, pub disabled: Option, pub log_error: Option, + pub fee_rate_fuzzer_fraction: Option, + pub fee_rate_window_size: Option, } impl Default for FeeEstimationConfigFile { @@ -1437,6 +1507,8 @@ impl Default for FeeEstimationConfigFile { cost_metric: None, disabled: None, log_error: None, + fee_rate_fuzzer_fraction: None, + fee_rate_window_size: None, } } } @@ -1446,6 +1518,7 @@ pub struct MinerConfigFile { pub min_tx_fee: Option, pub first_attempt_time_ms: Option, pub subsequent_attempt_time_ms: Option, + pub microblock_attempt_time_ms: Option, pub probability_pick_no_estimate_tx: Option, } @@ -1471,6 +1544,7 @@ pub enum EventKeyType { AnyEvent, BurnchainBlocks, MinedBlocks, + MinedMicroblocks, } impl EventKeyType { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 9b5396b4c5..714d224013 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -21,14 +21,16 @@ use stacks::chainstate::stacks::events::{ FTEventType, NFTEventType, STXEventType, StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin, }; -use stacks::chainstate::stacks::StacksBlock; use stacks::chainstate::stacks::{ db::accounts::MinerReward, db::MinerRewardInfo, StacksTransaction, }; +use stacks::chainstate::stacks::{StacksBlock, StacksMicroblock}; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::{MemPoolDropReason, MemPoolEventDispatcher}; use stacks::net::atlas::{Attachment, AttachmentInstance}; -use stacks::types::chainstate::{BurnchainHeaderHash, StacksAddress, StacksBlockId}; +use stacks::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, +}; use stacks::util::hash::bytes_to_hex; use stacks::vm::analysis::contract_interface_builder::build_contract_interface; use stacks::vm::costs::ExecutionCost; @@ -36,7 +38,9 @@ use stacks::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; use super::config::{EventKeyType, EventObserverConfig}; use super::node::ChainTip; +use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; +use 
stacks::chainstate::stacks::miner::TransactionEvent; #[derive(Debug, Clone)] struct EventObserver { @@ -61,11 +65,12 @@ pub const PATH_MICROBLOCK_SUBMIT: &str = "new_microblocks"; pub const PATH_MEMPOOL_TX_SUBMIT: &str = "new_mempool_tx"; pub const PATH_MEMPOOL_TX_DROP: &str = "drop_mempool_tx"; pub const PATH_MINED_BLOCK: &str = "mined_block"; +pub const PATH_MINED_MICROBLOCK: &str = "mined_microblock"; pub const PATH_BURN_BLOCK_SUBMIT: &str = "new_burn_block"; pub const PATH_BLOCK_PROCESSED: &str = "new_block"; pub const PATH_ATTACHMENT_PROCESSED: &str = "attachments/new"; -#[derive(Clone, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct MinedBlockEvent { pub target_burn_height: u64, pub block_hash: String, @@ -73,6 +78,16 @@ pub struct MinedBlockEvent { pub block_size: u64, pub anchored_cost: ExecutionCost, pub confirmed_microblocks_cost: ExecutionCost, + pub tx_events: Vec, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct MinedMicroblockEvent { + pub block_hash: String, + pub sequence: u16, + pub tx_events: Vec, + pub anchor_block_consensus_hash: ConsensusHash, + pub anchor_block: BlockHeaderHash, } impl EventObserver { @@ -107,15 +122,14 @@ impl EventObserver { let body = body.clone(); let mut req = Request::new(Method::Post, url.clone()); - req.append_header("Content-Type", "application/json") - .expect("Unable to set header"); + req.append_header("Content-Type", "application/json"); req.set_body(body); let response = async_std::task::block_on(async { let stream = match TcpStream::connect(self.endpoint.clone()).await { Ok(stream) => stream, Err(err) => { - println!("Event dispatcher: connection failed - {:?}", err); + warn!("Event dispatcher: connection failed - {:?}", err); return None; } }; @@ -123,7 +137,7 @@ impl EventObserver { match client::connect(stream, req).await { Ok(response) => Some(response), Err(err) => { - println!("Event dispatcher: rpc invokation failed - {:?}", err); + warn!("Event dispatcher: rpc invokation failed - {:?}", err); return None; } } @@ -311,6 +325,10 @@ impl EventObserver { self.send_payload(payload, PATH_MINED_BLOCK); } + fn send_mined_microblock(&self, payload: &serde_json::Value) { + self.send_payload(payload, PATH_MINED_MICROBLOCK); + } + fn send_new_burn_block(&self, payload: &serde_json::Value) { self.send_payload(payload, PATH_BURN_BLOCK_SUBMIT); } @@ -385,6 +403,7 @@ pub struct EventDispatcher { stx_observers_lookup: HashSet, any_event_observers_lookup: HashSet, miner_observers_lookup: HashSet, + mined_microblocks_observers_lookup: HashSet, boot_receipts: Arc>>>, } @@ -402,6 +421,7 @@ impl MemPoolEventDispatcher for EventDispatcher { block_size_bytes: u64, consumed: &ExecutionCost, confirmed_microblock_cost: &ExecutionCost, + tx_events: Vec, ) { self.process_mined_block_event( target_burn_height, @@ -409,8 +429,24 @@ impl MemPoolEventDispatcher for EventDispatcher { block_size_bytes, consumed, confirmed_microblock_cost, + tx_events, ) } + + fn mined_microblock_event( + &self, + microblock: &StacksMicroblock, + tx_events: Vec, + anchor_block_consensus_hash: ConsensusHash, + anchor_block: BlockHeaderHash, + ) { + self.process_mined_microblock_event( + microblock, + tx_events, + anchor_block_consensus_hash, + anchor_block, + ); + } } impl BlockEventDispatcher for EventDispatcher { @@ -483,6 +519,7 @@ impl EventDispatcher { microblock_observers_lookup: HashSet::new(), boot_receipts: Arc::new(Mutex::new(None)), miner_observers_lookup: HashSet::new(), + mined_microblocks_observers_lookup: 
HashSet::new(), } } @@ -790,6 +827,7 @@ impl EventDispatcher { block_size_bytes: u64, consumed: &ExecutionCost, confirmed_microblock_cost: &ExecutionCost, + tx_events: Vec, ) { let interested_observers: Vec<_> = self .registered_observers @@ -808,6 +846,7 @@ impl EventDispatcher { block_size: block_size_bytes, anchored_cost: consumed.clone(), confirmed_microblocks_cost: confirmed_microblock_cost.clone(), + tx_events, }) .unwrap(); @@ -816,6 +855,40 @@ impl EventDispatcher { } } + pub fn process_mined_microblock_event( + &self, + microblock: &StacksMicroblock, + tx_events: Vec, + anchor_block_consensus_hash: ConsensusHash, + anchor_block: BlockHeaderHash, + ) { + let interested_observers: Vec<_> = self + .registered_observers + .iter() + .enumerate() + .filter(|(obs_id, _observer)| { + self.mined_microblocks_observers_lookup + .contains(&(*obs_id as u16)) + }) + .collect(); + if interested_observers.len() < 1 { + return; + } + + let payload = serde_json::to_value(MinedMicroblockEvent { + block_hash: microblock.block_hash().to_string(), + sequence: microblock.header.sequence, + tx_events, + anchor_block_consensus_hash, + anchor_block, + }) + .unwrap(); + + for (_, observer) in interested_observers.iter() { + observer.send_mined_microblock(&payload); + } + } + pub fn process_dropped_mempool_txs(&self, txs: Vec, reason: MemPoolDropReason) { // lazily assemble payload only if we have observers let interested_observers: Vec<_> = self @@ -940,6 +1013,10 @@ impl EventDispatcher { EventKeyType::MinedBlocks => { self.miner_observers_lookup.insert(observer_index); } + EventKeyType::MinedMicroblocks => { + self.mined_microblocks_observers_lookup + .insert(observer_index); + } } } diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 3b110221fb..3ce02b6ba0 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -38,7 +38,6 @@ pub use self::burnchains::{ pub use self::config::{Config, ConfigFile}; pub use self::event_dispatcher::EventDispatcher; pub use self::keychain::Keychain; -pub use self::neon_node::{InitializedNeonNode, NeonGenesisNode}; pub use self::node::{ChainTip, Node}; pub use self::run_loop::{helium, neon}; pub use self::tenure::Tenure; diff --git a/testnet/stacks-node/src/monitoring/prometheus.rs b/testnet/stacks-node/src/monitoring/prometheus.rs index eb55e33315..9342a8f8d4 100644 --- a/testnet/stacks-node/src/monitoring/prometheus.rs +++ b/testnet/stacks-node/src/monitoring/prometheus.rs @@ -35,7 +35,7 @@ pub fn start_serving_prometheus_metrics(bind_address: String) { let addr = addr.clone(); task::spawn(async { - if let Err(err) = accept(addr, stream).await { + if let Err(err) = accept(stream).await { eprintln!("{}", err); } }); @@ -43,18 +43,16 @@ pub fn start_serving_prometheus_metrics(bind_address: String) { }); } -async fn accept(addr: String, stream: TcpStream) -> http_types::Result<()> { +async fn accept(stream: TcpStream) -> http_types::Result<()> { debug!("Handle Prometheus polling ({})", stream.peer_addr()?); - async_h1::accept(&addr, stream.clone(), |_| async { + async_h1::accept(stream.clone(), |_| async { let encoder = TextEncoder::new(); let metric_families = gather(); let mut buffer = vec![]; encoder.encode(&metric_families, &mut buffer).unwrap(); let mut response = Response::new(StatusCode::Ok); - response - .append_header("Content-Type", encoder.format_type()) - .expect("Unable to set headers"); + response.append_header("Content-Type", encoder.format_type()); response.set_body(Body::from(buffer)); Ok(response) 
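Before the `neon_node.rs` changes, a note on consuming the new event stream above: observers registered for mined-microblock events receive an HTTP POST of a JSON-encoded `MinedMicroblockEvent` at the `mined_microblock` path. A hedged sketch of the receiving side (assumes serde/serde_json with the `derive` feature, assumes the two hash fields arrive as hex strings, and leaves `TransactionEvent` opaque; the hex values are placeholders):

```rust
use serde::Deserialize;

// Mirrors the `MinedMicroblockEvent` struct added in this patch; field names
// must match the dispatcher's serde output exactly.
#[derive(Debug, Deserialize)]
struct MinedMicroblockEvent {
    block_hash: String,
    sequence: u16,
    tx_events: Vec<serde_json::Value>, // TransactionEvent payloads, left opaque here
    anchor_block_consensus_hash: String,
    anchor_block: String,
}

fn main() {
    let body = r#"{
        "block_hash": "7f5e",
        "sequence": 3,
        "tx_events": [],
        "anchor_block_consensus_hash": "a1b2",
        "anchor_block": "c3d4"
    }"#;
    let event: MinedMicroblockEvent = serde_json::from_str(body).unwrap();
    println!("mined microblock {} (seq {})", event.block_hash, event.sequence);
}
```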
diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index d463b114bc..6fcd39933c 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -5,10 +5,7 @@ use std::convert::{TryFrom, TryInto}; use std::default::Default; use std::net::SocketAddr; use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError}; -use std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, Mutex, -}; +use std::sync::{atomic::Ordering, Arc, Mutex}; use std::{thread, thread::JoinHandle}; use stacks::burnchains::{Burnchain, BurnchainParameters, Txid}; @@ -22,9 +19,7 @@ use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; -use stacks::chainstate::stacks::db::{ - ChainStateBootData, ClarityTx, StacksChainState, MINER_REWARD_MATURITY, -}; +use stacks::chainstate::stacks::db::{StacksChainState, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::Error as ChainstateError; use stacks::chainstate::stacks::StacksPublicKey; use stacks::chainstate::stacks::{ @@ -48,7 +43,7 @@ use stacks::net::{ p2p::PeerNetwork, relay::Relayer, rpc::RPCHandlerArgs, - Error as NetError, NetworkResult, PeerAddress, + Error as NetError, NetworkResult, PeerAddress, ServiceFlags, }; use stacks::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockHeader, VRFSeed, @@ -63,8 +58,9 @@ use stacks::vm::costs::ExecutionCost; use stacks::{burnchains::BurnchainSigner, chainstate::stacks::db::StacksHeaderInfo}; use crate::burnchains::bitcoin_regtest_controller::BitcoinRegtestController; +use crate::run_loop::neon::Counters; +use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; -use crate::syncctl::PoxSyncWatchdogComms; use crate::ChainTip; use super::{BurnchainController, BurnchainTip, Config, EventDispatcher, Keychain}; @@ -96,15 +92,15 @@ enum RelayerDirective { ProcessTenure(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash), RunTenure(RegisteredKey, BlockSnapshot, u128), // (vrf key, chain tip, time of issuance in ms) RegisterKey(BlockSnapshot), - RunMicroblockTenure(u128), // time of issuance in ms + RunMicroblockTenure(BlockSnapshot, u128), // time of issuance in ms Exit, } -pub struct InitializedNeonNode { +pub struct StacksNode { config: Config, relay_channel: SyncSender, + last_sortition: Arc>>, burnchain_signer: BurnchainSigner, - last_burn_block: Option, is_miner: bool, pub atlas_config: AtlasConfig, leader_key_registration_state: LeaderKeyRegistrationState, @@ -112,34 +108,29 @@ pub struct InitializedNeonNode { pub relayer_thread_handle: JoinHandle<()>, } -pub struct NeonGenesisNode { - pub config: Config, - keychain: Keychain, - event_dispatcher: EventDispatcher, - burnchain: Burnchain, -} - #[cfg(test)] -type BlocksProcessedCounter = std::sync::Arc; - -#[cfg(not(test))] -type BlocksProcessedCounter = (); - -#[cfg(test)] -fn bump_processed_counter(blocks_processed: &BlocksProcessedCounter) { - blocks_processed.fetch_add(1, std::sync::atomic::Ordering::SeqCst); -} - -#[cfg(not(test))] -fn bump_processed_counter(_blocks_processed: &BlocksProcessedCounter) {} - -#[cfg(test)] -fn set_processed_counter(blocks_processed: &BlocksProcessedCounter, value: u64) { - blocks_processed.store(value, std::sync::atomic::Ordering::SeqCst); +fn fault_injection_long_tenure() { + // simulated slow block + match 
std::env::var("STX_TEST_SLOW_TENURE") { + Ok(tenure_str) => match tenure_str.parse::() { + Ok(tenure_time) => { + info!( + "Fault injection: sleeping for {} milliseconds to simulate a long tenure", + tenure_time + ); + stacks::util::sleep_ms(tenure_time); + } + Err(_) => { + error!("Parse error for STX_TEST_SLOW_TENURE"); + panic!(); + } + }, + _ => {} + } } #[cfg(not(test))] -fn set_processed_counter(_blocks_processed: &BlocksProcessedCounter, _value: u64) {} +fn fault_injection_long_tenure() {} enum Error { HeaderNotFoundForChainTip, @@ -333,6 +324,7 @@ fn mine_one_microblock( sortdb: &SortitionDB, chainstate: &mut StacksChainState, mempool: &mut MemPoolDB, + event_dispatcher: &EventDispatcher, ) -> Result { debug!( "Try to mine one microblock off of {}/{} (total: {})", @@ -368,7 +360,11 @@ fn mine_one_microblock( let t1 = get_epoch_time_ms(); - let mblock = microblock_miner.mine_next_microblock(mempool, µblock_state.miner_key)?; + let mblock = microblock_miner.mine_next_microblock( + mempool, + µblock_state.miner_key, + event_dispatcher, + )?; let new_cost_so_far = microblock_miner.get_cost_so_far().expect("BUG: cannot read cost so far from miner -- indicates that the underlying Clarity Tx is somehow in use still."); let t2 = get_epoch_time_ms(); @@ -411,6 +407,7 @@ fn try_mine_microblock( sortdb: &SortitionDB, mem_pool: &mut MemPoolDB, winning_tip: (ConsensusHash, BlockHeaderHash, Secp256k1PrivateKey), + event_dispatcher: &EventDispatcher, ) -> Result, NetError> { let ch = winning_tip.0; let bhh = winning_tip.1; @@ -439,7 +436,7 @@ fn try_mine_microblock( last_mined: 0, quantity: 0, cost_so_far: cost_so_far, - settings: config.make_block_builder_settings(2), + settings: config.make_block_builder_settings(0, true), }); } Ok(None) => { @@ -471,7 +468,13 @@ fn try_mine_microblock( get_epoch_time_secs() - 600, )?; if num_attachable == 0 { - match mine_one_microblock(&mut microblock_miner, sortdb, chainstate, mem_pool) { + match mine_one_microblock( + &mut microblock_miner, + sortdb, + chainstate, + mem_pool, + event_dispatcher, + ) { Ok(microblock) => { // will need to relay this next_microblock = Some(microblock); @@ -505,7 +508,7 @@ fn run_microblock_tenure( mem_pool: &mut MemPoolDB, relayer: &mut Relayer, miner_tip: (ConsensusHash, BlockHeaderHash, Secp256k1PrivateKey), - microblocks_processed: BlocksProcessedCounter, + counters: &Counters, event_dispatcher: &EventDispatcher, ) { // TODO: this is sensitive to poll latency -- can we call this on a fixed @@ -526,6 +529,7 @@ fn run_microblock_tenure( sortdb, mem_pool, miner_tip.clone(), + event_dispatcher, ) { Ok(x) => x, Err(e) => { @@ -550,7 +554,7 @@ fn run_microblock_tenure( "Mined one microblock: {} seq {} (total processed: {})", µblock_hash, next_microblock.header.sequence, num_mblocks ); - set_processed_counter(µblocks_processed, num_mblocks); + counters.set_microblocks_processed(num_mblocks); let parent_index_block_hash = StacksBlockHeader::make_index_block_hash(parent_consensus_hash, parent_block_hash); @@ -612,19 +616,21 @@ fn recv_unconfirmed_txs( } fn spawn_peer( - is_mainnet: bool, + runloop: &RunLoop, mut this: PeerNetwork, p2p_sock: &SocketAddr, rpc_sock: &SocketAddr, - config: Config, poll_timeout: u64, relay_channel: SyncSender, - mut sync_comms: PoxSyncWatchdogComms, attachments_rx: Receiver>, unconfirmed_txs: Arc>, - event_observer: EventDispatcher, - should_keep_running: Arc, ) -> Result, NetError> { + let config = runloop.config().clone(); + let mut sync_comms = runloop.get_pox_sync_comms(); + let event_dispatcher = 
runloop.get_event_dispatcher(); + let should_keep_running = runloop.get_termination_switch(); + + let is_mainnet = config.is_mainnet(); let burn_db_path = config.get_burn_db_file_path(); let stacks_chainstate_path = config.get_chainstate_path_str(); let exit_at_block_height = config.burnchain.process_exit_at_block_height; @@ -646,12 +652,14 @@ fn spawn_peer( let server_thread = thread::Builder::new() .name("p2p".to_string()) .spawn(move || { + // create estimators, metric instances for RPC handler let cost_estimator = config .make_cost_estimator() .unwrap_or_else(|| Box::new(UnitEstimator)); let metric = config .make_cost_metric() .unwrap_or_else(|| Box::new(UnitMetric)); + let fee_estimator = config.make_fee_estimator(); let mut mem_pool = MemPoolDB::open( is_mainnet, @@ -662,20 +670,18 @@ fn spawn_peer( ) .expect("Database failure opening mempool"); - // create estimators, metric instances for RPC handler let cost_estimator = config .make_cost_estimator() .unwrap_or_else(|| Box::new(UnitEstimator)); let metric = config .make_cost_metric() .unwrap_or_else(|| Box::new(UnitMetric)); - let fee_estimator = config.make_fee_estimator(); let handler_args = RPCHandlerArgs { exit_at_block_height: exit_at_block_height.as_ref(), genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) .unwrap(), - event_observer: Some(&event_observer), + event_observer: Some(&event_dispatcher), cost_estimator: Some(cost_estimator.as_ref()), cost_metric: Some(metric.as_ref()), fee_estimator: fee_estimator.as_ref().map(|x| x.as_ref()), @@ -754,6 +760,7 @@ fn spawn_peer( if mblock_deadline < get_epoch_time_ms() { debug!("P2P: schedule microblock tenure"); results_with_data.push_back(RelayerDirective::RunMicroblockTenure( + this.burnchain_tip.clone(), get_epoch_time_ms(), )); mblock_deadline = @@ -778,7 +785,9 @@ fn spawn_peer( ); match e { TrySendError::Full(directive) => { - if let RelayerDirective::RunMicroblockTenure(_) = directive { + if let RelayerDirective::RunMicroblockTenure(..) = directive { + // can drop this + } else if let RelayerDirective::RunTenure(..) 
= directive { // can drop this } else { // don't lose this data -- just try it again @@ -813,23 +822,52 @@ fn spawn_peer( Ok(server_thread) } +fn get_last_sortition(last_sortition: &Arc>>) -> Option { + match last_sortition.lock() { + Ok(sort_opt) => sort_opt.clone(), + Err(_) => { + error!("Sortition mutex poisoned!"); + panic!(); + } + } +} + +fn set_last_sortition( + last_sortition: &mut Arc>>, + block_snapshot: BlockSnapshot, +) { + match last_sortition.lock() { + Ok(mut sortition_opt) => { + sortition_opt.replace(block_snapshot); + } + Err(_) => { + error!("Sortition mutex poisoned!"); + panic!(); + } + }; +} + fn spawn_miner_relayer( - is_mainnet: bool, - chain_id: u32, + runloop: &RunLoop, mut relayer: Relayer, local_peer: LocalPeer, - config: Config, mut keychain: Keychain, - burn_db_path: String, - stacks_chainstate_path: String, relay_channel: Receiver, - event_dispatcher: EventDispatcher, - blocks_processed: BlocksProcessedCounter, - microblocks_processed: BlocksProcessedCounter, - burnchain: Burnchain, + last_sortition: Arc>>, coord_comms: CoordinatorChannels, unconfirmed_txs: Arc>, ) -> Result, NetError> { + let config = runloop.config().clone(); + let event_dispatcher = runloop.get_event_dispatcher(); + let counters = runloop.get_counters(); + let sync_comms = runloop.get_pox_sync_comms(); + let burnchain = runloop.get_burnchain(); + + let is_mainnet = config.is_mainnet(); + let chain_id = config.burnchain.chain_id; + let burn_db_path = config.get_burn_db_file_path(); + let stacks_chainstate_path = config.get_chainstate_path_str(); + // Note: the chainstate coordinator is *the* block processor, it is responsible for writes to // the chainstate -- eventually, no other codepaths should be writing to it. // @@ -873,6 +911,7 @@ fn spawn_miner_relayer( &mut sortdb, &mut chainstate, &mut mem_pool, + sync_comms.get_ibd(), Some(&coord_comms), Some(&event_dispatcher), ) @@ -967,7 +1006,14 @@ fn spawn_miner_relayer( vec![consensus_hash.clone()], ) .expect("Failed to obtain block information for a block we mined."); - if let Err(e) = relayer.advertize_blocks(blocks_available) { + + let block_data = { + let mut bd = HashMap::new(); + bd.insert(consensus_hash.clone(), mined_block.clone()); + bd + }; + + if let Err(e) = relayer.advertize_blocks(blocks_available, block_data) { warn!("Failed to advertise new block: {}", e); } @@ -1015,9 +1061,12 @@ fn spawn_miner_relayer( } } RelayerDirective::RunTenure(registered_key, last_burn_block, issue_timestamp_ms) => { - if last_tenure_issue_time > issue_timestamp_ms { - // coalesce -- stale - continue; + if let Some(cur_sortition) = get_last_sortition(&last_sortition) { + if last_burn_block.sortition_id != cur_sortition.sortition_id { + debug!("Drop stale RunTenure for {}: current sortition is for {}", &last_burn_block.burn_header_hash, &cur_sortition.burn_header_hash); + counters.bump_missed_tenures(); + continue; + } } let burn_header_hash = last_burn_block.burn_header_hash.clone(); @@ -1033,7 +1082,7 @@ fn spawn_miner_relayer( // no burnchain change, so only re-run block tenure every so often in order // to give microblocks a chance to collect if issue_timestamp_ms < last_tenure_issue_time + (config.node.wait_time_for_microblocks as u128) { - debug!("Relayer: will NOT run tenure since issuance is too fresh"); + debug!("Relayer: will NOT run tenure since issuance at {} is too fresh (wait until {} + {} = {})", issue_timestamp_ms / 1000, last_tenure_issue_time / 1000, config.node.wait_time_for_microblocks / 1000, (last_tenure_issue_time + 
(config.node.wait_time_for_microblocks as u128)) / 1000); continue; } } @@ -1042,6 +1091,7 @@ fn spawn_miner_relayer( burn_tenure_snapshot = burn_chain_sn; if issue_timestamp_ms + (config.node.wait_time_for_microblocks as u128) < get_epoch_time_ms() { // still waiting for microblocks to arrive + debug!("Relayer: will NOT run tenure since still waiting for microblocks to arrive ({} <= {})", (issue_timestamp_ms + (config.node.wait_time_for_microblocks as u128)) / 1000, get_epoch_time_secs()); continue; } debug!("Relayer: burnchain has advanced from {} to {}", &burn_header_hash, &burn_chain_tip); @@ -1054,11 +1104,14 @@ fn spawn_miner_relayer( "last_burn_header_hash" => %burn_header_hash ); + let tenure_begin = get_epoch_time_ms(); + fault_injection_long_tenure(); + let mut last_mined_blocks_vec = last_mined_blocks .remove(&burn_header_hash) .unwrap_or_default(); - let last_mined_block_opt = InitializedNeonNode::relayer_run_tenure( + let last_mined_block_opt = StacksNode::relayer_run_tenure( &config, registered_key, &mut chainstate, @@ -1074,14 +1127,14 @@ fn spawn_miner_relayer( ); if let Some((last_mined_block, microblock_privkey)) = last_mined_block_opt { if last_mined_blocks_vec.len() == 0 { - // (for testing) only bump once per epoch - bump_processed_counter(&blocks_processed); + counters.bump_blocks_processed(); } last_mined_blocks_vec.push((last_mined_block, microblock_privkey)); } last_mined_blocks.insert(burn_header_hash, last_mined_blocks_vec); last_tenure_issue_time = get_epoch_time_ms(); + debug!("Relayer: RunTenure finished at {} (in {}ms)", last_tenure_issue_time, last_tenure_issue_time.saturating_sub(tenure_begin)); } RelayerDirective::RegisterKey(ref last_burn_block) => { rotate_vrf_and_register( @@ -1090,15 +1143,21 @@ fn spawn_miner_relayer( last_burn_block, &mut bitcoin_controller, ); - bump_processed_counter(&blocks_processed); + counters.bump_blocks_processed(); } - RelayerDirective::RunMicroblockTenure(tenure_issue_ms) => { + RelayerDirective::RunMicroblockTenure(burnchain_tip, tenure_issue_ms) => { if last_microblock_tenure_time > tenure_issue_ms { // stale request continue; } + if let Some(cur_sortition) = get_last_sortition(&last_sortition) { + if burnchain_tip.sortition_id != cur_sortition.sortition_id { + debug!("Drop stale RunMicroblockTenure for {}/{}: current sortition is for {} ({})", &burnchain_tip.consensus_hash, &burnchain_tip.winning_stacks_block_hash, &cur_sortition.consensus_hash, &cur_sortition.burn_header_hash); + continue; + } + } - debug!("Relayer: run microblock tenure"); + debug!("Relayer: Run microblock tenure"); // unconfirmed state must be consistent with the chain tip, as must the // microblock mining state. 
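The `RunTenure` and `RunMicroblockTenure` handling above shares one pattern: each directive carries the sortition it was issued under, and the relayer drops it when the canonical sortition has since moved on, rather than mining against a stale tip. A minimal sketch of the idea with stand-in types (not the node's actual `RelayerDirective`):

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct SortitionId(u64);

enum Directive {
    RunTenure(SortitionId),
    RunMicroblockTenure(SortitionId),
}

/// A directive is stale if the sortition it was issued for is no longer
/// canonical; stale work is dropped instead of executed.
fn is_stale(directive: &Directive, canonical: SortitionId) -> bool {
    let issued_for = match directive {
        Directive::RunTenure(s) | Directive::RunMicroblockTenure(s) => *s,
    };
    issued_for != canonical
}

fn main() {
    let canonical = SortitionId(7);
    assert!(is_stale(&Directive::RunTenure(SortitionId(6)), canonical));
    assert!(!is_stale(&Directive::RunMicroblockTenure(SortitionId(7)), canonical));
}
```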
@@ -1111,6 +1170,7 @@ fn spawn_miner_relayer( else { debug!("Relayer: reset microblock miner state"); microblock_miner_state = None; + counters.set_microblocks_processed(0); } } @@ -1122,7 +1182,7 @@ fn spawn_miner_relayer( &mut mem_pool, &mut relayer, (ch, bh, mblock_pkey), - microblocks_processed.clone(), + &counters, &event_dispatcher, ); @@ -1132,7 +1192,7 @@ fn spawn_miner_relayer( } else { debug!("Relayer: reset unconfirmed state to 0 microblocks"); - set_processed_counter(µblocks_processed, 0); + counters.set_microblocks_processed(0); microblock_miner_state = None; } } @@ -1151,23 +1211,19 @@ enum LeaderKeyRegistrationState { Active(RegisteredKey), } -/// This node is used for both neon testnet and for mainnet -impl InitializedNeonNode { - fn new( - config: Config, - mut keychain: Keychain, - event_dispatcher: EventDispatcher, +impl StacksNode { + pub fn spawn( + runloop: &RunLoop, last_burn_block: Option, - miner: bool, - blocks_processed: BlocksProcessedCounter, - microblocks_processed: BlocksProcessedCounter, coord_comms: CoordinatorChannels, - sync_comms: PoxSyncWatchdogComms, - burnchain: Burnchain, attachments_rx: Receiver>, - atlas_config: AtlasConfig, - should_keep_running: Arc, - ) -> InitializedNeonNode { + ) -> StacksNode { + let config = runloop.config().clone(); + let miner = runloop.is_miner(); + let burnchain = runloop.get_burnchain(); + let atlas_config = AtlasConfig::default(config.is_mainnet()); + let mut keychain = Keychain::default(config.node.seed.clone()); + // we can call _open_ here rather than _connect_, since connect is first called in // make_genesis_block let sortdb = SortitionDB::open(&config.get_burn_db_file_path(), false) @@ -1278,8 +1334,20 @@ impl InitializedNeonNode { } tx.commit().unwrap(); } + + // update services to indicate we can support mempool sync + { + let mut tx = peerdb.tx_begin().unwrap(); + PeerDB::set_local_services( + &mut tx, + (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16), + ) + .unwrap(); + tx.commit().unwrap(); + } + let atlasdb = - AtlasDB::connect(atlas_config, &config.get_atlas_db_file_path(), true).unwrap(); + AtlasDB::connect(atlas_config.clone(), &config.get_atlas_db_file_path(), true).unwrap(); let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { Ok(local_peer) => local_peer, @@ -1318,6 +1386,9 @@ impl InitializedNeonNode { // setup the relayer channel let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER); + let last_burn_block = last_burn_block.map(|x| x.block_snapshot); + let last_sortition = Arc::new(Mutex::new(last_burn_block)); + let burnchain_signer = keychain.get_burnchain_signer(); match monitoring::set_burnchain_signer(burnchain_signer.clone()) { Err(e) => { @@ -1342,52 +1413,38 @@ impl InitializedNeonNode { }; let relayer_thread_handle = spawn_miner_relayer( - config.is_mainnet(), - config.burnchain.chain_id, + runloop, relayer, local_peer, - config.clone(), keychain, - config.get_burn_db_file_path(), - config.get_chainstate_path_str(), relay_recv, - event_dispatcher.clone(), - blocks_processed.clone(), - microblocks_processed.clone(), - burnchain, + last_sortition.clone(), coord_comms, shared_unconfirmed_txs.clone(), ) .expect("Failed to initialize mine/relay thread"); let p2p_thread_handle = spawn_peer( - config.is_mainnet(), + runloop, p2p_net, &p2p_sock, &rpc_sock, - config.clone(), 5000, relay_send.clone(), - sync_comms, attachments_rx, shared_unconfirmed_txs, - event_dispatcher, - should_keep_running, ) .expect("Failed to initialize p2p thread"); info!("Start HTTP server on: 
{}", &config.node.rpc_bind); info!("Start P2P server on: {}", &config.node.p2p_bind); - let last_burn_block = last_burn_block.map(|x| x.block_snapshot); - let is_miner = miner; - let atlas_config = AtlasConfig::default(config.is_mainnet()); - InitializedNeonNode { + StacksNode { config, relay_channel: relay_send, - last_burn_block, + last_sortition, burnchain_signer, is_miner, atlas_config, @@ -1405,7 +1462,7 @@ impl InitializedNeonNode { return true; } - if let Some(burnchain_tip) = self.last_burn_block.clone() { + if let Some(burnchain_tip) = get_last_sortition(&self.last_sortition) { match self.leader_key_registration_state { LeaderKeyRegistrationState::Active(ref key) => { debug!( @@ -1447,12 +1504,14 @@ impl InitializedNeonNode { return true; } - if let Some(ref snapshot) = &self.last_burn_block { + if let Some(snapshot) = get_last_sortition(&self.last_sortition) { debug!( - "Tenure: Notify sortition! Last snapshot is {}/{} ({})", - &snapshot.consensus_hash, - &snapshot.burn_header_hash, - &snapshot.winning_stacks_block_hash + "Tenure: Notify sortition!"; + "consensus_hash" => %snapshot.consensus_hash, + "burn_block_hash" => %snapshot.burn_header_hash, + "winning_stacks_block_hash" => %snapshot.winning_stacks_block_hash, + "burn_block_height" => &snapshot.block_height, + "sortition_id" => %snapshot.sortition_id ); if snapshot.sortition { return self @@ -1579,7 +1638,7 @@ impl InitializedNeonNode { burn_fee_cap: u64, bitcoin_controller: &mut BitcoinRegtestController, last_mined_blocks: &Vec<&AssembledAnchorBlock>, - event_observer: &EventDispatcher, + event_dispatcher: &EventDispatcher, ) -> Option<(AssembledAnchorBlock, Secp256k1PrivateKey)> { let MiningTenureInformation { mut stacks_parent_header, @@ -1588,7 +1647,10 @@ impl InitializedNeonNode { parent_block_total_burn, parent_winning_vtxindex, coinbase_nonce, - } = if let Some(stacks_tip) = chain_state.get_stacks_chain_tip(burn_db).unwrap() { + } = if let Some(stacks_tip) = chain_state + .get_stacks_chain_tip(burn_db) + .expect("FATAL: could not query chain tip") + { let miner_address = keychain.origin_address(config.is_mainnet()).unwrap(); Self::get_mining_tenure_information( chain_state, @@ -1850,7 +1912,7 @@ impl InitializedNeonNode { &parent_consensus_hash, &stacks_parent_header.anchored_header.block_hash(), &poison_microblock_tx, - Some(event_observer), + Some(event_dispatcher), &stacks_epoch.block_limit, &stacks_epoch.epoch_id, ) { @@ -1871,8 +1933,8 @@ impl InitializedNeonNode { vrf_proof.clone(), mblock_pubkey_hash, &coinbase_tx, - config.make_block_builder_settings((last_mined_blocks.len() + 1) as u64), - Some(event_observer), + config.make_block_builder_settings((last_mined_blocks.len() + 1) as u64, false), + Some(event_dispatcher), ) { Ok(block) => block, Err(ChainstateError::InvalidStacksMicroblock(msg, mblock_header_hash)) => { @@ -1911,8 +1973,8 @@ impl InitializedNeonNode { vrf_proof.clone(), mblock_pubkey_hash, &coinbase_tx, - config.make_block_builder_settings((last_mined_blocks.len() + 1) as u64), - Some(event_observer), + config.make_block_builder_settings((last_mined_blocks.len() + 1) as u64, false), + Some(event_dispatcher), ) { Ok(block) => block, Err(e) => { @@ -1981,19 +2043,57 @@ impl InitializedNeonNode { sunset_burn, burn_block.block_height, ); + + let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + // last chance -- confirm that the stacks tip and burnchain tip are unchanged (since it could 
have taken long + // enough to build this block that another block could have arrived). + if let Some(stacks_tip) = chain_state + .get_stacks_chain_tip(burn_db) + .expect("FATAL: could not query chain tip") + { + if stacks_tip.anchored_block_hash != anchored_block.header.parent_block + || parent_consensus_hash != stacks_tip.consensus_hash + || cur_burn_chain_tip.sortition_id != burn_block.sortition_id + { + debug!( + "Cancel block-commit; chain tip(s) have changed"; + "block_hash" => %anchored_block.block_hash(), + "tx_count" => anchored_block.txs.len(), + "target_height" => %anchored_block.header.total_work.work, + "parent_consensus_hash" => %parent_consensus_hash, + "parent_block_hash" => %anchored_block.header.parent_block, + "parent_microblock_hash" => %anchored_block.header.parent_microblock, + "parent_microblock_seq" => anchored_block.header.parent_microblock_sequence, + "old_tip_burn_block_hash" => %burn_block.burn_header_hash, + "old_tip_burn_block_height" => burn_block.block_height, + "old_tip_burn_block_sortition_id" => %burn_block.sortition_id, + "attempt" => attempt, + "new_stacks_tip_block_hash" => %stacks_tip.anchored_block_hash, + "new_stacks_tip_consensus_hash" => %stacks_tip.consensus_hash, + "new_tip_burn_block_height" => cur_burn_chain_tip.block_height, + "new_tip_burn_block_sortition_id" => %cur_burn_chain_tip.sortition_id, + "new_burn_block_sortition_id" => %cur_burn_chain_tip.sortition_id + ); + return None; + } + } + let mut op_signer = keychain.generate_op_signer(); debug!( - "Submit block-commit for block {} tx-count {} height {} off of {}/{} with microblock parent {} (seq {}) in burn block {} ({}); attempt {}", - &anchored_block.block_hash(), - anchored_block.txs.len(), - anchored_block.header.total_work.work, - &parent_consensus_hash, - &anchored_block.header.parent_block, - &anchored_block.header.parent_microblock, - &anchored_block.header.parent_microblock_sequence, - &burn_block.burn_header_hash, - burn_block.block_height, - attempt + "Submit block-commit"; + "block_hash" => %anchored_block.block_hash(), + "tx_count" => anchored_block.txs.len(), + "target_height" => anchored_block.header.total_work.work, + "parent_consensus_hash" => %parent_consensus_hash, + "parent_block_hash" => %anchored_block.header.parent_block, + "parent_microblock_hash" => %anchored_block.header.parent_microblock, + "parent_microblock_seq" => anchored_block.header.parent_microblock_sequence, + "tip_burn_block_hash" => %burn_block.burn_header_hash, + "tip_burn_block_height" => burn_block.block_height, + "tip_burn_block_sortition_id" => %burn_block.sortition_id, + "attempt" => attempt ); let res = bitcoin_controller.submit_operation(op, &mut op_signer, attempt); @@ -2099,116 +2199,12 @@ impl InitializedNeonNode { // no-op on UserBurnSupport ops are not supported / produced at this point. - self.last_burn_block = Some(block_snapshot); - + set_last_sortition(&mut self.last_sortition, block_snapshot); last_sortitioned_block.map(|x| x.0) } -} - -impl NeonGenesisNode { - /// Instantiate and initialize a new node, given a config - pub fn new( - config: Config, - mut event_dispatcher: EventDispatcher, - burnchain: Burnchain, - boot_block_exec: Box ()>, - ) -> Self { - let keychain = Keychain::default(config.node.seed.clone()); - let initial_balances = config - .initial_balances - .iter() - .map(|e| (e.address.clone(), e.amount)) - .collect(); - - let mut boot_data = - ChainStateBootData::new(&burnchain, initial_balances, Some(boot_block_exec)); - - // do the initial open! 
- let (_chain_state, receipts) = match StacksChainState::open_and_exec( - config.is_mainnet(), - config.burnchain.chain_id, - &config.get_chainstate_path_str(), - Some(&mut boot_data), - ) { - Ok(res) => res, - Err(err) => panic!( - "Error while opening chain state at path {}: {:?}", - config.get_chainstate_path_str(), - err - ), - }; - - event_dispatcher.process_boot_receipts(receipts); - - Self { - keychain, - config, - event_dispatcher, - burnchain, - } - } - - pub fn into_initialized_leader_node( - self, - burnchain_tip: BurnchainTip, - blocks_processed: BlocksProcessedCounter, - microblocks_processed: BlocksProcessedCounter, - coord_comms: CoordinatorChannels, - sync_comms: PoxSyncWatchdogComms, - attachments_rx: Receiver>, - atlas_config: AtlasConfig, - should_keep_running: Arc, - ) -> InitializedNeonNode { - let config = self.config; - let keychain = self.keychain; - let event_dispatcher = self.event_dispatcher; - - InitializedNeonNode::new( - config, - keychain, - event_dispatcher, - Some(burnchain_tip), - true, - blocks_processed, - microblocks_processed, - coord_comms, - sync_comms, - self.burnchain, - attachments_rx, - atlas_config, - should_keep_running, - ) - } - pub fn into_initialized_node( - self, - burnchain_tip: BurnchainTip, - blocks_processed: BlocksProcessedCounter, - microblocks_processed: BlocksProcessedCounter, - coord_comms: CoordinatorChannels, - sync_comms: PoxSyncWatchdogComms, - attachments_rx: Receiver>, - atlas_config: AtlasConfig, - should_keep_running: Arc, - ) -> InitializedNeonNode { - let config = self.config; - let keychain = self.keychain; - let event_dispatcher = self.event_dispatcher; - - InitializedNeonNode::new( - config, - keychain, - event_dispatcher, - Some(burnchain_tip), - false, - blocks_processed, - microblocks_processed, - coord_comms, - sync_comms, - self.burnchain, - attachments_rx, - atlas_config, - should_keep_running, - ) + pub fn join(self) { + self.relayer_thread_handle.join().unwrap(); + self.p2p_thread_handle.join().unwrap(); } } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 3636003a4a..0c1c2912cf 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -1,8 +1,16 @@ use std::cmp; use std::sync::atomic::{AtomicBool, Ordering}; + +#[cfg(test)] +use std::sync::atomic::AtomicU64; + use std::sync::mpsc::sync_channel; +use std::sync::mpsc::Receiver; use std::sync::Arc; use std::thread; +use std::thread::JoinHandle; + +use std::collections::HashSet; use stacks::deps::ctrlc as termination; use stacks::deps::ctrlc::SignalId; @@ -16,37 +24,112 @@ use stacks::chainstate::coordinator::{ check_chainstate_db_versions, BlockEventDispatcher, ChainsCoordinator, CoordinatorCommunication, }; use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; -use stacks::net::atlas::{AtlasConfig, Attachment}; +use stacks::net::atlas::{AtlasConfig, Attachment, AttachmentInstance}; use stx_genesis::GenesisData; use crate::monitoring::start_serving_monitoring_metrics; +use crate::neon_node::StacksNode; use crate::node::use_test_genesis_chainstate; -use crate::syncctl::PoxSyncWatchdog; +use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms}; use crate::{ node::{get_account_balances, get_account_lockups, get_names, get_namespaces}, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain, - NeonGenesisNode, }; use super::RunLoopCallbacks; use libc; pub const STDERR: i32 = 2; -/// Coordinating a node running in neon 
mode. #[cfg(test)] -pub struct RunLoop { - config: Config, - pub callbacks: RunLoopCallbacks, - blocks_processed: std::sync::Arc, - microblocks_processed: std::sync::Arc, - coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>, -} +pub type RunLoopCounter = Arc; #[cfg(not(test))] +pub type RunLoopCounter = (); + +#[derive(Clone)] +pub struct Counters { + pub blocks_processed: RunLoopCounter, + pub microblocks_processed: RunLoopCounter, + pub missed_tenures: RunLoopCounter, + pub missed_microblock_tenures: RunLoopCounter, + pub cancelled_commits: RunLoopCounter, +} + +impl Counters { + #[cfg(test)] + pub fn new() -> Counters { + Counters { + blocks_processed: RunLoopCounter::new(AtomicU64::new(0)), + microblocks_processed: RunLoopCounter::new(AtomicU64::new(0)), + missed_tenures: RunLoopCounter::new(AtomicU64::new(0)), + missed_microblock_tenures: RunLoopCounter::new(AtomicU64::new(0)), + cancelled_commits: RunLoopCounter::new(AtomicU64::new(0)), + } + } + + #[cfg(not(test))] + pub fn new() -> Counters { + Counters { + blocks_processed: (), + microblocks_processed: (), + missed_tenures: (), + missed_microblock_tenures: (), + cancelled_commits: (), + } + } + + #[cfg(test)] + fn inc(ctr: &RunLoopCounter) { + ctr.fetch_add(1, Ordering::SeqCst); + } + + #[cfg(not(test))] + fn inc(_ctr: &RunLoopCounter) {} + + #[cfg(test)] + fn set(ctr: &RunLoopCounter, value: u64) { + ctr.store(value, Ordering::SeqCst); + } + + #[cfg(not(test))] + fn set(_ctr: &RunLoopCounter, _value: u64) {} + + pub fn bump_blocks_processed(&self) { + Counters::inc(&self.blocks_processed); + } + + pub fn bump_microblocks_processed(&self) { + Counters::inc(&self.microblocks_processed); + } + + pub fn bump_missed_tenures(&self) { + Counters::inc(&self.missed_tenures); + } + + pub fn bump_missed_microblock_tenures(&self) { + Counters::inc(&self.missed_microblock_tenures); + } + + pub fn bump_cancelled_commits(&self) { + Counters::inc(&self.cancelled_commits); + } + + pub fn set_microblocks_processed(&self, value: u64) { + Counters::set(&self.microblocks_processed, value) + } +} + +/// Coordinating a node running in neon mode. pub struct RunLoop { config: Config, pub callbacks: RunLoopCallbacks, + counters: Counters, coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>, + should_keep_running: Arc, + event_dispatcher: EventDispatcher, + pox_watchdog: Option, // can't be instantiated until .start() is called + is_miner: Option, // not known until .start() is called + burnchain: Option, // not known until .start() is called } /// Write to stderr in an async-safe manner. @@ -69,25 +152,25 @@ fn async_safe_write_stderr(msg: &str) { impl RunLoop { /// Sets up a runloop and node, given a config. 
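+    /// The configured event observers are registered here, so the dispatcher exists before any thread starts.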
- #[cfg(not(test))] pub fn new(config: Config) -> Self { let channels = CoordinatorCommunication::instantiate(); - Self { - config, - coordinator_channels: Some(channels), - callbacks: RunLoopCallbacks::new(), + let should_keep_running = Arc::new(AtomicBool::new(true)); + + let mut event_dispatcher = EventDispatcher::new(); + for observer in config.events_observers.iter() { + event_dispatcher.register_observer(observer, should_keep_running.clone()); } - } - #[cfg(test)] - pub fn new(config: Config) -> Self { - let channels = CoordinatorCommunication::instantiate(); Self { config, coordinator_channels: Some(channels), callbacks: RunLoopCallbacks::new(), - blocks_processed: std::sync::Arc::new(std::sync::atomic::AtomicU64::new(0)), - microblocks_processed: std::sync::Arc::new(std::sync::atomic::AtomicU64::new(0)), + counters: Counters::new(), + should_keep_running: should_keep_running, + event_dispatcher, + pox_watchdog: None, + is_miner: None, + burnchain: None, } } @@ -95,46 +178,69 @@ impl RunLoop { self.coordinator_channels.as_ref().map(|x| x.1.clone()) } - #[cfg(test)] - pub fn get_blocks_processed_arc(&self) -> std::sync::Arc { - self.blocks_processed.clone() + pub fn get_blocks_processed_arc(&self) -> RunLoopCounter { + self.counters.blocks_processed.clone() } - #[cfg(not(test))] - fn get_blocks_processed_arc(&self) {} + pub fn get_microblocks_processed_arc(&self) -> RunLoopCounter { + self.counters.microblocks_processed.clone() + } - #[cfg(test)] - pub fn get_microblocks_processed_arc(&self) -> std::sync::Arc { - self.microblocks_processed.clone() + pub fn get_missed_tenures_arc(&self) -> RunLoopCounter { + self.counters.missed_tenures.clone() } - #[cfg(not(test))] - fn get_microblocks_processed_arc(&self) {} + pub fn get_missed_microblock_tenures_arc(&self) -> RunLoopCounter { + self.counters.missed_microblock_tenures.clone() + } - #[cfg(test)] - fn bump_blocks_processed(&self) { - self.blocks_processed - .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + pub fn get_cancelled_commits_arc(&self) -> RunLoopCounter { + self.counters.cancelled_commits.clone() } - #[cfg(not(test))] - fn bump_blocks_processed(&self) {} + pub fn get_counters(&self) -> Counters { + self.counters.clone() + } - /// Starts the testnet runloop. - /// - /// This function will block by looping infinitely. - /// It will start the burnchain (separate thread), set-up a channel in - /// charge of coordinating the new blocks coming from the burnchain and - /// the nodes, taking turns on tenures. 
-    pub fn start(&mut self, burnchain_opt: Option<Burnchain>, mut mine_start: u64) {
-        let (coordinator_receivers, coordinator_senders) = self
-            .coordinator_channels
-            .take()
-            .expect("Run loop already started, can only start once after initialization.");
+    pub fn config(&self) -> &Config {
+        &self.config
+    }
 
-        let should_keep_running = Arc::new(AtomicBool::new(true));
-        let keep_running_writer = should_keep_running.clone();
+    pub fn get_event_dispatcher(&self) -> EventDispatcher {
+        self.event_dispatcher.clone()
+    }
+
+    pub fn is_miner(&self) -> bool {
+        self.is_miner.unwrap_or(false)
+    }
+
+    pub fn get_pox_sync_comms(&self) -> PoxSyncWatchdogComms {
+        self.pox_watchdog
+            .as_ref()
+            .expect("FATAL: tried to get PoX watchdog before calling .start()")
+            .make_comms_handle()
+    }
+
+    pub fn get_termination_switch(&self) -> Arc<AtomicBool> {
+        self.should_keep_running.clone()
+    }
+
+    pub fn get_burnchain(&self) -> Burnchain {
+        self.burnchain
+            .clone()
+            .expect("FATAL: tried to get runloop burnchain before calling .start()")
+    }
+
+    pub fn get_pox_watchdog(&mut self) -> &mut PoxSyncWatchdog {
+        self.pox_watchdog
+            .as_mut()
+            .expect("FATAL: tried to get PoX watchdog before calling .start()")
+    }
+
+    /// Set up termination handler. Have a signal set the `should_keep_running` atomic bool to
+    /// false. Panics if called more than once.
+    fn setup_termination_handler(&self) {
+        let keep_running_writer = self.should_keep_running.clone();
         let install = termination::set_handler(move |sig_id| match sig_id {
             SignalId::Bus => {
                 let msg = "Caught SIGBUS; crashing immediately and dumping core\n";
@@ -157,30 +263,12 @@ impl RunLoop {
                 panic!("FATAL: error setting termination handler - {}", e);
             }
         }
+    }
 
-        // Initialize and start the burnchain.
-        let mut burnchain = BitcoinRegtestController::with_burnchain(
-            self.config.clone(),
-            Some(coordinator_senders.clone()),
-            burnchain_opt,
-            Some(should_keep_running.clone()),
-        );
-
-        let pox_constants = burnchain.get_pox_constants();
-        let epochs = burnchain.get_stacks_epochs();
-        if !check_chainstate_db_versions(
-            &epochs,
-            &self.config.get_burn_db_file_path(),
-            &self.config.get_chainstate_path_str(),
-        )
-        .expect("FATAL: unable to query filesystem or databases for version information")
-        {
-            panic!(
-                "FATAL: chainstate database(s) are not compatible with the current system epoch"
-            );
-        }
-
-        let is_miner = if self.config.node.miner {
+    /// Determine if we're the miner.
+    /// If there's a network error, then assume that we're not a miner.
+    fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool {
+        if self.config.node.miner {
             let keychain = Keychain::default(self.config.node.seed.clone());
             let node_address = Keychain::address_from_burnchain_signer(
                 &keychain.get_burnchain_signer(),
@@ -191,7 +279,7 @@ impl RunLoop {
                 BitcoinAddressType::PublicKeyHash,
                 &node_address.to_bytes(),
             )
-            .unwrap();
+            .expect("FATAL: unable to determine Bitcoin address for miner");
 
             info!("Miner node: checking UTXOs at address: {}", btc_addr);
 
             match burnchain.create_wallet_if_dne() {
@@ -216,28 +304,90 @@
         } else {
             info!("Will run as a Follower node");
             false
-        };
+        }
+    }
 
-        let burnchain_config = burnchain.get_burnchain();
-        let mut target_burnchain_block_height = 1.max(burnchain_config.first_block_height);
+    /// Instantiate the burnchain client and databases.
+    /// Fetches headers and instantiates the burnchain.
+    /// Panics on failure.
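+    /// Returns the burnchain controller; the caller keeps it and later borrows its sortition DB from it.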
+ fn instantiate_burnchain_state( + &mut self, + burnchain_opt: Option, + coordinator_senders: CoordinatorChannels, + ) -> BitcoinRegtestController { + // Initialize and start the burnchain. + let mut burnchain_controller = BitcoinRegtestController::with_burnchain( + self.config.clone(), + Some(coordinator_senders), + burnchain_opt, + Some(self.should_keep_running.clone()), + ); + + let burnchain_config = burnchain_controller.get_burnchain(); + let epochs = burnchain_controller.get_stacks_epochs(); + if !check_chainstate_db_versions( + &epochs, + &self.config.get_burn_db_file_path(), + &self.config.get_chainstate_path_str(), + ) + .expect("FATAL: unable to query filesystem or databases for version information") + { + error!( + "FATAL: chainstate database(s) are not compatible with the current system epoch" + ); + panic!(); + } info!("Start syncing Bitcoin headers, feel free to grab a cup of coffee, this can take a while"); - match burnchain.start(Some(target_burnchain_block_height)) { + + let target_burnchain_block_height = match burnchain_config + .get_highest_burnchain_block() + .expect("FATAL: failed to access burnchain database") + { + Some(burnchain_tip) => { + // database exists already, and has blocks -- just sync to its tip. + let target_height = burnchain_tip.block_height + 1; + debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {}", burnchain_tip.block_height, target_height); + target_height + } + None => { + // database does not exist yet + let target_height = 1.max(burnchain_config.first_block_height + 1); + debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {}", target_height); + target_height + } + }; + + match burnchain_controller.start(Some(target_burnchain_block_height)) { Ok(_) => {} Err(e) => { error!("Burnchain controller stopped: {}", e); - return; + panic!(); } }; // Invoke connect() to perform any db instantiation early - if let Err(e) = burnchain.connect_dbs() { + if let Err(e) = burnchain_controller.connect_dbs() { error!("Failed to connect to burnchain databases: {}", e); - return; + panic!(); }; - let mainnet = self.config.is_mainnet(); - let chainid = self.config.burnchain.chain_id; + // TODO (hack) instantiate the sortdb in the burnchain + let _ = burnchain_controller.sortdb_mut(); + burnchain_controller + } + + /// Instantiate the Stacks chain state and start the chains coordinator thread. + /// Returns the coordinator thread handle, and the receiving end of the coordinator's atlas + /// attachment channel. 
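+    /// Chainstate boot receipts are dispatched to the event observers before the thread is spawned.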
+ fn spawn_chains_coordinator( + &mut self, + burnchain_config: &Burnchain, + coordinator_receivers: CoordinatorReceivers, + ) -> (JoinHandle<()>, Receiver>) { + let use_test_genesis_data = use_test_genesis_chainstate(&self.config); + + // load up genesis balances let initial_balances = self .config .initial_balances @@ -245,15 +395,8 @@ impl RunLoop { .map(|e| (e.address.clone(), e.amount)) .collect(); - // setup dispatcher - let mut event_dispatcher = EventDispatcher::new(); - for observer in self.config.events_observers.iter() { - event_dispatcher.register_observer(observer, should_keep_running.clone()); - } - - let use_test_genesis_data = use_test_genesis_chainstate(&self.config); - - let mut atlas_config = AtlasConfig::default(false); + // load up genesis Atlas attachments + let mut atlas_config = AtlasConfig::default(self.config.is_mainnet()); let genesis_attachments = GenesisData::new(use_test_genesis_data) .read_name_zonefiles() .into_iter() @@ -261,20 +404,14 @@ impl RunLoop { .collect(); atlas_config.genesis_attachments = Some(genesis_attachments); - let mut coordinator_dispatcher = event_dispatcher.clone(); - - let chainstate_path = self.config.get_chainstate_path_str(); - let coordinator_burnchain_config = burnchain_config.clone(); - - let (attachments_tx, attachments_rx) = sync_channel(1); - + // instantiate chainstate let mut boot_data = ChainStateBootData { initial_balances, post_flight_callback: None, - first_burnchain_block_hash: coordinator_burnchain_config.first_block_hash, - first_burnchain_block_height: coordinator_burnchain_config.first_block_height as u32, - first_burnchain_block_timestamp: coordinator_burnchain_config.first_block_timestamp, - pox_constants: coordinator_burnchain_config.pox_constants.clone(), + first_burnchain_block_hash: burnchain_config.first_block_hash, + first_burnchain_block_height: burnchain_config.first_block_height as u32, + first_burnchain_block_timestamp: burnchain_config.first_block_timestamp, + pox_constants: burnchain_config.pox_constants.clone(), get_bulk_initial_lockups: Some(Box::new(move || { get_account_lockups(use_test_genesis_data) })), @@ -288,17 +425,20 @@ impl RunLoop { }; let (chain_state_db, receipts) = StacksChainState::open_and_exec( - mainnet, - chainid, - &chainstate_path, + self.config.is_mainnet(), + self.config.burnchain.chain_id, + &self.config.get_chainstate_path_str(), Some(&mut boot_data), ) .unwrap(); - coordinator_dispatcher.dispatch_boot_receipts(receipts); + self.event_dispatcher.dispatch_boot_receipts(receipts); - let atlas_config = AtlasConfig::default(mainnet); - let moved_atlas_config = atlas_config.clone(); + // NOTE: re-instantiate AtlasConfig so we don't have to keep the genesis attachments around + let moved_atlas_config = AtlasConfig::default(self.config.is_mainnet()); let moved_config = self.config.clone(); + let moved_burnchain_config = burnchain_config.clone(); + let mut coordinator_dispatcher = self.event_dispatcher.clone(); + let (attachments_tx, attachments_rx) = sync_channel(1); let coordinator_thread_handle = thread::Builder::new() .name("chains-coordinator".to_string()) @@ -308,7 +448,7 @@ impl RunLoop { ChainsCoordinator::run( chain_state_db, - coordinator_burnchain_config, + moved_burnchain_config, attachments_tx, &mut coordinator_dispatcher, coordinator_receivers, @@ -317,64 +457,43 @@ impl RunLoop { fee_estimator.as_deref_mut(), ); }) - .unwrap(); + .expect("FATAL: failed to start chains coordinator thread"); - // We announce a new burn block so that the chains coordinator - // can resume 
prior work and handle eventual unprocessed sortitions - // stored during a previous session. - coordinator_senders.announce_new_burn_block(); - - let mut burnchain_tip = burnchain - .wait_for_sortitions(None) - .expect("Unable to get burnchain tip"); + (coordinator_thread_handle, attachments_rx) + } - let chainstate_path = self.config.get_chainstate_path_str(); - let mut pox_watchdog = PoxSyncWatchdog::new( - mainnet, - chainid, - chainstate_path, + /// Instantiate the PoX watchdog + fn instantiate_pox_watchdog(&mut self) { + let pox_watchdog = PoxSyncWatchdog::new( + self.config.is_mainnet(), + self.config.burnchain.chain_id, + self.config.get_chainstate_path_str(), self.config.burnchain.poll_time_secs, self.config.connection_options.timeout, self.config.node.pox_sync_sample_secs, self.config.node.pox_sync_sample_secs == 0, - should_keep_running.clone(), + self.should_keep_running.clone(), ) - .unwrap(); + .expect("FATAL: failed to instantiate PoX sync watchdog"); + self.pox_watchdog = Some(pox_watchdog); + } - // setup genesis - let node = NeonGenesisNode::new( - self.config.clone(), - event_dispatcher, - burnchain_config.clone(), - Box::new(|_| {}), - ); - let mut node = if is_miner { - node.into_initialized_leader_node( - burnchain_tip.clone(), - self.get_blocks_processed_arc(), - self.get_microblocks_processed_arc(), - coordinator_senders.clone(), - pox_watchdog.make_comms_handle(), - attachments_rx, - atlas_config, - should_keep_running.clone(), - ) - } else { - node.into_initialized_node( - burnchain_tip.clone(), - self.get_blocks_processed_arc(), - self.get_microblocks_processed_arc(), - coordinator_senders.clone(), - pox_watchdog.make_comms_handle(), - attachments_rx, - atlas_config, - should_keep_running.clone(), - ) - }; + /// Start Prometheus logging + fn start_prometheus(&mut self) { + let prometheus_bind = self.config.node.prometheus_bind.clone(); + if let Some(prometheus_bind) = prometheus_bind { + thread::Builder::new() + .name("prometheus".to_string()) + .spawn(move || { + start_serving_monitoring_metrics(prometheus_bind); + }) + .unwrap(); + } + } - // TODO (hack) instantiate the sortdb in the burnchain - let sortdb = burnchain.sortdb_mut(); - let mut block_height = { + /// Get the sortition DB's highest block height + fn get_sortition_db_height(sortdb: &SortitionDB, burnchain_config: &Burnchain) -> u64 { + let sortition_db_height = { let (stacks_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) .expect("BUG: failed to load canonical stacks chain tip hash"); @@ -394,156 +513,219 @@ impl RunLoop { } } }; + sortition_db_height + } - // Start the runloop - trace!("Begin run loop"); - self.bump_blocks_processed(); + /// Starts the node runloop. + /// + /// This function will block by looping infinitely. + /// It will start the burnchain (separate thread), set-up a channel in + /// charge of coordinating the new blocks coming from the burnchain and + /// the nodes, taking turns on tenures. 
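+    /// This can only be called once: it takes the coordinator channels, and panics if they are already gone.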
+ pub fn start(&mut self, burnchain_opt: Option, mut mine_start: u64) { + let (coordinator_receivers, coordinator_senders) = self + .coordinator_channels + .take() + .expect("Run loop already started, can only start once after initialization."); - let prometheus_bind = self.config.node.prometheus_bind.clone(); - if let Some(prometheus_bind) = prometheus_bind { - thread::Builder::new() - .name("prometheus".to_string()) - .spawn(move || { - start_serving_monitoring_metrics(prometheus_bind); - }) - .unwrap(); - } + self.setup_termination_handler(); + let mut burnchain = + self.instantiate_burnchain_state(burnchain_opt, coordinator_senders.clone()); + + let burnchain_config = burnchain.get_burnchain(); + self.burnchain = Some(burnchain_config.clone()); + + let is_miner = self.check_is_miner(&mut burnchain); + self.is_miner = Some(is_miner); + + // have headers; boot up the chains coordinator and instantiate the chain state + let (coordinator_thread_handle, attachments_rx) = + self.spawn_chains_coordinator(&burnchain_config, coordinator_receivers); + self.instantiate_pox_watchdog(); + + // We announce a new burn block so that the chains coordinator + // can resume prior work and handle eventual unprocessed sortitions + // stored during a previous session. + coordinator_senders.announce_new_burn_block(); + + // Wait for some sortitions! + let mut burnchain_tip = burnchain + .wait_for_sortitions(None) + .expect("Unable to get burnchain tip"); + + // Boot up the p2p network and relayer, and figure out how many sortitions we have so far + // (it could be non-zero if the node is resuming from chainstate) + let mut node = StacksNode::spawn( + self, + Some(burnchain_tip.clone()), + coordinator_senders.clone(), + attachments_rx, + ); + let sortdb = burnchain.sortdb_mut(); + let mut sortition_db_height = RunLoop::get_sortition_db_height(&sortdb, &burnchain_config); + + // Start the runloop + debug!("Begin run loop"); + self.start_prometheus(); + self.counters.bump_blocks_processed(); - let mut burnchain_height = block_height; + let mut burnchain_height = sortition_db_height; let mut num_sortitions_in_last_cycle = 1; - let mut learned_burnchain_height = false; // prepare to fetch the first reward cycle! - target_burnchain_block_height = burnchain_height + pox_constants.reward_cycle_length as u64; + let mut target_burnchain_block_height = burnchain_config.reward_cycle_to_block_height( + burnchain_config + .block_height_to_reward_cycle(burnchain_height) + .expect("BUG: block height is not in a reward cycle") + + 1, + ); debug!( "Begin main runloop starting a burnchain block {}", - block_height + sortition_db_height ); - let mut last_block_height = 0; + let mut last_tenure_sortition_height = 0; loop { - // Orchestrating graceful termination - if !should_keep_running.load(Ordering::SeqCst) { + if !self.should_keep_running.load(Ordering::SeqCst) { // The p2p thread relies on the same atomic_bool, it will // discontinue its execution after completing its ongoing runloop epoch. 
info!("Terminating p2p process"); info!("Terminating relayer"); info!("Terminating chains-coordinator"); - coordinator_senders.stop_chains_coordinator(); + coordinator_senders.stop_chains_coordinator(); coordinator_thread_handle.join().unwrap(); - node.relayer_thread_handle.join().unwrap(); - node.p2p_thread_handle.join().unwrap(); + node.join(); info!("Exiting stacks-node"); break; } + let remote_chain_height = burnchain.get_headers_height(); + // wait for the p2p state-machine to do at least one pass - debug!("Wait until we reach steady-state before processing more burnchain blocks..."); + debug!("Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); - // wait until it's okay to process the next sortitions - let ibd = match pox_watchdog.pox_sync_wait( + // wait until it's okay to process the next reward cycle's sortitions + let ibd = match self.get_pox_watchdog().pox_sync_wait( &burnchain_config, &burnchain_tip, - if learned_burnchain_height { - Some(burnchain_height) - } else { - None - }, + Some(remote_chain_height), num_sortitions_in_last_cycle, ) { Ok(ibd) => ibd, Err(e) => { - debug!("Pox sync wait routine aborted: {:?}", e); + debug!("PoX sync wait routine aborted: {:?}", e); continue; } }; - // will recalculate this - num_sortitions_in_last_cycle = 0; - let (next_burnchain_tip, next_burnchain_height) = - match burnchain.sync(Some(target_burnchain_block_height)) { - Ok(x) => x, - Err(e) => { - warn!("Burnchain controller stopped: {}", e); - continue; - } - }; + // will recalculate this in the following loop + num_sortitions_in_last_cycle = 0; - target_burnchain_block_height = cmp::min( - next_burnchain_height, - target_burnchain_block_height + pox_constants.reward_cycle_length as u64, - ); + // Download each burnchain block and process their sortitions. This, in turn, will + // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and + // process them. This loop runs for one reward cycle, so that the next pass of the + // runloop will cause the PoX sync watchdog to wait until it believes that the node has + // obtained all the Stacks blocks it can. 
+ while burnchain_height <= target_burnchain_block_height { + if !self.should_keep_running.load(Ordering::SeqCst) { + break; + } - // *now* we know the burnchain height - learned_burnchain_height = true; - burnchain_tip = next_burnchain_tip; - burnchain_height = next_burnchain_height; + let (next_burnchain_tip, tip_burnchain_height) = + match burnchain.sync(Some(burnchain_height + 1)) { + Ok(x) => x, + Err(e) => { + warn!("Burnchain controller stopped: {}", e); + continue; + } + }; - let sortition_tip = &burnchain_tip.block_snapshot.sortition_id; - let next_height = burnchain_tip.block_snapshot.block_height; + // *now* we know the burnchain height + burnchain_tip = next_burnchain_tip; + burnchain_height = cmp::min(burnchain_height + 1, tip_burnchain_height); - if next_height != last_block_height { - info!( - "Downloaded burnchain blocks up to height {}; new target height is {}; next_height = {}, block_height = {}", - next_burnchain_height, target_burnchain_block_height, next_height, block_height - ); - } + let sortition_tip = &burnchain_tip.block_snapshot.sortition_id; + let next_sortition_height = burnchain_tip.block_snapshot.block_height; - if next_height > block_height { - debug!( - "New burnchain block height {} > {}", - next_height, block_height - ); + if next_sortition_height != last_tenure_sortition_height { + info!( + "Downloaded burnchain blocks up to height {}; target height is {}; next_sortition_height = {}, sortition_db_height = {}", + burnchain_height, target_burnchain_block_height, next_sortition_height, sortition_db_height + ); + } - let mut sort_count = 0; + if next_sortition_height > sortition_db_height { + debug!( + "New burnchain block height {} > {}", + next_sortition_height, sortition_db_height + ); - // first, let's process all blocks in (block_height, next_height] - for block_to_process in (block_height + 1)..(next_height + 1) { - let block = { - let ic = burnchain.sortdb_ref().index_conn(); - SortitionDB::get_ancestor_snapshot(&ic, block_to_process, sortition_tip) - .unwrap() - .expect("Failed to find block in fork processed by burnchain indexer") - }; - if block.sortition { - sort_count += 1; + let mut sort_count = 0; + + // first, let's process all blocks in (sortition_db_height, next_sortition_height] + for block_to_process in (sortition_db_height + 1)..(next_sortition_height + 1) { + let block = { + let ic = burnchain.sortdb_ref().index_conn(); + SortitionDB::get_ancestor_snapshot(&ic, block_to_process, sortition_tip) + .unwrap() + .expect( + "Failed to find block in fork processed by burnchain indexer", + ) + }; + if block.sortition { + sort_count += 1; + } + + let sortition_id = &block.sortition_id; + + // Have the node process the new block, that can include, or not, a sortition. + node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd); + + // Now, tell the relayer to check if it won a sortition during this block, + // and, if so, to process and advertize the block. This is basically a + // no-op during boot-up. + // + // _this will block if the relayer's buffer is full_ + if !node.relayer_sortition_notify() { + // relayer hung up, exit. + error!("Block relayer and miner hung up, exiting."); + return; + } } - let sortition_id = &block.sortition_id; - - // Have the node process the new block, that can include, or not, a sortition. 
- node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd); + num_sortitions_in_last_cycle = sort_count; + debug!( + "Synchronized burnchain up to block height {} from {} (chain tip height is {}); {} sortitions", + next_sortition_height, sortition_db_height, burnchain_height, num_sortitions_in_last_cycle; + ); - // Now, tell the relayer to check if it won a sortition during this block, - // and, if so, to process and advertize the block - // - // _this will block if the relayer's buffer is full_ - if !node.relayer_sortition_notify() { - // relayer hung up, exit. - error!("Block relayer and miner hung up, exiting."); - return; - } + sortition_db_height = next_sortition_height; + } else if ibd { + // drive block processing after we reach the burnchain tip. + // we may have downloaded all the blocks already, + // so we can't rely on the relayer alone to + // drive it. + coordinator_senders.announce_new_stacks_block(); } - num_sortitions_in_last_cycle = sort_count; - debug!( - "Synchronized burnchain up to block height {} from {} (chain tip height is {}); {} sortitions", - next_height, block_height, burnchain_height, num_sortitions_in_last_cycle; - ); - - block_height = next_height; - } else if ibd { - // drive block processing after we reach the burnchain tip. - // we may have downloaded all the blocks already, - // so we can't rely on the relayer alone to - // drive it. - coordinator_senders.announce_new_stacks_block(); + if burnchain_height == target_burnchain_block_height + || burnchain_height == tip_burnchain_height + { + break; + } } - if block_height >= burnchain_height && !ibd { + target_burnchain_block_height = burnchain_config.reward_cycle_to_block_height( + burnchain_config + .block_height_to_reward_cycle(burnchain_height) + .expect("BUG: block height is not in a reward cycle") + + 1, + ); + + if sortition_db_height >= burnchain_height && !ibd { let canonical_stacks_tip_height = SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn()) .map(|snapshot| snapshot.canonical_stacks_tip_height) @@ -560,12 +742,12 @@ impl RunLoop { mine_start = 0; // at tip, and not downloading. proceed to mine. - if last_block_height != block_height { + if last_tenure_sortition_height != sortition_db_height { info!( "Synchronized full burnchain up to height {}. Proceeding to mine blocks", - block_height + sortition_db_height ); - last_block_height = block_height; + last_tenure_sortition_height = sortition_db_height; } if !node.relayer_issue_tenure() { // relayer hung up, exit. 
diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 97678cbff9..62ed4b8e32 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -2,6 +2,7 @@ use stacks::util::get_epoch_time_secs; use std::collections::HashMap; use std::env; use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; use std::sync::Arc; use std::thread; @@ -21,6 +22,7 @@ use stacks::types::chainstate::BurnchainHeaderHash; use stacks::types::chainstate::StacksAddress; use stacks::types::chainstate::StacksBlockHeader; use stacks::util::hash::hex_bytes; +use stacks::util::sleep_ms; use stacks::vm::types::PrincipalData; use stacks::vm::ContractName; use std::convert::TryFrom; @@ -32,6 +34,7 @@ use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::make_contract_call; use crate::tests::make_contract_call_mblock_only; use crate::tests::make_contract_publish; +use crate::tests::make_contract_publish_microblock_only; use crate::tests::neon_integrations::*; use crate::tests::to_addr; use crate::BitcoinRegtestController; @@ -913,3 +916,350 @@ fn test_cost_limit_switch_version205() { channel.stop_chains_coordinator(); } + +// mine a stream of microblocks, and verify that microblock streams can get bigger after the epoch +// transition +#[test] +#[ignore] +fn bigger_microblock_streams_in_2_05() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let spender_sks: Vec<_> = (0..10) + .into_iter() + .map(|_| StacksPrivateKey::new()) + .collect(); + let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + + let txs: Vec> = spender_sks + .iter() + .enumerate() + .map(|(ix, spender_sk)| { + // almost fills a whole block + make_contract_publish_microblock_only( + spender_sk, + 0, + 1049230, + &format!("large-{}", ix), + &format!(" + ;; a single one of these transactions consumes over half the runtime budget + (define-constant BUFF_TO_BYTE (list + 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f + 0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f + 0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f + 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f + 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f + 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f + 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f + 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f + 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f + 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f + 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf + 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf + 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf + 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf + 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef + 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff + )) + (define-private (crash-me-folder (input (buff 1)) (ctr uint)) + (begin + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic 
(index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (+ u1 ctr) + ) + ) + (define-public (crash-me (name (string-ascii 128))) + (begin + (fold crash-me-folder BUFF_TO_BYTE u0) + (print name) + (ok u0) + ) + ) + (begin + (crash-me \"{}\")) + ", + &format!("large-contract-{}", &ix) + ) + ) + }) + .collect(); + + let (mut conf, miner_account) = neon_integration_test_conf(); + + for spender_addr in spender_addrs.iter() { + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 10492300000, + }); + } + + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 0; + conf.node.max_microblocks = 65536; + conf.burnchain.max_rbf = 1000000; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + conf.burnchain.epochs = Some(vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 206, + block_limit: ExecutionCost { + write_length: 15000000, + write_count: 7750, + read_length: 100000000, + read_count: 7750, + runtime: 5000000000, + }, + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 206, + end_height: 9223372036854775807, + block_limit: ExecutionCost { + write_length: 15000000, + write_count: 7750 * 2, + read_length: 100000000, + read_count: 7750 * 2, + runtime: 5000000000, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + ]); + + test_observer::spawn(); + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let microblocks_processed = run_loop.get_microblocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(None, 0)); + + // give the run loop some time to start up! 
+    wait_for_runloop(&blocks_processed);
+
+    // zeroth block wakes up the run loop
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // first block will hold our VRF registration
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // second block will be the first mined Stacks block
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // let's query the miner's account nonce:
+    let account = get_account(&http_origin, &miner_account);
+    assert_eq!(account.nonce, 1);
+    assert_eq!(account.balance, 0);
+
+    for spender_addr in spender_addrs.iter() {
+        let account = get_account(&http_origin, &spender_addr);
+        assert_eq!(account.nonce, 0);
+        assert_eq!(account.balance, 10492300000);
+    }
+
+    let mut ctr = 0;
+    while ctr < txs.len() {
+        submit_tx(&http_origin, &txs[ctr]);
+        if !wait_for_microblocks(&microblocks_processed, 30) {
+            // we time out if we *can't* mine any more microblocks
+            break;
+        }
+        ctr += 1;
+    }
+    microblocks_processed.store(0, Ordering::SeqCst);
+
+    // only one fit
+    assert_eq!(ctr, 1);
+    sleep_ms(5_000);
+
+    // confirm it
+    eprintln!("confirm epoch 2.0 microblock stream");
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // send the rest of the transactions
+    while ctr < txs.len() {
+        submit_tx(&http_origin, &txs[ctr]);
+        ctr += 1;
+    }
+
+    eprintln!("expect epoch transition");
+
+    microblocks_processed.store(0, Ordering::SeqCst);
+
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+    // don't bother waiting for a microblock stream
+
+    eprintln!("expect epoch 2.05 microblock stream");
+
+    microblocks_processed.store(0, Ordering::SeqCst);
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+    wait_for_microblocks(&microblocks_processed, 180);
+
+    microblocks_processed.store(0, Ordering::SeqCst);
+
+    // this test can sometimes miss a mine block event.
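+    // sleeping first gives the just-mined stream time to be processed and announced before the next block.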
+ sleep_ms(120_000); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let mut epoch_20_stream_cost = ExecutionCost::zero(); + let mut epoch_205_stream_cost = ExecutionCost::zero(); + + // max == largest number of transactions per stream in a given epoch (2.0 or 2.05) + // total == number of transactions across all streams in a given epoch (2.0 or 2.05) + let mut max_big_txs_per_microblock_20 = 0; + let mut total_big_txs_per_microblock_20 = 0; + + let mut max_big_txs_per_microblock_205 = 0; + let mut total_big_txs_per_microblock_205 = 0; + + let mut in_205; + let mut have_confirmed_205_stream; + + for i in 0..10 { + let blocks = test_observer::get_blocks(); + + max_big_txs_per_microblock_20 = 0; + total_big_txs_per_microblock_20 = 0; + + max_big_txs_per_microblock_205 = 0; + total_big_txs_per_microblock_205 = 0; + + in_205 = false; + have_confirmed_205_stream = false; + + // NOTE: this only counts the number of txs per stream, not in each microblock + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + eprintln!("{}", transactions.len()); + + let mut num_big_microblock_txs = 0; + let mut total_execution_cost = ExecutionCost::zero(); + + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::SmartContract(tsc) = parsed.payload { + if tsc.name.to_string().find("costs-2").is_some() { + in_205 = true; + } else if tsc.name.to_string().find("large").is_some() { + num_big_microblock_txs += 1; + if in_205 { + total_big_txs_per_microblock_205 += 1; + } else { + total_big_txs_per_microblock_20 += 1; + } + } + } + let execution_cost = tx.get("execution_cost").unwrap(); + total_execution_cost.read_count += + execution_cost.get("read_count").unwrap().as_i64().unwrap() as u64; + total_execution_cost.read_length += + execution_cost.get("read_length").unwrap().as_i64().unwrap() as u64; + total_execution_cost.write_count += + execution_cost.get("write_count").unwrap().as_i64().unwrap() as u64; + total_execution_cost.write_length += execution_cost + .get("write_length") + .unwrap() + .as_i64() + .unwrap() as u64; + total_execution_cost.runtime += + execution_cost.get("runtime").unwrap().as_i64().unwrap() as u64; + } + if in_205 && num_big_microblock_txs > max_big_txs_per_microblock_205 { + max_big_txs_per_microblock_205 = num_big_microblock_txs; + } + if !in_205 && num_big_microblock_txs > max_big_txs_per_microblock_20 { + max_big_txs_per_microblock_20 = num_big_microblock_txs; + } + + eprintln!("Epoch size: {:?}", &total_execution_cost); + + if !in_205 && total_execution_cost.exceeds(&epoch_20_stream_cost) { + epoch_20_stream_cost = total_execution_cost; + break; + } + if in_205 && total_execution_cost.exceeds(&ExecutionCost::zero()) { + have_confirmed_205_stream = true; + epoch_205_stream_cost = total_execution_cost; + break; + } + } + + if have_confirmed_205_stream { + break; + } else { + eprintln!("Trying to confirm a stream again (attempt {})", i + 1); + sleep_ms((i + 2) * 60_000); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + } + + eprintln!( + "max_big_txs_per_microblock_20: {}, total_big_txs_per_microblock_20: {}", + max_big_txs_per_microblock_20, total_big_txs_per_microblock_20 + ); + eprintln!( + "max_big_txs_per_microblock_205: {}, 
total_big_txs_per_microblock_205: {}", + max_big_txs_per_microblock_205, total_big_txs_per_microblock_205 + ); + eprintln!( + "confirmed stream execution in 2.0: {:?}", + &epoch_20_stream_cost + ); + eprintln!( + "confirmed stream execution in 2.05: {:?}", + &epoch_205_stream_cost + ); + + // stuff happened + assert!(epoch_20_stream_cost.runtime > 0); + assert!(epoch_205_stream_cost.runtime > 0); + + // more stuff happened in epoch 2.05 + assert!(epoch_205_stream_cost.read_count > epoch_20_stream_cost.read_count); + assert!(epoch_205_stream_cost.read_length > epoch_20_stream_cost.read_length); + assert!(epoch_205_stream_cost.write_count > epoch_20_stream_cost.write_count); + assert!(epoch_205_stream_cost.write_length > epoch_20_stream_cost.write_length); + + // but epoch 2.05 was *cheaper* in terms of CPU + assert!(epoch_205_stream_cost.runtime < epoch_20_stream_cost.runtime); + + test_observer::clear(); + channel.stop_chains_coordinator(); +} diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 7b56fbfb92..25b6d5e6bf 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -849,6 +849,18 @@ fn integration_test_get_info() { eprintln!("Test: GET {}", path); assert!(!res.is_implemented); + // test query parameters for v2/trait endpoint + // evaluate check for explicit compliance against the chain tip of the first block (contract DNE at that block) + let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip=753d84de5c475a85abd0eeb3ac87da03ff0f794507b60a3f66356425bc1dedaf", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); + let res = client.get(&path).send().unwrap(); + eprintln!("Test: GET {}", path); + assert_eq!(res.text().unwrap(), "No contract analysis found or trait definition not found"); + + // evaluate check for explicit compliance where tip is the chain tip of the first block (contract DNE at that block), but tip is "latest" + let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip=latest", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); + let res = client.get(&path).send().unwrap().json::().unwrap(); + eprintln!("Test: GET {}", path); + assert!(res.is_implemented); // perform some tests of the fee rate interface let path = format!("{}/v2/fees/transaction", &http_origin); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 579f0ad509..28fa6e958f 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -21,7 +21,7 @@ use stacks::core; use stacks::core::CHAIN_ID_TESTNET; use stacks::net::atlas::{AtlasConfig, AtlasDB, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST}; use stacks::net::{ - AccountEntryResponse, GetAttachmentResponse, GetAttachmentsInvResponse, + AccountEntryResponse, ContractSrcResponse, GetAttachmentResponse, GetAttachmentsInvResponse, PostTransactionRequestBody, RPCPeerInfoData, }; use stacks::types::chainstate::{ @@ -29,7 +29,7 @@ use stacks::types::chainstate::{ StacksMicroblockHeader, }; use stacks::util::hash::Hash160; -use stacks::util::hash::{bytes_to_hex, hex_bytes}; +use stacks::util::hash::{bytes_to_hex, hex_bytes, to_hex}; use stacks::util::secp256k1::Secp256k1PublicKey; use stacks::util::{get_epoch_time_ms, get_epoch_time_secs, sleep_ms}; use stacks::vm::database::ClarityDeserializable; @@ -46,7 +46,7 @@ use stacks::{ use stacks::{ 
chainstate::stacks::{ db::StacksChainState, StacksBlock, StacksPrivateKey, StacksPublicKey, StacksTransaction, - TransactionPayload, + TransactionContractCall, TransactionPayload, }, net::RPCPoxInfoData, util::db::query_row_columns, @@ -71,6 +71,15 @@ use super::{ make_microblock, make_stacks_transfer, make_stacks_transfer_mblock_only, to_addr, ADDR_4, SK_1, SK_2, }; +use stacks::chainstate::stacks::miner::{ + TransactionErrorEvent, TransactionEvent, TransactionSkippedEvent, TransactionSuccessEvent, +}; + +use crate::config::FeeEstimatorName; +use stacks::net::RPCFeeEstimateResponse; +use stacks::vm::ClarityName; +use stacks::vm::ContractName; +use std::convert::TryFrom; pub fn neon_integration_test_conf() -> (Config, StacksAddress) { let mut conf = super::new_test_conf(); @@ -116,13 +125,14 @@ pub mod test_observer { use warp; use warp::Filter; - use crate::event_dispatcher::MinedBlockEvent; + use crate::event_dispatcher::{MinedBlockEvent, MinedMicroblockEvent}; pub const EVENT_OBSERVER_PORT: u16 = 50303; lazy_static! { pub static ref NEW_BLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref MINED_BLOCKS: Mutex> = Mutex::new(Vec::new()); + pub static ref MINED_MICROBLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref NEW_MICROBLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref BURN_BLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref MEMTXS: Mutex> = Mutex::new(Vec::new()); @@ -154,10 +164,49 @@ pub mod test_observer { async fn handle_mined_block(block: serde_json::Value) -> Result { let mut mined_blocks = MINED_BLOCKS.lock().unwrap(); + // assert that the mined transaction events have string-y txids + block + .as_object() + .expect("Expected JSON object for mined block event") + .get("tx_events") + .expect("Expected tx_events key in mined block event") + .as_array() + .expect("Expected tx_events key to be an array in mined block event") + .iter() + .for_each(|txevent| { + let txevent_obj = txevent.as_object().expect("TransactionEvent should be object"); + let inner_obj = if let Some(inner_obj) = txevent_obj.get("Success") { + inner_obj + } else if let Some(inner_obj) = txevent_obj.get("ProcessingError") { + inner_obj + } else if let Some(inner_obj) = txevent_obj.get("Skipped") { + inner_obj + } else { + panic!("TransactionEvent object should have one of Success, ProcessingError, or Skipped") + }; + inner_obj + .as_object() + .expect("TransactionEvent should be an object") + .get("txid") + .expect("Should have txid key") + .as_str() + .expect("Expected txid to be a string"); + }); + mined_blocks.push(serde_json::from_value(block).unwrap()); Ok(warp::http::StatusCode::OK) } + /// Called by the process listening to events on a mined microblock event. The event is added + /// to the mutex-guarded vector `MINED_MICROBLOCKS`. 
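+    /// Tests read the collected events back via `get_mined_microblocks()`.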
+ async fn handle_mined_microblock( + tx_event: serde_json::Value, + ) -> Result { + let mut mined_txs = MINED_MICROBLOCKS.lock().unwrap(); + mined_txs.push(serde_json::from_value(tx_event).unwrap()); + Ok(warp::http::StatusCode::OK) + } + async fn handle_mempool_txs(txs: serde_json::Value) -> Result { let new_rawtxs = txs .as_array() @@ -229,6 +278,10 @@ pub mod test_observer { MINED_BLOCKS.lock().unwrap().clone() } + pub fn get_mined_microblocks() -> Vec { + MINED_MICROBLOCKS.lock().unwrap().clone() + } + /// each path here should correspond to one of the paths listed in `event_dispatcher.rs` async fn serve() { let new_blocks = warp::path!("new_block") @@ -259,6 +312,10 @@ pub mod test_observer { .and(warp::post()) .and(warp::body::json()) .and_then(handle_mined_block); + let mined_microblocks = warp::path!("mined_microblock") + .and(warp::post()) + .and(warp::body::json()) + .and_then(handle_mined_microblock); info!("Spawning warp server"); warp::serve( @@ -268,7 +325,8 @@ pub mod test_observer { .or(new_burn_blocks) .or(new_attachments) .or(new_microblocks) - .or(mined_blocks), + .or(mined_blocks) + .or(mined_microblocks), ) .run(([127, 0, 0, 1], EVENT_OBSERVER_PORT)) .await @@ -277,7 +335,7 @@ pub mod test_observer { pub fn spawn() { clear(); thread::spawn(|| { - let mut rt = tokio::runtime::Runtime::new().expect("Failed to initialize tokio"); + let rt = tokio::runtime::Runtime::new().expect("Failed to initialize tokio"); rt.block_on(serve()); }); } @@ -321,6 +379,37 @@ pub fn next_block_and_wait( true } +/// This function will call `next_block_and_wait` until the burnchain height underlying `BitcoinRegtestController` +/// reaches *exactly* `target_height`. +/// +/// Returns `false` if `next_block_and_wait` times out. +fn run_until_burnchain_height( + btc_regtest_controller: &mut BitcoinRegtestController, + blocks_processed: &Arc, + target_height: u64, + conf: &Config, +) -> bool { + let tip_info = get_chain_info(&conf); + let mut current_height = tip_info.burn_block_height; + + while current_height < target_height { + eprintln!( + "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({})", + get_epoch_time_secs(), + current_height + ); + let next_result = next_block_and_wait(btc_regtest_controller, &blocks_processed); + if !next_result { + return false; + } + let tip_info = get_chain_info(&conf); + current_height = tip_info.burn_block_height; + } + + assert_eq!(current_height, target_height); + true +} + pub fn wait_for_runloop(blocks_processed: &Arc) { let start = Instant::now(); while blocks_processed.load(Ordering::SeqCst) == 0 { @@ -331,10 +420,12 @@ pub fn wait_for_runloop(blocks_processed: &Arc) { } } -fn wait_for_microblocks(microblocks_processed: &Arc, timeout: u64) -> bool { +/// Wait for at least one microblock to be mined, up to a given timeout (in seconds). +/// Returns true if the microblock was mined; false if we timed out. 
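+/// The counter should be the handle returned by `RunLoop::get_microblocks_processed_arc()`.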
+pub fn wait_for_microblocks(microblocks_processed: &Arc<AtomicU64>, timeout: u64) -> bool {
     let mut current = microblocks_processed.load(Ordering::SeqCst);
     let start = Instant::now();
-    info!("Waiting for next microblock");
+    info!("Waiting for next microblock (current = {})", &current);
     loop {
         let now = microblocks_processed.load(Ordering::SeqCst);
         if now == 0 && current != 0 {
@@ -351,12 +442,13 @@
         }

         if start.elapsed() > Duration::from_secs(timeout) {
-            warn!("Timed out waiting for microblocks to process");
+            warn!("Timed out waiting for microblocks to process ({})", timeout);
             return false;
         }

         thread::sleep(Duration::from_millis(100));
     }

+    info!("Next microblock acknowledged");
     return true;
 }
@@ -439,6 +531,12 @@ fn find_microblock_privkey(
     return None;
 }

+/// Returns true iff `b` is within `0.1%` of `a`.
+fn is_close_f64(a: f64, b: f64) -> bool {
+    let error = (a - b).abs() / a.abs();
+    error < 0.001
+}
+
 #[test]
 #[ignore]
 fn bitcoind_integration_test() {
@@ -699,6 +797,33 @@ fn get_chain_tip_height(http_origin: &str) -> u64 {
     res.stacks_tip_height
 }

+fn get_contract_src(
+    http_origin: &str,
+    contract_addr: StacksAddress,
+    contract_name: String,
+    use_latest_tip: bool,
+) -> Result<String, String> {
+    let client = reqwest::blocking::Client::new();
+    let query_string = if use_latest_tip {
+        "?tip=latest".to_string()
+    } else {
+        "".to_string()
+    };
+    let path = format!(
+        "{}/v2/contracts/source/{}/{}{}",
+        http_origin, contract_addr, contract_name, query_string
+    );
+    let res = client.get(&path).send().unwrap();
+
+    if res.status().is_success() {
+        let contract_src_res = res.json::<ContractSrcResponse>().unwrap();
+        Ok(contract_src_res.source)
+    } else {
+        let err_str = res.text().unwrap();
+        Err(err_str)
+    }
+}
+
 #[test]
 #[ignore]
 fn liquid_ustx_integration() {
@@ -1326,7 +1451,8 @@ fn bitcoind_forking_test() {
     btc_regtest_controller.invalidate_block(&burn_header_hash_to_fork);
     btc_regtest_controller.build_next_block(5);

-    thread::sleep(Duration::from_secs(5));
+    thread::sleep(Duration::from_secs(50));
+    eprintln!("Wait for block off of shallow fork");
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

     let account = get_account(&http_origin, &miner_account);
@@ -1345,7 +1471,8 @@ fn bitcoind_forking_test() {
     btc_regtest_controller.invalidate_block(&burn_header_hash_to_fork);
     btc_regtest_controller.build_next_block(10);

-    thread::sleep(Duration::from_secs(5));
+    thread::sleep(Duration::from_secs(50));
+    eprintln!("Wait for block off of deep fork");
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

     let account = get_account(&http_origin, &miner_account);
@@ -1357,8 +1484,9 @@ fn bitcoind_forking_test() {
     let account = get_account(&http_origin, &miner_account);
     assert_eq!(account.balance, 0);
     // but we're able to keep on mining
-    assert_eq!(account.nonce, 3);
+    assert!(account.nonce >= 3);

+    eprintln!("End of test");
     channel.stop_chains_coordinator();
 }
@@ -1485,8 +1613,9 @@ fn microblock_integration_test() {
     });

     conf.node.mine_microblocks = true;
-    conf.node.wait_time_for_microblocks = 10_000;
     conf.node.microblock_frequency = 1_000;
+    conf.miner.microblock_attempt_time_ms = 1_000;
+    conf.node.wait_time_for_microblocks = 0;

     test_observer::spawn();
@@ -1554,14 +1683,18 @@ fn microblock_integration_test() {
     // now let's mine a couple blocks, and then check the sender's nonce.
     // this one wakes up our node, so that it'll mine a microblock _and_ an anchor block.
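     // (a microblock stream is only *confirmed* once a later anchor block builds on it,
     // which is why several more blocks are mined, with pauses, below)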
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(10_000); + // this one will contain the sortition from above anchor block, // which *should* have also confirmed the microblock. info!("Wait for second block"); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(10_000); // I guess let's push another block for good measure? info!("Wait for third block"); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(10_000); info!("Test microblock"); @@ -1665,16 +1798,17 @@ fn microblock_integration_test() { let tip_info = loop { let tip_info = get_chain_info(&conf); eprintln!("{:#?}", tip_info); - if tip_info.unanchored_tip == StacksBlockId([0; 32]) { - iter_count += 1; - assert!( - iter_count < 10, - "Hit retry count while waiting for net module to process pushed microblock" - ); - sleep_ms(5_000); - continue; - } else { - break tip_info; + match tip_info.unanchored_tip { + None => { + iter_count += 1; + assert!( + iter_count < 10, + "Hit retry count while waiting for net module to process pushed microblock" + ); + sleep_ms(5_000); + continue; + } + Some(_tip) => break tip_info, } }; @@ -1831,7 +1965,9 @@ fn microblock_integration_test() { // we can query unconfirmed state from the microblock we announced let path = format!( "{}/v2/accounts/{}?proof=0&tip={}", - &http_origin, &spender_addr, &tip_info.unanchored_tip + &http_origin, + &spender_addr, + &tip_info.unanchored_tip.unwrap() ); eprintln!("{:?}", &path); @@ -1908,7 +2044,9 @@ fn microblock_integration_test() { // we can query _new_ unconfirmed state from the microblock we announced let path = format!( "{}/v2/accounts/{}?proof=0&tip={}", - &http_origin, &spender_addr, &tip_info.unanchored_tip + &http_origin, + &spender_addr, + &tip_info.unanchored_tip.unwrap() ); let res_text = client.get(&path).send().unwrap().text().unwrap(); @@ -2300,7 +2438,8 @@ fn size_check_integration_test() { conf.node.mine_microblocks = true; conf.node.wait_time_for_microblocks = 5000; - conf.node.microblock_frequency = 1000; + conf.node.microblock_frequency = 5000; + conf.miner.microblock_attempt_time_ms = 120_000; conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; @@ -2477,6 +2616,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { conf.node.mine_microblocks = true; conf.node.wait_time_for_microblocks = 5_000; conf.node.microblock_frequency = 5_000; + conf.miner.microblock_attempt_time_ms = 120_000; conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; @@ -2671,6 +2811,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { conf.node.mine_microblocks = true; conf.node.wait_time_for_microblocks = 1000; conf.node.microblock_frequency = 1000; + conf.miner.microblock_attempt_time_ms = 120_000; conf.node.max_microblocks = 65536; conf.burnchain.max_rbf = 1000000; @@ -2861,6 +3002,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { conf.node.mine_microblocks = true; conf.node.wait_time_for_microblocks = 5_000; conf.node.microblock_frequency = 1_000; + conf.miner.microblock_attempt_time_ms = 120_000; conf.node.max_microblocks = 65536; conf.burnchain.max_rbf = 1000000; @@ -3129,6 +3271,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { conf.node.mine_microblocks = true; conf.node.wait_time_for_microblocks = 0; conf.node.microblock_frequency = 15000; + conf.miner.microblock_attempt_time_ms = 120_000; conf.miner.min_tx_fee = 1; 
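    // (first_attempt_time_ms at i64::MAX pins the miner to its first block attempt for
    // the whole test, the same pattern used by the other size/overflow tests above)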
conf.miner.first_attempt_time_ms = i64::max_value() as u64; @@ -3714,6 +3857,205 @@ fn cost_voting_integration() { channel.stop_chains_coordinator(); } +#[test] +#[ignore] +fn mining_events_integration_test() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let small_contract = "(define-public (f) (ok 1))".to_string(); + + let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let addr = to_addr(&spender_sk); + + let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); + let addr_2 = to_addr(&spender_sk_2); + + let tx = make_contract_publish(&spender_sk, 0, 600000, "small", &small_contract); + let tx_2 = make_contract_publish(&spender_sk, 1, 610000, "small", &small_contract); + let mb_tx = + make_contract_publish_microblock_only(&spender_sk_2, 0, 620000, "small", &small_contract); + + let (mut conf, _) = neon_integration_test_conf(); + + conf.initial_balances.push(InitialBalance { + address: addr.clone().into(), + amount: 10000000, + }); + conf.initial_balances.push(InitialBalance { + address: addr_2.clone().into(), + amount: 10000000, + }); + + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 30000; + conf.node.microblock_frequency = 1000; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![ + EventKeyType::AnyEvent, + EventKeyType::MinedBlocks, + EventKeyType::MinedMicroblocks, + ], + }); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(None, 0)); + + // give the run loop some time to start up! 
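+    // (wait_for_runloop polls the shared `blocks_processed` counter until it leaves zero)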
+    wait_for_runloop(&blocks_processed);
+
+    // first block wakes up the run loop
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // second block will hold our VRF registration
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // third block will be the first mined Stacks block
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    submit_tx(&http_origin, &tx); // should succeed
+    submit_tx(&http_origin, &tx_2); // should fail since it tries to publish a contract with the same name
+    submit_tx(&http_origin, &mb_tx); // should land in a microblock since it is microblock-only
+
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // check that the nonces have gone up
+    let res = get_account(&http_origin, &addr);
+    assert_eq!(res.nonce, 1);
+
+    let res = get_account(&http_origin, &addr_2);
+    assert_eq!(res.nonce, 1);
+
+    // check mined microblock events
+    let mined_microblock_events = test_observer::get_mined_microblocks();
+    assert!(mined_microblock_events.len() >= 1);
+
+    // check tx events in the first microblock
+    // 1 success (the microblock-only contract publish), 2 skipped (the on-chain-only transactions)
+    let microblock_tx_events = &mined_microblock_events[0].tx_events;
+    assert_eq!(microblock_tx_events.len(), 3);
+
+    // contract publish
+    match &microblock_tx_events[0] {
+        TransactionEvent::Success(TransactionSuccessEvent {
+            result,
+            fee,
+            execution_cost,
+            ..
+        }) => {
+            assert_eq!(result.clone().expect_result_ok().expect_bool(), true);
+            assert_eq!(fee, &620000);
+            assert_eq!(
+                execution_cost,
+                &ExecutionCost {
+                    write_length: 35,
+                    write_count: 2,
+                    read_length: 1,
+                    read_count: 1,
+                    runtime: 311000
+                }
+            )
+        }
+        _ => panic!("unexpected event type"),
+    }
+    for i in 1..3 {
+        // on-chain-only transactions will be skipped in a microblock
+        match &microblock_tx_events[i] {
+            TransactionEvent::Skipped(TransactionSkippedEvent { error, .. }) => {
+                assert_eq!(error, "Invalid transaction anchor mode for streamed data");
+            }
+            _ => panic!("unexpected event type"),
+        }
+    }
+
+    // check mined block events
+    let mined_block_events = test_observer::get_mined_blocks();
+    assert!(mined_block_events.len() >= 3);
+
+    // check the tx events in the third mined block
+    // 2 success: 1 coinbase tx event + 1 contract publish, 1 error (duplicate contract)
+    let third_block_tx_events = &mined_block_events[2].tx_events;
+    assert_eq!(third_block_tx_events.len(), 3);
+
+    // coinbase event
+    match &third_block_tx_events[0] {
+        TransactionEvent::Success(TransactionSuccessEvent { txid, result, .. }) => {
+            assert_eq!(
+                txid.to_string(),
+                "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6"
+            );
+            assert_eq!(result.clone().expect_result_ok().expect_bool(), true);
+        }
+        _ => panic!("unexpected event type"),
+    }
+
+    // contract publish event
+    match &third_block_tx_events[1] {
+        TransactionEvent::Success(TransactionSuccessEvent {
+            result,
+            fee,
+            execution_cost,
+            ..
+ }) => { + assert_eq!(result.clone().expect_result_ok().expect_bool(), true); + assert_eq!(fee, &600000); + assert_eq!( + execution_cost, + &ExecutionCost { + write_length: 35, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 311000 + } + ) + } + _ => panic!("unexpected event type"), + } + + // dupe contract error event + match &third_block_tx_events[2] { + TransactionEvent::ProcessingError(TransactionErrorEvent { txid: _, error }) => { + assert_eq!( + error, + "Duplicate contract 'ST3WM51TCWMJYGZS1QFMC28DH5YP86782YGR113C1.small'" + ); + } + _ => panic!("unexpected event type"), + } + + test_observer::clear(); + channel.stop_chains_coordinator(); +} + #[test] #[ignore] fn near_full_block_integration_test() { @@ -3742,14 +4084,14 @@ fn near_full_block_integration_test() { ); let spender_sk = StacksPrivateKey::new(); - let addr = to_addr(&spender_sk); + let spender_addr = to_addr(&spender_sk); - let tx = make_contract_publish(&spender_sk, 0, 58450, "max", &max_contract_src); + let tx = make_contract_publish(&spender_sk, 0, 59070, "max", &max_contract_src); let (mut conf, miner_account) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: spender_addr.clone().into(), amount: 10000000, }); @@ -3797,7 +4139,7 @@ fn near_full_block_integration_test() { assert_eq!(account.nonce, 1); assert_eq!(account.balance, 0); - let account = get_account(&http_origin, &addr); + let account = get_account(&http_origin, &spender_addr); assert_eq!(account.nonce, 0); assert_eq!(account.balance, 10000000); @@ -3809,7 +4151,7 @@ fn near_full_block_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let res = get_account(&http_origin, &addr); + let res = get_account(&http_origin, &spender_addr); assert_eq!(res.nonce, 1); test_observer::clear(); @@ -4093,7 +4435,7 @@ fn pox_integration_test() { // let's stack with spender 2 and spender 3... - // now let's have sender_2 and sender_3 stack to pox addr 2 in + // now let's have sender_2 and sender_3 stack to pox spender_addr 2 in // two different txs, and make sure that they sum together in the reward set. let tx = make_contract_call( @@ -5844,3 +6186,497 @@ fn atlas_stress_integration_test() { test_observer::clear(); } + +/// Run a fixed contract 20 times. Linearly increase the amount paid each time. The cost of the +/// contract should stay the same, and the fee rate paid should monotonically grow. The value +/// should grow faster for lower values of `window_size`, because a bigger window slows down the +/// growth. +fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value: f64) { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let max_contract_src = r#" +;; define counter variable +(define-data-var counter int 0) + +;; increment method +(define-public (increment) + (begin + (var-set counter (+ (var-get counter) 1)) + (ok (var-get counter)))) + + (define-public (increment-many) + (begin + (unwrap! (increment) (err u1)) + (unwrap! (increment) (err u1)) + (unwrap! (increment) (err u1)) + (unwrap! (increment) (err u1)) + (ok (var-get counter)))) + "# + .to_string(); + + let spender_sk = StacksPrivateKey::new(); + let spender_addr = to_addr(&spender_sk); + + let (mut conf, _) = neon_integration_test_conf(); + + // Set this estimator as special. + conf.estimation.fee_estimator = Some(FeeEstimatorName::FuzzedWeightedMedianFeeRate); + // Use randomness of 0 to keep test constant. 
Randomness is tested in unit tests.
+    conf.estimation.fee_rate_fuzzer_fraction = 0f64;
+    conf.estimation.fee_rate_window_size = window_size;
+
+    conf.initial_balances.push(InitialBalance {
+        address: spender_addr.clone().into(),
+        amount: 10000000000,
+    });
+    test_observer::spawn();
+    conf.events_observers.push(EventObserverConfig {
+        endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT),
+        events_keys: vec![EventKeyType::AnyEvent],
+    });
+
+    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .map_err(|_e| ())
+        .expect("Failed starting bitcoind");
+
+    let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
+    let http_origin = format!("http://{}", &conf.node.rpc_bind);
+
+    btc_regtest_controller.bootstrap_chain(200);
+
+    eprintln!("Chain bootstrapped...");
+
+    let mut run_loop = neon::RunLoop::new(conf.clone());
+    let blocks_processed = run_loop.get_blocks_processed_arc();
+
+    let channel = run_loop.get_coordinator_channel().unwrap();
+
+    thread::spawn(move || run_loop.start(None, 0));
+
+    wait_for_runloop(&blocks_processed);
+    run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 210, &conf);
+
+    submit_tx(
+        &http_origin,
+        &make_contract_publish(
+            &spender_sk,
+            0,
+            110000,
+            "increment-contract",
+            &max_contract_src,
+        ),
+    );
+    run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 212, &conf);
+
+    // Loop 20 times. Each time, execute the same transaction, but increase the amount *paid*.
+    // This will exercise the window size.
+    let mut response_estimated_costs = vec![];
+    let mut response_top_fee_rates = vec![];
+    for i in 1..21 {
+        submit_tx(
+            &http_origin,
+            &make_contract_call(
+                &spender_sk,
+                i,          // nonce
+                i * 100000, // payment
+                &spender_addr.into(),
+                "increment-contract",
+                "increment-many",
+                &[],
+            ),
+        );
+        run_until_burnchain_height(
+            &mut btc_regtest_controller,
+            &blocks_processed,
+            212 + 2 * i,
+            &conf,
+        );
+
+        {
+            // Read from the fee estimation endpoint.
+            let path = format!("{}/v2/fees/transaction", &http_origin);
+
+            let tx_payload = TransactionPayload::ContractCall(TransactionContractCall {
+                address: spender_addr.clone().into(),
+                contract_name: ContractName::try_from("increment-contract").unwrap(),
+                function_name: ClarityName::try_from("increment-many").unwrap(),
+                function_args: vec![],
+            });
+
+            let payload_data = tx_payload.serialize_to_vec();
+            let payload_hex = format!("0x{}", to_hex(&payload_data));
+
+            let body = json!({ "transaction_payload": payload_hex.clone() });
+
+            let client = reqwest::blocking::Client::new();
+            let fee_rate_result = client
+                .post(&path)
+                .json(&body)
+                .send()
+                .expect("Should be able to post")
+                .json::<RPCFeeEstimateResponse>()
+                .expect("Failed to parse result into JSON");
+
+            response_estimated_costs.push(fee_rate_result.estimated_cost_scalar);
+            response_top_fee_rates.push(fee_rate_result.estimations.last().unwrap().fee_rate);
+        }
+    }
+
+    // Wait two extra blocks to be sure.
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    assert_eq!(response_estimated_costs.len(), response_top_fee_rates.len());
+
+    // Check that:
+    // 1) The cost is always the same.
+    // 2) The fee rate grows monotonically.
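+    // (with fee_rate_fuzzer_fraction set to 0 above, the estimates are deterministic, so
+    // exact comparisons across iterations are safe)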
+    for i in 1..response_estimated_costs.len() {
+        let curr_cost = response_estimated_costs[i];
+        let last_cost = response_estimated_costs[i - 1];
+        assert_eq!(curr_cost, last_cost);
+
+        let curr_rate = response_top_fee_rates[i] as f64;
+        let last_rate = response_top_fee_rates[i - 1] as f64;
+        assert!(curr_rate >= last_rate);
+    }
+
+    // Check that the final value is near the input parameter.
+    assert!(is_close_f64(
+        *response_top_fee_rates.last().unwrap(),
+        expected_final_value
+    ));
+
+    channel.stop_chains_coordinator();
+}
+
+/// Test the FuzzedWeightedMedianFeeRate with window size 5 and randomness 0. We increase the
+/// amount paid linearly each time. This estimate should grow *faster* than with window size 10.
+#[test]
+#[ignore]
+fn fuzzed_median_fee_rate_estimation_test_window5() {
+    fuzzed_median_fee_rate_estimation_test(5, 202680.0992)
+}
+
+/// Test the FuzzedWeightedMedianFeeRate with window size 10 and randomness 0. We increase the
+/// amount paid linearly each time. This estimate should grow *slower* than with window size 5.
+#[test]
+#[ignore]
+fn fuzzed_median_fee_rate_estimation_test_window10() {
+    fuzzed_median_fee_rate_estimation_test(10, 90080.5496)
+}
+
+#[test]
+#[ignore]
+fn use_latest_tip_integration_test() {
+    // The purpose of this test is to check that setting the query parameter `tip` to `latest`
+    // works as expected. Multiple endpoints accept this parameter; this test exercises it
+    // through the GetContractSrc method.
+    //
+    // The following scenarios are tested here:
+    // - The caller does not specify the tip parameter, and the canonical chain tip is used
+    //   regardless of the state of the unconfirmed microblock stream.
+    // - The caller passes tip=latest with an existing unconfirmed microblock stream, and
+    //   Clarity state from the unconfirmed microblock stream is successfully loaded.
+    // - The caller passes tip=latest with an empty unconfirmed microblock stream, and
+    //   Clarity state from the canonical chain tip is successfully loaded (i.e. you don't
+    //   get a 404 even though the unconfirmed chain tip points to a nonexistent MARF trie).
+    //
+    // Note: In this test, we manually create a microblock and reload the unconfirmed state
+    // of the chainstate, instead of relying on `next_block_and_wait` to generate microblocks.
+    // We do this because the unconfirmed state is not automatically initialized on the node,
+    // so attempting to validate any transactions against the expected unconfirmed state
+    // would fail.
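+    // (`get_contract_src` above appends `?tip=latest` to the /v2/contracts/source request
+    // when its `use_latest_tip` flag is set)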
+ if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let spender_stacks_addr = to_addr(&spender_sk); + let spender_addr: PrincipalData = spender_stacks_addr.into(); + + let (mut conf, _) = neon_integration_test_conf(); + + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 100300, + }); + + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 10_000; + conf.node.microblock_frequency = 1_000; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + + thread::spawn(move || run_loop.start(None, 0)); + + // Give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // First block wakes up the run loop. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // Second block will hold our VRF registration. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // Third block will be the first mined Stacks block. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // Let's query our first spender. + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.balance, 100300); + assert_eq!(account.nonce, 0); + + // this call wakes up our node + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // Open chainstate. + // TODO (hack) instantiate the sortdb in the burnchain + let _ = btc_regtest_controller.sortdb_mut(); + let (consensus_hash, stacks_block) = get_tip_anchored_block(&conf); + let tip_hash = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash()); + let (mut chainstate, _) = + StacksChainState::open(false, CHAIN_ID_TESTNET, &conf.get_chainstate_path_str()).unwrap(); + + // Initialize the unconfirmed state. + chainstate + .reload_unconfirmed_state(&btc_regtest_controller.sortdb_ref().index_conn(), tip_hash) + .unwrap(); + + // Make microblock with two transactions. 
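+    // (both transactions are microblock-only, so they can only appear in the stream we sign
+    // below with the miner's microblock key recovered by `find_microblock_privkey`)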
+    let recipient = StacksAddress::from_string(ADDR_4).unwrap();
+    let transfer_tx =
+        make_stacks_transfer_mblock_only(&spender_sk, 0, 1000, &recipient.into(), 1000);
+
+    let caller_src = "
+    (define-public (execute)
+       (ok stx-liquid-supply))
+    ";
+    let publish_tx =
+        make_contract_publish_microblock_only(&spender_sk, 1, 1000, "caller", caller_src);
+
+    let tx_1 = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap();
+    let tx_2 = StacksTransaction::consensus_deserialize(&mut &publish_tx[..]).unwrap();
+    let vec_tx = vec![tx_1, tx_2];
+    let privk =
+        find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024).unwrap();
+    let mblock = make_microblock(
+        &privk,
+        &mut chainstate,
+        &btc_regtest_controller.sortdb_ref().index_conn(),
+        consensus_hash,
+        stacks_block.clone(),
+        vec_tx,
+    );
+    let mut mblock_bytes = vec![];
+    mblock.consensus_serialize(&mut mblock_bytes).unwrap();
+
+    let client = reqwest::blocking::Client::new();
+
+    // Post the microblock
+    let path = format!("{}/v2/microblocks", &http_origin);
+    let res: String = client
+        .post(&path)
+        .header("Content-Type", "application/octet-stream")
+        .body(mblock_bytes.clone())
+        .send()
+        .unwrap()
+        .json()
+        .unwrap();
+
+    assert_eq!(res, format!("{}", &mblock.block_hash()));
+
+    // Wait for the microblock to be accepted
+    sleep_ms(5_000);
+    let path = format!("{}/v2/info", &http_origin);
+    let mut iter_count = 0;
+    loop {
+        let tip_info = client
+            .get(&path)
+            .send()
+            .unwrap()
+            .json::<RPCPeerInfoData>()
+            .unwrap();
+        eprintln!("{:#?}", tip_info);
+        if tip_info.unanchored_tip == Some(StacksBlockId([0; 32])) {
+            iter_count += 1;
+            assert!(
+                iter_count < 10,
+                "Hit retry count while waiting for net module to process pushed microblock"
+            );
+            sleep_ms(5_000);
+            continue;
+        } else {
+            break;
+        }
+    }
+
+    // Wait at least two p2p refreshes so the node can produce the microblock.
+    for i in 0..30 {
+        info!(
+            "wait {} more seconds for microblock miner to find our transaction...",
+            30 - i
+        );
+        sleep_ms(1000);
+    }
+
+    // Check the event observer for the new microblock event (expect 1).
+    let microblock_events = test_observer::get_microblocks();
+    assert_eq!(microblock_events.len(), 1);
+
+    // Don't set the tip parameter, and ask for the source of the contract we just defined in a
+    // microblock. This should fail because the anchored tip is unaware of this contract.
+    let err_opt = get_contract_src(
+        &http_origin,
+        spender_stacks_addr,
+        "caller".to_string(),
+        false,
+    );
+    match err_opt {
+        Ok(_) => {
+            panic!(
+                "Asking for the contract source off the anchored tip for a contract published \
+                 only in unconfirmed state should error."
+            );
+        }
+        // Expect to get "NoSuchContract", because the contract we are querying only exists
+        // in unconfirmed state (and we did not set tip).
+        Err(err_str) => {
+            assert!(err_str.contains("No contract source data found"));
+        }
+    }
+
+    // Set tip=latest, and ask for the source of the contract defined in the microblock.
+    // This should succeed.
+    assert!(get_contract_src(
+        &http_origin,
+        spender_stacks_addr,
+        "caller".to_string(),
+        true,
+    )
+    .is_ok());
+
+    // Mine an anchored block, because now we want to have no unconfirmed state.
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // Check that the underlying trie for the unconfirmed state does not exist.
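+    // (the anchored block mined above advanced the confirmed tip, so the node's unconfirmed
+    // chain tip now names a MARF trie that was never materialized)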
+    assert!(chainstate.unconfirmed_state.is_some());
+    let unconfirmed_state = chainstate.unconfirmed_state.as_mut().unwrap();
+    let trie_exists = match unconfirmed_state
+        .clarity_inst
+        .trie_exists_for_block(&unconfirmed_state.unconfirmed_chain_tip)
+    {
+        Ok(res) => res,
+        Err(e) => {
+            panic!("error when determining whether or not trie exists: {:?}", e);
+        }
+    };
+    assert!(!trie_exists);
+
+    // Set tip=latest, and ask for the source of the contract defined earlier in the microblock.
+    // The underlying MARF trie for the unconfirmed tip does not exist, so the query will be
+    // run against the confirmed chain tip instead of the unconfirmed tip. This should be valid.
+    assert!(get_contract_src(
+        &http_origin,
+        spender_stacks_addr,
+        "caller".to_string(),
+        true,
+    )
+    .is_ok());
+}
+
+#[test]
+#[ignore]
+fn test_flash_block_skip_tenure() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let (mut conf, miner_account) = neon_integration_test_conf();
+    conf.miner.microblock_attempt_time_ms = 5_000;
+    conf.node.wait_time_for_microblocks = 0;
+
+    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .map_err(|_e| ())
+        .expect("Failed starting bitcoind");
+
+    let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
+    let http_origin = format!("http://{}", &conf.node.rpc_bind);
+
+    btc_regtest_controller.bootstrap_chain(201);
+
+    eprintln!("Chain bootstrapped...");
+
+    let mut run_loop = neon::RunLoop::new(conf);
+    let blocks_processed = run_loop.get_blocks_processed_arc();
+    let missed_tenures = run_loop.get_missed_tenures_arc();
+
+    let channel = run_loop.get_coordinator_channel().unwrap();
+
+    thread::spawn(move || run_loop.start(None, 0));
+
+    // give the run loop some time to start up!
+    wait_for_runloop(&blocks_processed);
+
+    // first block wakes up the run loop
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // second block will hold our VRF registration
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // third block will be the first mined Stacks block
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // fault injection: force tenures to take 11 seconds
+    std::env::set_var("STX_TEST_SLOW_TENURE".to_string(), "11000".to_string());
+
+    for i in 0..10 {
+        // build one bitcoin block every 10 seconds
+        eprintln!("Build bitcoin block +{}", i);
+        btc_regtest_controller.build_next_block(1);
+        sleep_ms(10000);
+    }
+
+    // more than one tenure should have been skipped
+    let num_skipped = missed_tenures.load(Ordering::SeqCst);
+    eprintln!("Skipped {} tenures", &num_skipped);
+    assert!(num_skipped > 1);
+
+    // let's query the miner's account nonce:
+
+    eprintln!("Miner account: {}", miner_account);
+
+    let account = get_account(&http_origin, &miner_account);
+    assert_eq!(account.balance, 0);
+    assert_eq!(account.nonce, 2);
+
+    channel.stop_chains_coordinator();
+}