diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml index cd222a6e43b..791f4411747 100644 --- a/.github/workflows/build-docker-from-tag.yml +++ b/.github/workflows/build-docker-from-tag.yml @@ -23,7 +23,7 @@ concurrency: docker-build jobs: setup: name: Setup - runs-on: [ubuntu-latest] + runs-on: [ ubuntu-latest ] outputs: image_tag_suffix: ${{ steps.set.outputs.image_tag_suffix }} prover_fri_gpu_key_id: ${{ steps.extract-prover-fri-setup-key-ids.outputs.gpu_short_commit_sha }} @@ -48,7 +48,7 @@ jobs: build-push-core-images: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-core-template.yml if: contains(github.ref_name, 'core') secrets: @@ -60,7 +60,7 @@ jobs: build-push-tee-prover-images: name: Build and push images - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-tee-prover-template.yml if: contains(github.ref_name, 'core') secrets: @@ -72,7 +72,7 @@ jobs: build-push-contract-verifier: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-contract-verifier-template.yml if: contains(github.ref_name, 'contract_verifier') secrets: @@ -83,20 +83,20 @@ jobs: build-push-prover-images: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-prover-template.yml if: contains(github.ref_name, 'prover') with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - CUDA_ARCH: "60;70;75;89" + CUDA_ARCH: "60;70;75;80;89" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} build-push-witness-generator-image-avx512: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-witness-generator-template.yml if: contains(github.ref_name, 'prover') with: @@ -110,7 +110,7 @@ jobs: build-gar-prover-fri-gpu: name: Build GAR prover FRI GPU - needs: [setup, build-push-prover-images] + needs: [ setup, build-push-prover-images ] uses: ./.github/workflows/build-prover-fri-gpu-gar.yml if: contains(github.ref_name, 'prover') with: diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 53bd1ab7a51..18cbc2c2afa 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -293,7 +293,7 @@ jobs: - name: Show revert.log logs if: always() - run: ci_run cat core/tests/revert-test/revert.log || true + run: ci_run cat logs/revert/default/server.log || true - name: Show upgrade.log logs if: always() @@ -382,7 +382,11 @@ jobs: - name: Run revert test run: | - ENABLE_CONSENSUS=${{ matrix.consensus }} DEPLOYMENT_MODE=${{ matrix.deployment_mode }} PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE" ci_run zk test i revert-en + ENABLE_CONSENSUS=${{ matrix.consensus }} \ + DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \ + PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE" \ + ci_run zk test i revert-en + # test terminates the nodes, so we restart them. 
if [[ "${{ matrix.deployment_mode }}" == "Rollup" ]]; then ZKSYNC_ENV=docker ci_run zk server --components=$SERVER_COMPONENTS &>>server.log & @@ -414,13 +418,13 @@ jobs: if: always() run: ci_run cat ext-node.log || true - - name: Show revert_main.log logs + - name: Show revert logs (main node) if: always() - run: ci_run cat core/tests/revert-test/revert_main.log || true + run: ci_run cat logs/revert/en/default/server.log || true - - name: Show revert_ext.log logs + - name: Show revert logs (EN) if: always() - run: ci_run cat core/tests/revert-test/revert_ext.log || true + run: ci_run cat logs/revert/en/default/external_node.log || true - name: Show upgrade.log logs if: always() diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml index 5f82df646c1..78e1e485caf 100644 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -4,6 +4,11 @@ on: env: CLICOLOR: 1 + # We run multiple binaries in parallel, and by default they will try to utilize all the + # available CPUs. In tests, there is not much CPU-intensive work (rayon), but a lot of + # async work (tokio), so we prioritize tokio. + TOKIO_WORKER_THREADS: 4 + RAYON_NUM_THREADS: 2 jobs: lint: @@ -11,7 +16,7 @@ jobs: uses: ./.github/workflows/ci-core-lint-reusable.yml tests: - runs-on: [ matterlabs-ci-runner ] + runs-on: [ matterlabs-ci-runner-ultra-performance ] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: diff --git a/Cargo.lock b/Cargo.lock index 2d6263f7ab4..b07724e23fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -778,9 +778,9 @@ dependencies = [ [[package]] name = "build_html" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3108fe6fe7ac796fb7625bdde8fa2b67b5a7731496251ca57c7b8cadd78a16a1" +checksum = "225eb82ce9e70dcc0cfa6e404d0f353326b6e163bf500ec4711cec317d11935c" [[package]] name = "bumpalo" @@ -5575,9 +5575,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -5704,9 +5704,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.189" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" dependencies = [ "serde_derive", ] @@ -5723,9 +5723,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -8143,7 +8143,6 @@ name = "zksync_circuit_breaker" version = "0.1.0" dependencies = [ "anyhow", - "assert_matches", "async-trait", "thiserror", "tokio", @@ -8188,9 +8187,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" dependencies = [ "anyhow", "once_cell", @@ -8225,9 +8224,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22e3bfe96fa30a57313e774a5e8c74ffee884abff57ecacc10e8832315ee8a2" +checksum = "b1dcab481683131c093271c19602bd495b1d682f7a94f764f2227111a0a104f0" dependencies = [ "anyhow", "async-trait", @@ -8247,9 +8246,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" +checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af" dependencies = [ "anyhow", "blst", @@ -8271,13 +8270,14 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7fcde1275970a6b8a33ea2ade5cc994d6392f95509ce374e0e7a26cde4cd6db" +checksum = "216e3d9f3df8c119e037e44c41db12fa6448dafbf1eaf5015d13b22400866980" dependencies = [ "anyhow", "async-trait", "rand 0.8.5", + "semver", "tracing", "vise", "zksync_concurrency", @@ -8292,9 +8292,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ee48bee7dae8adb2769c7315adde1780832d05ecb6a77c08cdda53a315992a" +checksum = "19d7dd832b1bbcd0a2b977b2d85986437105fd5e1e82bd4becb2e6a9db112655" dependencies = [ "anyhow", "async-trait", @@ -8309,6 +8309,7 @@ dependencies = [ "pin-project", "prost 0.12.1", "rand 0.8.5", + "semver", "snow", "thiserror", "tls-listener", @@ -8327,9 +8328,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" +checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa" dependencies = [ "anyhow", "bit-vec", @@ -8349,9 +8350,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" +checksum = "9099b2295f550176d824b5287f2f31b7739c4d10247faec1132f1c6e9d18059c" dependencies = [ "anyhow", "async-trait", @@ -8369,9 +8370,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", "rand 0.8.5", @@ -8396,7 +8397,6 @@ dependencies = [ "zksync_contracts", "zksync_dal", "zksync_eth_client", - "zksync_eth_sender", "zksync_health_check", "zksync_l1_contract_interface", "zksync_node_genesis", @@ -8463,7 +8463,6 @@ dependencies = [ "zksync_config", "zksync_contracts", "zksync_dal", - "zksync_env_config", "zksync_queued_job_processor", "zksync_types", "zksync_utils", @@ -8491,9 
+8490,7 @@ dependencies = [ "serde_yaml", "tokio", "zksync_config", - "zksync_dal", "zksync_env_config", - "zksync_node_genesis", "zksync_protobuf", "zksync_protobuf_config", ] @@ -8522,9 +8519,6 @@ dependencies = [ "anyhow", "async-trait", "serde", - "tracing", - "zksync_config", - "zksync_types", ] [[package]] @@ -8595,7 +8589,6 @@ dependencies = [ "tracing", "vise", "zksync_basic_types", - "zksync_health_check", ] [[package]] @@ -8719,7 +8712,6 @@ dependencies = [ "envy", "futures 0.3.28", "rustc_version", - "semver", "serde", "serde_json", "tempfile", @@ -9043,6 +9035,7 @@ dependencies = [ "zksync_system_constants", "zksync_types", "zksync_utils", + "zksync_vm_executor", "zksync_web3_decl", ] @@ -9052,8 +9045,10 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "hex", "rand 0.8.5", "secrecy", + "semver", "tempfile", "test-casing", "thiserror", @@ -9078,11 +9073,13 @@ dependencies = [ "zksync_node_sync", "zksync_node_test_utils", "zksync_protobuf", + "zksync_state", "zksync_state_keeper", "zksync_system_constants", "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm_interface", "zksync_web3_decl", ] @@ -9096,7 +9093,6 @@ dependencies = [ "chrono", "serde", "serde_json", - "test-casing", "test-log", "tokio", "tracing", @@ -9123,7 +9119,6 @@ dependencies = [ "zksync_config", "zksync_dal", "zksync_eth_client", - "zksync_node_test_utils", "zksync_types", "zksync_utils", "zksync_web3_decl", @@ -9139,6 +9134,7 @@ dependencies = [ "ctrlc", "futures 0.3.28", "pin-project-lite", + "semver", "thiserror", "tokio", "tracing", @@ -9156,7 +9152,6 @@ dependencies = [ "zksync_da_dispatcher", "zksync_dal", "zksync_db_connection", - "zksync_env_config", "zksync_eth_client", "zksync_eth_sender", "zksync_eth_watch", @@ -9175,7 +9170,6 @@ dependencies = [ "zksync_node_sync", "zksync_object_store", "zksync_proof_data_handler", - "zksync_protobuf_config", "zksync_queued_job_processor", "zksync_reorg_detector", "zksync_state", @@ -9335,15 +9329,14 @@ dependencies = [ "zksync_multivm", "zksync_object_store", "zksync_prover_interface", - "zksync_tee_verifier", "zksync_types", ] [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" dependencies = [ "anyhow", "bit-vec", @@ -9362,9 +9355,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" dependencies = [ "anyhow", "heck 0.5.0", @@ -9649,13 +9642,10 @@ dependencies = [ "zksync_config", "zksync_contracts", "zksync_crypto_primitives", - "zksync_dal", - "zksync_db_connection", "zksync_merkle_tree", "zksync_multivm", "zksync_object_store", "zksync_prover_interface", - "zksync_queued_job_processor", "zksync_types", "zksync_utils", ] @@ -9738,7 +9728,6 @@ dependencies = [ "bincode", "futures 0.3.28", "hex", - "itertools 0.10.5", "num", "once_cell", "rand 0.8.5", @@ -9792,6 +9781,7 @@ dependencies = [ "zksync_dal", "zksync_multivm", "zksync_types", + "zksync_utils", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d244d436b9f..075f5007be4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -218,16 +218,16 @@ 
zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.4" } vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "4ef15d46410ffc11744771a3a6c7c09dd9470c90" } # Consensus dependencies. -zksync_concurrency = "=0.1.0-rc.11" -zksync_consensus_bft = "=0.1.0-rc.11" -zksync_consensus_crypto = "=0.1.0-rc.11" -zksync_consensus_executor = "=0.1.0-rc.11" -zksync_consensus_network = "=0.1.0-rc.11" -zksync_consensus_roles = "=0.1.0-rc.11" -zksync_consensus_storage = "=0.1.0-rc.11" -zksync_consensus_utils = "=0.1.0-rc.11" -zksync_protobuf = "=0.1.0-rc.11" -zksync_protobuf_build = "=0.1.0-rc.11" +zksync_concurrency = "=0.1.0-rc.12" +zksync_consensus_bft = "=0.1.0-rc.12" +zksync_consensus_crypto = "=0.1.0-rc.12" +zksync_consensus_executor = "=0.1.0-rc.12" +zksync_consensus_network = "=0.1.0-rc.12" +zksync_consensus_roles = "=0.1.0-rc.12" +zksync_consensus_storage = "=0.1.0-rc.12" +zksync_consensus_utils = "=0.1.0-rc.12" +zksync_protobuf = "=0.1.0-rc.12" +zksync_protobuf_build = "=0.1.0-rc.12" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 498b11b279b..a1d3951ff3d 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -59,7 +59,6 @@ envy.workspace = true url.workspace = true clap = { workspace = true, features = ["derive"] } serde_json.workspace = true -semver.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index c30cc1a432b..7b94ca7a0c2 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -242,7 +242,13 @@ impl ExternalNodeBuilder { let config = self.config.consensus.clone(); let secrets = config::read_consensus_secrets().context("config::read_consensus_secrets()")?; - let layer = ExternalNodeConsensusLayer { config, secrets }; + let layer = ExternalNodeConsensusLayer { + build_version: crate::metadata::SERVER_VERSION + .parse() + .context("CRATE_VERSION.parse()")?, + config, + secrets, + }; self.node.add_layer(layer); Ok(self) } diff --git a/core/bin/verified_sources_fetcher/README.md b/core/bin/verified_sources_fetcher/README.md new file mode 100644 index 00000000000..0abddb7a884 --- /dev/null +++ b/core/bin/verified_sources_fetcher/README.md @@ -0,0 +1,4 @@ +# Verified sources fetcher + +This tool downloads verified contract sources from the `contract_verification_requests` table of the SQL database, then +saves the sources and compilation settings to files.
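For a rough picture of what such a fetcher does, here is a minimal sketch. The query shape and the `contract_name`/`source_code` column names are assumptions for illustration only, not the tool's actual implementation:

```rust
use std::fs;

use anyhow::Context as _;
use sqlx::PgPool;

/// Hypothetical sketch: dump the verified sources of one request to disk.
/// Column names are assumed for illustration.
async fn dump_request(pool: &PgPool, id: i64) -> anyhow::Result<()> {
    let (name, source): (String, String) = sqlx::query_as(
        "SELECT contract_name, source_code FROM contract_verification_requests WHERE id = $1",
    )
    .bind(id)
    .fetch_one(pool)
    .await
    .context("fetching verification request")?;

    // One directory per request, one file per contract.
    let dir = format!("sources/{id}");
    fs::create_dir_all(&dir)?;
    fs::write(format!("{dir}/{name}.sol"), source)?;
    Ok(())
}
```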
diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 36ee7d990cf..e2a0c5846b5 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -364,6 +364,7 @@ impl MainNodeBuilder { subscriptions_limit: Some(rpc_config.subscriptions_limit()), batch_request_size_limit: Some(rpc_config.max_batch_request_size()), response_body_size_limit: Some(rpc_config.max_response_body_size()), + with_extended_tracing: rpc_config.extended_api_tracing, ..Default::default() }; self.node.add_layer(Web3ServerLayer::http( diff --git a/core/lib/circuit_breaker/Cargo.toml b/core/lib/circuit_breaker/Cargo.toml index 9bc00b475d4..926002e561c 100644 --- a/core/lib/circuit_breaker/Cargo.toml +++ b/core/lib/circuit_breaker/Cargo.toml @@ -19,6 +19,3 @@ tokio = { workspace = true, features = ["time"] } anyhow.workspace = true async-trait.workspace = true tracing.workspace = true - -[dev-dependencies] -assert_matches.workspace = true diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 50885a6ec6f..e5e01f880fe 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, BTreeSet}; use secrecy::{ExposeSecret as _, Secret}; -use zksync_basic_types::L2ChainId; +use zksync_basic_types::{ethabi, L2ChainId}; use zksync_concurrency::{limiter, time}; /// `zksync_consensus_crypto::TextFmt` representation of `zksync_consensus_roles::validator::PublicKey`. @@ -89,6 +89,8 @@ pub struct GenesisSpec { /// Leader of the committee. Represents /// `zksync_consensus_roles::validator::LeaderSelectionMode::Sticky`. pub leader: ValidatorPublicKey, + /// Address of the registry contract. 
+ pub registry_address: Option<ethabi::Address>, } #[derive(Clone, Debug, PartialEq, Default)] diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 028b5e38055..bc3b6025b15 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -243,17 +243,17 @@ impl Distribution for EncodeDist { default_upgrade_addr: rng.gen(), diamond_proxy_addr: rng.gen(), validator_timelock_addr: rng.gen(), - l1_erc20_bridge_proxy_addr: rng.gen(), - l2_erc20_bridge_addr: rng.gen(), - l1_shared_bridge_proxy_addr: rng.gen(), - l2_shared_bridge_addr: rng.gen(), - l1_weth_bridge_proxy_addr: rng.gen(), - l2_weth_bridge_addr: rng.gen(), - l2_testnet_paymaster_addr: rng.gen(), + l1_erc20_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_erc20_bridge_addr: self.sample_opt(|| rng.gen()), + l1_shared_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_shared_bridge_addr: self.sample_opt(|| rng.gen()), + l1_weth_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_weth_bridge_addr: self.sample_opt(|| rng.gen()), + l2_testnet_paymaster_addr: self.sample_opt(|| rng.gen()), l1_multicall3_addr: rng.gen(), - base_token_addr: rng.gen(), - chain_admin_addr: rng.gen(), ecosystem_contracts: self.sample(rng), + base_token_addr: self.sample_opt(|| rng.gen()), + chain_admin_addr: self.sample_opt(|| rng.gen()), } } } @@ -777,6 +777,7 @@ impl Distribution for EncodeDist { validators: self.sample_collect(rng), attesters: self.sample_collect(rng), leader: ValidatorPublicKey(self.sample(rng)), + registry_address: self.sample_opt(|| rng.gen()), } } } diff --git a/core/lib/contract_verifier/Cargo.toml b/core/lib/contract_verifier/Cargo.toml index 2803e3bb418..580982c9a70 100644 --- a/core/lib/contract_verifier/Cargo.toml +++ b/core/lib/contract_verifier/Cargo.toml @@ -13,7 +13,6 @@ categories.workspace = true [dependencies] zksync_types.workspace = true zksync_dal.workspace = true -zksync_env_config.workspace = true zksync_config.workspace = true zksync_contracts.workspace = true zksync_queued_job_processor.workspace = true diff --git a/core/lib/da_client/Cargo.toml b/core/lib/da_client/Cargo.toml index 589a077d4bf..a68d715eb57 100644 --- a/core/lib/da_client/Cargo.toml +++ b/core/lib/da_client/Cargo.toml @@ -12,9 +12,5 @@ categories.workspace = true [dependencies] serde = { workspace = true, features = ["derive"] } -tracing.workspace = true async-trait.workspace = true anyhow.workspace = true - -zksync_config.workspace = true -zksync_types.workspace = true diff --git a/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json b/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json deleted file mode 100644 index 3baa610d7d7..00000000000 --- a/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n genesis\n FROM\n consensus_replica_state\n WHERE\n fake_key\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "genesis", - "type_info": "Jsonb" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - true - ] - }, - "hash": "14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542" -} diff --git a/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json b/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json new file mode 100644 index 00000000000..28a1e54230d --- /dev/null +++
b/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n genesis,\n global_config\n FROM\n consensus_replica_state\n WHERE\n fake_key\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "genesis", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "global_config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true + ] + }, + "hash": "17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634" +} diff --git a/core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json b/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json similarity index 51% rename from core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json rename to core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json index 38b88c316ee..3817369ecc1 100644 --- a/core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json +++ b/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json @@ -1,15 +1,16 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n consensus_replica_state (fake_key, genesis, state)\n VALUES\n (TRUE, $1, $2)\n ", + "query": "\n INSERT INTO\n consensus_replica_state (fake_key, global_config, genesis, state)\n VALUES\n (TRUE, $1, $2, $3)\n ", "describe": { "columns": [], "parameters": { "Left": [ + "Jsonb", "Jsonb", "Jsonb" ] }, "nullable": [] }, - "hash": "f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975" + "hash": "1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85" } diff --git a/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json b/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json similarity index 58% rename from core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json rename to core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json index a42fbe98ff2..cabe0a3dc55 100644 --- a/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json +++ b/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ", "describe": { "columns": [], "parameters": { @@ -11,5 +11,5 @@ }, "nullable": [] }, - "hash": "d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510" + "hash": "311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1" } diff --git a/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json b/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json new file mode 100644 index 00000000000..ec17f2e0b61 --- /dev/null +++ b/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n 
attesters\n FROM\n l1_batches_consensus_committees\n WHERE\n l1_batch_number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "attesters", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1" +} diff --git a/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json b/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json deleted file mode 100644 index 5130763af73..00000000000 --- a/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(l1_batch_number) AS \"number\"\n FROM\n l1_batches_consensus\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - null - ] - }, - "hash": "849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c" -} diff --git a/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json b/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json new file mode 100644 index 00000000000..a59468bd516 --- /dev/null +++ b/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number\n FROM\n l1_batches_consensus\n ORDER BY\n l1_batch_number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97" +} diff --git a/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json b/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json new file mode 100644 index 00000000000..356fd8e9d99 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches_consensus_committees (l1_batch_number, attesters, updated_at)\n VALUES\n ($1, $2, NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n l1_batch_number = $1,\n attesters = $2,\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7" +} diff --git a/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql new file mode 100644 index 00000000000..fee0b42079f --- /dev/null +++ b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE consensus_replica_state DROP COLUMN global_config; + +DROP TABLE l1_batches_consensus_committees; diff --git a/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql new file mode 100644 index 00000000000..c31952b9646 --- /dev/null +++ 
b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE consensus_replica_state + ADD COLUMN global_config JSONB NULL; + +CREATE TABLE l1_batches_consensus_committees ( + l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, + attesters JSONB NOT NULL, + updated_at TIMESTAMP NOT NULL ); diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 658da6c7682..f0ef336bc54 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -22,6 +22,36 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::models::{parse_h160, parse_h256}; +/// Global config of the consensus. +#[derive(Debug, PartialEq, Clone)] +pub struct GlobalConfig { + pub genesis: validator::Genesis, + pub registry_address: Option<ethabi::Address>, +} + +impl ProtoFmt for GlobalConfig { + type Proto = proto::GlobalConfig; + + fn read(r: &Self::Proto) -> anyhow::Result<Self> { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + registry_address: r + .registry_address + .as_ref() + .map(|a| parse_h160(a)) + .transpose() + .context("registry_address")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + } + } +} + /// Global attestation status served by /// `attestationStatus` RPC. #[derive(Debug, PartialEq, Clone)] @@ -469,3 +499,24 @@ impl ProtoRepr for proto::Transaction { } } } + +impl ProtoRepr for proto::AttesterCommittee { + type Type = attester::Committee; + + fn read(&self) -> anyhow::Result<Self::Type> { + let members: Vec<_> = self + .members + .iter() + .enumerate() + .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) + .collect::<Result<_, _>>() + .context("members")?; + Self::Type::new(members) + } + + fn build(this: &Self::Type) -> Self { + Self { + members: this.iter().map(|x| x.build()).collect(), + } + } +} diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index ea0c12f1b5f..da9151f10f4 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package zksync.dal; import "zksync/roles/validator.proto"; +import "zksync/roles/attester.proto"; message Payload { // zksync-era ProtocolVersionId @@ -117,6 +118,15 @@ message PaymasterParams { optional bytes paymaster_input = 2; // required } +message AttesterCommittee { + repeated roles.attester.WeightedAttester members = 1; // required +} + +message GlobalConfig { + optional roles.validator.Genesis genesis = 1; // required + optional bytes registry_address = 2; // optional; H160 +} + message AttestationStatus { optional roles.validator.GenesisHash genesis = 1; // required optional uint64 next_batch_to_attest = 2; // required diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 8f05cb38177..2dca58e2a6a 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -1,5 +1,4 @@ use anyhow::Context as _; -use bigdecimal::Zero as _; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BlockStoreState, ReplicaState}; use zksync_db_connection::{ @@ -7,10 +6,10 @@ use zksync_db_connection::{ error::{DalError, DalResult, SqlxContext}, instrument::{InstrumentExt, Instrumented}, }; -use zksync_protobuf::ProtoFmt as _; +use zksync_protobuf::ProtoRepr as _; use zksync_types::L2BlockNumber; -pub use
crate::consensus::{AttestationStatus, Payload}; +pub use crate::consensus::{proto, AttestationStatus, GlobalConfig, Payload}; use crate::{Core, CoreDal}; /// Storage access methods for `zksync_core::consensus` module. @@ -33,72 +32,77 @@ pub enum InsertCertificateError { } impl ConsensusDal<'_, '_> { - /// Fetches genesis. - pub async fn genesis(&mut self) -> DalResult<Option<validator::Genesis>> { - Ok(sqlx::query!( + /// Fetch consensus global config. + pub async fn global_config(&mut self) -> anyhow::Result<Option<GlobalConfig>> { + // global_config contains a superset of genesis information. + // genesis column is deprecated and will be removed once the main node + // is fully upgraded. + // For now we keep the information between both columns in sync. + let Some(row) = sqlx::query!( r#" SELECT - genesis + genesis, + global_config FROM consensus_replica_state WHERE fake_key "# ) - .try_map(|row| { - let Some(genesis) = row.genesis else { - return Ok(None); - }; - // Deserialize the json, but don't allow for unknown fields. - // We might encounter an unknown fields here in case if support for the previous - // consensus protocol version is removed before the migration to a new version - // is performed. The node should NOT operate in such a state. - Ok(Some( - validator::GenesisRaw::read( - &zksync_protobuf::serde::deserialize_proto_with_options( - &genesis, /*deny_unknown_fields=*/ true, - ) - .decode_column("genesis")?, - ) - .decode_column("genesis")? - .with_hash(), - )) - }) - .instrument("genesis") + .instrument("global_config") .fetch_optional(self.storage) .await? - .flatten()) + else { + return Ok(None); + }; + if let Some(global_config) = row.global_config { + return Ok(Some( + zksync_protobuf::serde::deserialize(&global_config).context("global_config")?, + )); + } + if let Some(genesis) = row.genesis { + let genesis: validator::Genesis = + zksync_protobuf::serde::deserialize(&genesis).context("genesis")?; + return Ok(Some(GlobalConfig { + genesis, + registry_address: None, + })); + } + Ok(None) } - /// Attempts to update the genesis. + /// Attempts to update the global config. /// Fails if the new genesis is invalid. /// Fails if the new genesis has different `chain_id`. /// Fails if the storage contains a newer genesis (higher fork number). - /// Noop if the new genesis is the same as the current one. + /// Noop if the new global config is the same as the current one. /// Resets the stored consensus state otherwise and purges all certificates. - pub async fn try_update_genesis(&mut self, genesis: &validator::Genesis) -> anyhow::Result<()> { + pub async fn try_update_global_config(&mut self, want: &GlobalConfig) -> anyhow::Result<()> { let mut txn = self.storage.start_transaction().await?; - if let Some(got) = txn.consensus_dal().genesis().await? { + if let Some(got) = txn.consensus_dal().global_config().await? { // Exit if the genesis didn't change.
- if &got == genesis { + if &got == want { return Ok(()); } anyhow::ensure!( - got.chain_id == genesis.chain_id, + got.genesis.chain_id == want.genesis.chain_id, "changing chain_id is not allowed: old = {:?}, new = {:?}", - got.chain_id, - genesis.chain_id, + got.genesis.chain_id, + want.genesis.chain_id, ); anyhow::ensure!( - got.fork_number < genesis.fork_number, + got.genesis.fork_number < want.genesis.fork_number, "transition to a past fork is not allowed: old = {:?}, new = {:?}", - got.fork_number, - genesis.fork_number, + got.genesis.fork_number, + want.genesis.fork_number, ); - genesis.verify().context("genesis.verify()")?; + want.genesis.verify().context("genesis.verify()")?; } let genesis = - zksync_protobuf::serde::serialize(genesis, serde_json::value::Serializer).unwrap(); + zksync_protobuf::serde::serialize(&want.genesis, serde_json::value::Serializer) + .unwrap(); + let global_config = + zksync_protobuf::serde::serialize(want, serde_json::value::Serializer).unwrap(); let state = zksync_protobuf::serde::serialize( &ReplicaState::default(), serde_json::value::Serializer, @@ -131,14 +135,15 @@ impl ConsensusDal<'_, '_> { sqlx::query!( r#" INSERT INTO - consensus_replica_state (fake_key, genesis, state) + consensus_replica_state (fake_key, global_config, genesis, state) VALUES - (TRUE, $1, $2) + (TRUE, $1, $2, $3) "#, + global_config, genesis, state, ) - .instrument("try_update_genesis#INSERT INTO consenuss_replica_state") + .instrument("try_update_global_config#INSERT INTO consensus_replica_state") .execute(&mut txn) .await?; txn.commit().await?; @@ -154,25 +159,33 @@ impl ConsensusDal<'_, '_> { .start_transaction() .await .context("start_transaction")?; - let Some(old) = txn.consensus_dal().genesis().await.context("genesis()")? else { + let Some(old) = txn + .consensus_dal() + .global_config() + .await + .context("global_config()")? + else { return Ok(()); }; - let new = validator::GenesisRaw { - chain_id: old.chain_id, - fork_number: old.fork_number.next(), - first_block: txn - .consensus_dal() - .next_block() - .await - .context("next_block()")?, - - protocol_version: old.protocol_version, - validators: old.validators.clone(), - attesters: old.attesters.clone(), - leader_selection: old.leader_selection.clone(), - } - .with_hash(); - txn.consensus_dal().try_update_genesis(&new).await?; + let new = GlobalConfig { + genesis: validator::GenesisRaw { + chain_id: old.genesis.chain_id, + fork_number: old.genesis.fork_number.next(), + first_block: txn + .consensus_dal() + .next_block() + .await + .context("next_block()")?, + + protocol_version: old.genesis.protocol_version, + validators: old.genesis.validators.clone(), + attesters: old.genesis.attesters.clone(), + leader_selection: old.genesis.leader_selection.clone(), + } + .with_hash(), + registry_address: old.registry_address, + }; + txn.consensus_dal().try_update_global_config(&new).await?; txn.commit().await?; Ok(()) } @@ -259,7 +272,12 @@ impl ConsensusDal<'_, '_> { /// so it might NOT be the certificate for the last L2 block. pub async fn block_certificates_range(&mut self) -> anyhow::Result { // It cannot be older than genesis first block. - let mut start = self.genesis().await?.context("genesis()")?.first_block; + let mut start = self + .global_config() + .await? + .context("genesis()")? + .genesis + .first_block; start = start.max(self.first_block().await.context("first_block()")?); let row = sqlx::query!( r#" @@ -422,21 +440,96 @@ impl ConsensusDal<'_, '_> { Ok(()) } + /// Persist the attester committee for the given batch. 
+ pub async fn upsert_attester_committee( + &mut self, + number: attester::BatchNumber, + committee: &attester::Committee, + ) -> anyhow::Result<()> { + let committee = proto::AttesterCommittee::build(committee); + let committee = + zksync_protobuf::serde::serialize_proto(&committee, serde_json::value::Serializer) + .unwrap(); + sqlx::query!( + r#" + INSERT INTO + l1_batches_consensus_committees (l1_batch_number, attesters, updated_at) + VALUES + ($1, $2, NOW()) + ON CONFLICT (l1_batch_number) DO + UPDATE + SET + l1_batch_number = $1, + attesters = $2, + updated_at = NOW() + "#, + i64::try_from(number.0).context("overflow")?, + committee + ) + .instrument("upsert_attester_committee") + .report_latency() + .execute(self.storage) + .await?; + Ok(()) + } + + /// Fetches the attester committee for the L1 batch with the given number. + pub async fn attester_committee( + &mut self, + n: attester::BatchNumber, + ) -> anyhow::Result<Option<attester::Committee>> { + let Some(row) = sqlx::query!( + r#" + SELECT + attesters + FROM + l1_batches_consensus_committees + WHERE + l1_batch_number = $1 + "#, + i64::try_from(n.0)? + ) + .instrument("attester_committee") + .report_latency() + .fetch_optional(self.storage) + .await? + else { + return Ok(None); + }; + let raw = zksync_protobuf::serde::deserialize_proto(&row.attesters) + .context("deserialize_proto()")?; + Ok(Some( + proto::AttesterCommittee::read(&raw).context("read()")?, + )) + } + /// Inserts a certificate for the L1 batch. /// Noop if a certificate for the same L1 batch is already present. - /// No verification is performed - it cannot be performed due to circular dependency on + /// Verification against previously stored attester committee is performed. + /// Batch hash is not verified - it cannot be performed due to circular dependency on /// `zksync_l1_contract_interface`. pub async fn insert_batch_certificate( &mut self, cert: &attester::BatchQC, ) -> anyhow::Result<()> { - let res = sqlx::query!( + let cfg = self + .global_config() + .await + .context("global_config()")? + .context("genesis is missing")?; + let committee = self + .attester_committee(cert.message.number) + .await + .context("attester_committee()")? + .context("attester committee is missing")?; + cert.verify(cfg.genesis.hash(), &committee) + .context("cert.verify()")?; + sqlx::query!( r#" INSERT INTO - l1_batches_consensus (l1_batch_number, certificate, created_at, updated_at) + l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at) VALUES ($1, $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::try_from(cert.message.number.0).context("overflow")?, // Unwrap is ok, because serialization should always succeed. @@ -446,9 +539,6 @@ impl ConsensusDal<'_, '_> { .report_latency() .execute(self.storage) .await?; - if res.rows_affected().is_zero() { - tracing::debug!(l1_batch_number = ?cert.message.number, "duplicate batch certificate"); - } Ok(()) } @@ -457,24 +547,28 @@ impl ConsensusDal<'_, '_> { pub async fn last_batch_certificate_number( &mut self, ) -> anyhow::Result<Option<attester::BatchNumber>> { - let row = sqlx::query!( + let Some(row) = sqlx::query!( r#" SELECT - MAX(l1_batch_number) AS "number" + l1_batch_number FROM l1_batches_consensus + ORDER BY + l1_batch_number DESC + LIMIT + 1 "# ) .instrument("last_batch_certificate_number") .report_latency() - .fetch_one(self.storage) - .await?; - - let Some(n) = row.number else { + .fetch_optional(self.storage) + .await?
+ else { return Ok(None); }; + Ok(Some(attester::BatchNumber( - n.try_into().context("overflow")?, + row.l1_batch_number.try_into().context("overflow")?, ))) } @@ -529,7 +623,7 @@ impl ConsensusDal<'_, '_> { /// This is a main node only query. /// ENs should call the attestation_status RPC of the main node. pub async fn attestation_status(&mut self) -> anyhow::Result<Option<AttestationStatus>> { - let Some(genesis) = self.genesis().await.context("genesis()")? else { + let Some(cfg) = self.global_config().await.context("genesis()")? else { return Ok(None); }; let Some(next_batch_to_attest) = async { @@ -542,18 +636,21 @@ impl ConsensusDal<'_, '_> { return Ok(Some(last + 1)); } // Otherwise start with the batch containing the first block of the fork. - self.batch_of_block(genesis.first_block) + self.batch_of_block(cfg.genesis.first_block) .await .context("batch_of_block()") } .await? else { - tracing::info!(%genesis.first_block, "genesis block not found"); + tracing::info!(%cfg.genesis.first_block, "genesis block not found"); return Ok(None); }; Ok(Some(AttestationStatus { - genesis: genesis.hash(), - next_batch_to_attest, + genesis: cfg.genesis.hash(), + // We never attest batch 0 for technical reasons: + // * it is not supported to read state before batch 0. + // * the registry contract needs to be deployed before we can start operating on it + next_batch_to_attest: next_batch_to_attest.max(attester::BatchNumber(1)), })) } } @@ -563,8 +660,9 @@ mod tests { use rand::Rng as _; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::ReplicaState; - use zksync_types::{L1BatchNumber, ProtocolVersion}; + use zksync_types::ProtocolVersion; + use super::GlobalConfig; use crate::{ tests::{create_l1_batch_header, create_l2_block_header}, ConnectionPool, Core, CoreDal, }; @@ -575,19 +673,22 @@ let rng = &mut rand::thread_rng(); let pool = ConnectionPool::<Core>::test_pool().await; let mut conn = pool.connection().await.unwrap(); - assert_eq!(None, conn.consensus_dal().genesis().await.unwrap()); + assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); for n in 0..3 { let setup = validator::testonly::Setup::new(rng, 3); let mut genesis = (*setup.genesis).clone(); genesis.fork_number = validator::ForkNumber(n); - let genesis = genesis.with_hash(); + let cfg = GlobalConfig { + genesis: genesis.with_hash(), + registry_address: Some(rng.gen()), + }; conn.consensus_dal() - .try_update_genesis(&genesis) + .try_update_global_config(&cfg) .await .unwrap(); assert_eq!( - genesis, - conn.consensus_dal().genesis().await.unwrap().unwrap() + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() ); assert_eq!( ReplicaState::default(), @@ -597,8 +698,8 @@ let want: ReplicaState = rng.gen(); conn.consensus_dal().set_replica_state(&want).await.unwrap(); assert_eq!( - genesis, - conn.consensus_dal().genesis().await.unwrap().unwrap() + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() ); assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); } } @@ -608,14 +709,32 @@ #[tokio::test] async fn test_batch_certificate() { let rng = &mut rand::thread_rng(); + let setup = validator::testonly::Setup::new(rng, 3); let pool = ConnectionPool::<Core>::test_pool().await; let mut conn = pool.connection().await.unwrap(); + let cfg = GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: Some(rng.gen()), + }; + conn.consensus_dal() + .try_update_global_config(&cfg) + .await + .unwrap(); - let mut mock_batch_qc = |number: L1BatchNumber| { -
let mut cert: attester::BatchQC = rng.gen(); - cert.message.number.0 = u64::from(number.0); - cert.signatures.add(rng.gen(), rng.gen()); - cert + let mut make_cert = |number: attester::BatchNumber| { + let m = attester::Batch { + genesis: setup.genesis.hash(), + hash: rng.gen(), + number, + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } + attester::BatchQC { + message: m, + signatures: sigs, + } }; // Required for inserting l2 blocks @@ -627,8 +746,7 @@ mod tests { // Insert some mock L2 blocks and L1 batches let mut block_number = 0; let mut batch_number = 0; - let num_batches = 3; - for _ in 0..num_batches { + for _ in 0..3 { for _ in 0..3 { block_number += 1; let l2_block = create_l2_block_header(block_number); @@ -636,64 +754,56 @@ mod tests { } batch_number += 1; let l1_batch = create_l1_batch_header(batch_number); - conn.blocks_dal() .insert_mock_l1_batch(&l1_batch) .await .unwrap(); - conn.blocks_dal() .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) .await .unwrap(); } - let l1_batch_number = L1BatchNumber(batch_number); + let n = attester::BatchNumber(batch_number.into()); // Insert a batch certificate for the last L1 batch. - let cert1 = mock_batch_qc(l1_batch_number); - + let want = make_cert(n); conn.consensus_dal() - .insert_batch_certificate(&cert1) + .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap()) .await .unwrap(); - - // Try insert duplicate batch certificate for the same batch. - let cert2 = mock_batch_qc(l1_batch_number); - conn.consensus_dal() - .insert_batch_certificate(&cert2) + .insert_batch_certificate(&want) .await .unwrap(); + // Reinserting a cert should fail. + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n)) + .await + .is_err()); + // Retrieve the latest certificate. - let number = conn + let got_n = conn .consensus_dal() .last_batch_certificate_number() .await .unwrap() .unwrap(); - - let cert = conn + let got = conn .consensus_dal() - .batch_certificate(number) + .batch_certificate(got_n) .await .unwrap() .unwrap(); - - assert_eq!(cert, cert1, "duplicates are ignored"); + assert_eq!(got, want); // Try insert batch certificate for non-existing batch - let cert3 = mock_batch_qc(l1_batch_number.next()); - conn.consensus_dal() - .insert_batch_certificate(&cert3) - .await - .expect_err("missing payload"); - - // Insert one more L1 batch without a certificate. 
- conn.blocks_dal() .insert_mock_l1_batch(&create_l1_batch_header(batch_number + 1)) + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n.next())) .await - .unwrap(); + .is_err()); } } diff --git a/core/lib/db_connection/Cargo.toml b/core/lib/db_connection/Cargo.toml index fa5bb0b20af..fb535d58232 100644 --- a/core/lib/db_connection/Cargo.toml +++ b/core/lib/db_connection/Cargo.toml @@ -12,7 +12,6 @@ categories.workspace = true [dependencies] zksync_basic_types.workspace = true -zksync_health_check.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 3365f56add7..298c43b80cc 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -89,6 +89,7 @@ CONTRACTS_L2_ERC20_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L1_WETH_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_WETH_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_TESTNET_PAYMASTER_ADDR="FC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +CONTRACTS_L2_CONSENSUS_REGISTRY_ADDR="D64e136566a9E04eb05B30184fF577F52682D182" CONTRACTS_L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" CONTRACTS_L1_SHARED_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" diff --git a/core/lib/multivm/src/tracers/mod.rs b/core/lib/multivm/src/tracers/mod.rs index 69501cf3988..35224d993a1 100644 --- a/core/lib/multivm/src/tracers/mod.rs +++ b/core/lib/multivm/src/tracers/mod.rs @@ -1,11 +1,6 @@ pub use self::{ - call_tracer::CallTracer, - multivm_dispatcher::TracerDispatcher, - prestate_tracer::PrestateTracer, - storage_invocation::StorageInvocations, - validator::{ - ValidationError, ValidationTracer, ValidationTracerParams, ViolatedValidationRule, - }, + call_tracer::CallTracer, multivm_dispatcher::TracerDispatcher, prestate_tracer::PrestateTracer, + storage_invocation::StorageInvocations, validator::ValidationTracer, }; mod call_tracer; diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index 307256792cf..a1573f24c66 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -11,10 +11,12 @@ use zksync_types::{ use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256}; use self::types::{NewTrustedValidationItems, ValidationTracerMode}; -pub use self::types::{ValidationError, ValidationTracerParams, ViolatedValidationRule}; use crate::{ glue::tracers::IntoOldVmTracer, - interface::storage::{StoragePtr, WriteStorage}, + interface::{ + storage::{StoragePtr, WriteStorage}, + tracer::{ValidationParams, ViolatedValidationRule}, + }, }; mod types; @@ -50,7 +52,7 @@ type ValidationRoundResult = Result<NewTrustedValidationItems, ViolatedValidationRule>; impl<H> ValidationTracer<H> { pub fn new( - params: ValidationTracerParams, + params: ValidationParams, vm_version: VmVersion, ) -> (Self, Arc<OnceCell<ViolatedValidationRule>>) { let result = Arc::new(OnceCell::new()); @@ -179,8 +181,8 @@ impl ValidationTracer { } } - pub fn params(&self) -> ValidationTracerParams { - ValidationTracerParams { + pub fn params(&self) -> ValidationParams { + ValidationParams { user_address: self.user_address, paymaster_address: self.paymaster_address, trusted_slots: self.trusted_slots.clone(), diff --git a/core/lib/multivm/src/tracers/validator/types.rs
b/core/lib/multivm/src/tracers/validator/types.rs index 418d2b89350..b9d44227992 100644 --- a/core/lib/multivm/src/tracers/validator/types.rs +++ b/core/lib/multivm/src/tracers/validator/types.rs @@ -1,9 +1,4 @@ -use std::{collections::HashSet, fmt, fmt::Display}; - -use zksync_types::{Address, H256, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::Halt; +use zksync_types::{Address, H256}; #[derive(Debug, Clone, Eq, PartialEq, Copy)] #[allow(clippy::enum_variant_names)] @@ -21,72 +16,3 @@ pub(super) struct NewTrustedValidationItems { pub(super) new_allowed_slots: Vec<H256>, pub(super) new_trusted_addresses: Vec<Address>, } -#[derive(Debug, Clone)] -pub struct ValidationTracerParams { - pub user_address: Address, - pub paymaster_address: Address, - /// Slots that are trusted (i.e. the user can access them). - pub trusted_slots: HashSet<(Address, U256)>, - /// Trusted addresses (the user can access any slots on these addresses). - pub trusted_addresses: HashSet<Address>
, - /// Slots, that are trusted and the value of them is the new trusted address. - /// They are needed to work correctly with beacon proxy, where the address of the implementation is - /// stored in the beacon. - pub trusted_address_slots: HashSet<(Address, U256)>, - /// Number of computational gas that validation step is allowed to use. - pub computational_gas_limit: u32, -} - -#[derive(Debug, Clone)] -pub enum ViolatedValidationRule { - TouchedUnallowedStorageSlots(Address, U256), - CalledContractWithNoCode(Address), - TouchedUnallowedContext, - TookTooManyComputationalGas(u32), -} - -impl fmt::Display for ViolatedValidationRule { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - ViolatedValidationRule::TouchedUnallowedStorageSlots(contract, key) => write!( - f, - "Touched unallowed storage slots: address {}, key: {}", - hex::encode(contract), - hex::encode(u256_to_h256(*key)) - ), - ViolatedValidationRule::CalledContractWithNoCode(contract) => { - write!(f, "Called contract with no code: {}", hex::encode(contract)) - } - ViolatedValidationRule::TouchedUnallowedContext => { - write!(f, "Touched unallowed context") - } - ViolatedValidationRule::TookTooManyComputationalGas(gas_limit) => { - write!( - f, - "Took too many computational gas, allowed limit: {}", - gas_limit - ) - } - } - } -} - -#[derive(Debug, Clone)] -pub enum ValidationError { - FailedTx(Halt), - ViolatedRule(ViolatedValidationRule), -} - -impl Display for ValidationError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::FailedTx(revert_reason) => { - write!(f, "Validation revert: {}", revert_reason) - } - Self::ViolatedRule(rule) => { - write!(f, "Violated validation rules: {}", rule) - } - } - } -} diff --git a/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs b/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs index 2beca41fb48..d1ddb2b44c8 100644 --- a/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_4_1::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. 
@@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs b/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs index 3394a6c3f2b..a51644ff9ea 100644 --- a/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_4_1::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. @@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs b/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs index 53b5bf04d2e..7f9767a5e63 100644 --- a/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_4_0::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. 
@@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs index e963c79f4e4..c206bd6fb2a 100644 --- a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_5_0::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -86,7 +86,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. @@ -100,7 +100,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs index 6107125d14d..0badd7c5877 100644 --- a/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_3_3::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. 
@@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs index bb166bedcda..86a639915c9 100644 --- a/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs @@ -9,12 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, + tracer::ViolatedValidationRule, VmExecutionResultAndLogs, }, tracers::{ dynamic::vm_1_3_3::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -87,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. @@ -101,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index b57f033d0d2..f5eb5c5b2f1 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -6,7 +6,7 @@ use zksync_config::configs::consensus::{ }; use zksync_protobuf::{kB, read_optional, repr::ProtoRepr, required, ProtoFmt}; -use crate::{proto::consensus as proto, read_optional_repr}; +use crate::{parse_h160, proto::consensus as proto, read_optional_repr}; impl ProtoRepr for proto::WeightedValidator { type Type = WeightedValidator; @@ -65,6 +65,12 @@ impl ProtoRepr for proto::GenesisSpec { .collect::>() .context("attesters")?, leader: ValidatorPublicKey(required(&self.leader).context("leader")?.clone()), + registry_address: self + .registry_address + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("registry_address")?, }) } fn build(this: &Self::Type) -> Self { @@ -74,6 +80,7 @@ impl ProtoRepr for proto::GenesisSpec { validators: this.validators.iter().map(ProtoRepr::build).collect(), attesters: this.attesters.iter().map(ProtoRepr::build).collect(), leader: Some(this.leader.0.clone()), + registry_address: this.registry_address.map(|a| format!("{:?}", a)), } } } diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index c64c993be7c..835ead1ab65 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -56,6 +56,8 @@ message GenesisSpec { repeated WeightedValidator validators = 3; // must be non-empty; validator committee. 
optional string leader = 4; // required; ValidatorPublicKey
   repeated WeightedAttester attesters = 5; // can be empty; attester committee.
+  // Currently not in consensus genesis, but still a part of the global configuration.
+  optional string registry_address = 6; // optional; H160
 }
 
 // Per peer connection RPC rate limits.
diff --git a/core/lib/tee_verifier/Cargo.toml b/core/lib/tee_verifier/Cargo.toml
index a56f383bdba..6828eeef8b1 100644
--- a/core/lib/tee_verifier/Cargo.toml
+++ b/core/lib/tee_verifier/Cargo.toml
@@ -14,12 +14,9 @@ categories.workspace = true
 zksync_multivm.workspace = true
 zksync_config.workspace = true
 zksync_crypto_primitives.workspace = true
-zksync_dal.workspace = true
-zksync_db_connection.workspace = true
 zksync_merkle_tree.workspace = true
 zksync_object_store.workspace = true
 zksync_prover_interface.workspace = true
-zksync_queued_job_processor.workspace = true
 zksync_types.workspace = true
 zksync_utils.workspace = true
 
diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs
index bf26caddd07..9391c862757 100644
--- a/core/lib/types/src/api/en.rs
+++ b/core/lib/types/src/api/en.rs
@@ -44,10 +44,23 @@ pub struct SyncBlock {
     pub protocol_version: ProtocolVersionId,
 }
 
+/// Global configuration of the consensus served by the main node to the external nodes.
+/// In particular, it contains consensus genesis.
+///
+/// The wrapped JSON value corresponds to `zksync_dal::consensus::GlobalConfig`.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ConsensusGlobalConfig(pub serde_json::Value);
+
+/// [DEPRECATED] Genesis served by the main node to the external nodes.
+/// This type is deprecated because `ConsensusGlobalConfig` also contains the genesis and is extensible.
+///
+/// The wrapped JSON value corresponds to `zksync_consensus_roles::validator::Genesis`.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct ConsensusGenesis(pub serde_json::Value);
 
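For context, external nodes fetch this config over JSON-RPC. A minimal sketch of such a fetch (assumptions: a node at `http://localhost:3050` with the `en` namespace enabled, and `reqwest` built with its `json` feature; the method name matches the `consensusGlobalConfig` RPC declared in `core/lib/web3_decl/src/namespaces/en.rs` later in this diff):

```rust
use serde_json::{json, Value};

/// Fetches the consensus global config; returns `None` while the node has nothing to serve.
async fn fetch_consensus_global_config(url: &str) -> anyhow::Result<Option<Value>> {
    let client = reqwest::Client::new();
    let response: Value = client
        .post(url)
        .json(&json!({
            "jsonrpc": "2.0",
            "id": 1,
            "method": "en_consensusGlobalConfig",
            "params": [],
        }))
        .send()
        .await?
        .json()
        .await?;
    // The `result` field wraps the JSON corresponding to `zksync_dal::consensus::GlobalConfig`.
    Ok(response.get("result").filter(|r| !r.is_null()).cloned())
}
```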
 /// AttestationStatus maintained by the main node.
 /// Used for testing L1 batch signing by consensus attesters.
+///
+/// The wrapped JSON value corresponds to `zksync_dal::consensus::AttestationStatus`.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct AttestationStatus(pub serde_json::Value);
diff --git a/core/lib/utils/Cargo.toml b/core/lib/utils/Cargo.toml
index 5ec27380df5..593952f16ac 100644
--- a/core/lib/utils/Cargo.toml
+++ b/core/lib/utils/Cargo.toml
@@ -25,7 +25,6 @@ thiserror.workspace = true
 futures.workspace = true
 hex.workspace = true
 reqwest = { workspace = true, features = ["blocking"] }
-itertools.workspace = true
 serde_json.workspace = true
 once_cell.workspace = true
 
diff --git a/core/lib/vm_executor/Cargo.toml b/core/lib/vm_executor/Cargo.toml
index 9471e263bf4..089c2a9bcca 100644
--- a/core/lib/vm_executor/Cargo.toml
+++ b/core/lib/vm_executor/Cargo.toml
@@ -15,6 +15,7 @@ zksync_contracts.workspace = true
 zksync_dal.workspace = true
 zksync_types.workspace = true
 zksync_multivm.workspace = true
+zksync_utils.workspace = true
 
 async-trait.workspace = true
 once_cell.workspace = true
 
diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs
index d0201458446..68a3769ee62 100644
--- a/core/lib/vm_executor/src/batch/factory.rs
+++ b/core/lib/vm_executor/src/batch/factory.rs
@@ -18,9 +18,9 @@ use zksync_types::{vm::FastVmMode, Transaction};
 
 use super::{
     executor::{Command, MainBatchExecutor},
-    metrics::{TxExecutionStage, BATCH_TIP_METRICS, KEEPER_METRICS},
+    metrics::{TxExecutionStage, BATCH_TIP_METRICS, EXECUTOR_METRICS, KEEPER_METRICS},
 };
-use crate::batch::metrics::{InteractionType, EXECUTOR_METRICS};
+use crate::shared::InteractionType;
 
 /// The default implementation of [`BatchExecutorFactory`].
 /// Creates real batch executors which maintain the VM (as opposed to the test factories which don't use the VM).
diff --git a/core/lib/vm_executor/src/batch/metrics.rs b/core/lib/vm_executor/src/batch/metrics.rs
index 170ed471798..6851193e9be 100644
--- a/core/lib/vm_executor/src/batch/metrics.rs
+++ b/core/lib/vm_executor/src/batch/metrics.rs
@@ -5,6 +5,8 @@ use std::time::Duration;
 use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics};
 use zksync_multivm::interface::VmExecutionResultAndLogs;
 
+use crate::shared::InteractionType;
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
 #[metrics(label = "command", rename_all = "snake_case")]
 pub(super) enum ExecutorCommand {
@@ -26,13 +28,6 @@ pub(super) enum TxExecutionStage {
     TxRollback,
 }
 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
-#[metrics(label = "interaction", rename_all = "snake_case")]
-pub(super) enum InteractionType {
-    GetValue,
-    SetValue,
-}
-
 /// Executor-related metrics.
 #[derive(Debug, Metrics)]
 #[metrics(prefix = "state_keeper")]
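The `InteractionType` label is deduplicated into `vm_executor/src/shared.rs` (added later in this diff) so that batch and oneshot storage metrics share one label set. A self-contained sketch of the vise pattern involved, mirroring how the document's own metrics are declared (the `demo_*` names are illustrative):

```rust
use std::time::Duration;

use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics};

// Mirrors `crate::shared::InteractionType`: one label type shared by several metric families.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
#[metrics(label = "interaction", rename_all = "snake_case")]
enum InteractionType {
    Missed,
    GetValue,
    SetValue,
    Total,
}

#[derive(Debug, Metrics)]
#[metrics(prefix = "demo_storage_interaction")]
struct DemoMetrics {
    // Indexing the family with `&InteractionType::...` lazily creates the labeled histogram.
    #[metrics(buckets = Buckets::LATENCIES)]
    duration: Family<InteractionType, Histogram<Duration>>,
}

#[vise::register]
static METRICS: vise::Global<DemoMetrics> = vise::Global::new();

fn record_get(latency: Duration) {
    METRICS.duration[&InteractionType::GetValue].observe(latency);
}
```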
diff --git a/core/lib/vm_executor/src/lib.rs b/core/lib/vm_executor/src/lib.rs
index 24fb3d8f7ee..1a0fbb002df 100644
--- a/core/lib/vm_executor/src/lib.rs
+++ b/core/lib/vm_executor/src/lib.rs
@@ -6,4 +6,6 @@ pub use zksync_multivm::interface::executor as interface;
 
 pub mod batch;
+pub mod oneshot;
+mod shared;
 pub mod storage;
diff --git a/core/lib/vm_executor/src/oneshot/metrics.rs b/core/lib/vm_executor/src/oneshot/metrics.rs
new file mode 100644
index 00000000000..8a89ce0a9a4
--- /dev/null
+++ b/core/lib/vm_executor/src/oneshot/metrics.rs
@@ -0,0 +1,143 @@
+use std::time::Duration;
+
+use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics};
+use zksync_multivm::interface::{storage::StorageViewMetrics, VmMemoryMetrics};
+
+use crate::shared::InteractionType;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
+#[metrics(label = "type", rename_all = "snake_case")]
+enum SizeType {
+    Inner,
+    History,
+}
+
+const MEMORY_SIZE_BUCKETS: Buckets = Buckets::values(&[
+    1_000.0,
+    10_000.0,
+    100_000.0,
+    500_000.0,
+    1_000_000.0,
+    5_000_000.0,
+    10_000_000.0,
+    50_000_000.0,
+    100_000_000.0,
+    500_000_000.0,
+    1_000_000_000.0,
+]);
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "runtime_context_memory")]
+struct RuntimeContextMemoryMetrics {
+    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
+    event_sink_size: Family<SizeType, Histogram<usize>>,
+    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
+    memory_size: Family<SizeType, Histogram<usize>>,
+    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
+    decommitter_size: Family<SizeType, Histogram<usize>>,
+    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
+    storage_size: Family<SizeType, Histogram<usize>>,
+    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
+    storage_view_cache_size: Histogram<usize>,
+    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
+    full: Histogram<usize>,
+}
+
+#[vise::register]
+static MEMORY_METRICS: vise::Global<RuntimeContextMemoryMetrics> = vise::Global::new();
+
+const INTERACTION_AMOUNT_BUCKETS: Buckets = Buckets::exponential(10.0..=10_000_000.0, 10.0);
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "runtime_context_storage_interaction")]
+struct RuntimeContextStorageMetrics {
+    #[metrics(buckets = INTERACTION_AMOUNT_BUCKETS)]
+    amount: Family<InteractionType, Histogram<usize>>,
+    #[metrics(buckets = Buckets::LATENCIES)]
+    duration: Family<InteractionType, Histogram<Duration>>,
+    #[metrics(buckets = Buckets::LATENCIES)]
+    duration_per_unit: Family<InteractionType, Histogram<Duration>>,
+    #[metrics(buckets = Buckets::ZERO_TO_ONE)]
+    ratio: Histogram<f64>,
+}
+
+#[vise::register]
+static STORAGE_METRICS: vise::Global<RuntimeContextStorageMetrics> = vise::Global::new();
+
+pub(super) fn report_vm_memory_metrics(
+    tx_id: &str,
+    memory_metrics: &VmMemoryMetrics,
+    vm_execution_took: Duration,
+    storage_metrics: StorageViewMetrics,
+) {
+    MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner);
+    MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history);
+    MEMORY_METRICS.memory_size[&SizeType::Inner].observe(memory_metrics.memory_inner);
+    MEMORY_METRICS.memory_size[&SizeType::History].observe(memory_metrics.memory_history);
+    MEMORY_METRICS.decommitter_size[&SizeType::Inner]
+        .observe(memory_metrics.decommittment_processor_inner);
+    MEMORY_METRICS.decommitter_size[&SizeType::History]
+        .observe(memory_metrics.decommittment_processor_history);
+    MEMORY_METRICS.storage_size[&SizeType::Inner].observe(memory_metrics.storage_inner);
+    MEMORY_METRICS.storage_size[&SizeType::History].observe(memory_metrics.storage_history);
+
+    MEMORY_METRICS
+        .storage_view_cache_size
+        .observe(storage_metrics.cache_size);
+    MEMORY_METRICS
+        .full
.observe(memory_metrics.full_size() + storage_metrics.cache_size); + + let total_storage_invocations = storage_metrics.get_value_storage_invocations + + storage_metrics.set_value_storage_invocations; + let total_time_spent_in_storage = + storage_metrics.time_spent_on_get_value + storage_metrics.time_spent_on_set_value; + + STORAGE_METRICS.amount[&InteractionType::Missed] + .observe(storage_metrics.storage_invocations_missed); + STORAGE_METRICS.amount[&InteractionType::GetValue] + .observe(storage_metrics.get_value_storage_invocations); + STORAGE_METRICS.amount[&InteractionType::SetValue] + .observe(storage_metrics.set_value_storage_invocations); + STORAGE_METRICS.amount[&InteractionType::Total].observe(total_storage_invocations); + + STORAGE_METRICS.duration[&InteractionType::Missed] + .observe(storage_metrics.time_spent_on_storage_missed); + STORAGE_METRICS.duration[&InteractionType::GetValue] + .observe(storage_metrics.time_spent_on_get_value); + STORAGE_METRICS.duration[&InteractionType::SetValue] + .observe(storage_metrics.time_spent_on_set_value); + STORAGE_METRICS.duration[&InteractionType::Total].observe(total_time_spent_in_storage); + + if total_storage_invocations > 0 { + STORAGE_METRICS.duration_per_unit[&InteractionType::Total] + .observe(total_time_spent_in_storage.div_f64(total_storage_invocations as f64)); + } + if storage_metrics.storage_invocations_missed > 0 { + let duration_per_unit = storage_metrics + .time_spent_on_storage_missed + .div_f64(storage_metrics.storage_invocations_missed as f64); + STORAGE_METRICS.duration_per_unit[&InteractionType::Missed].observe(duration_per_unit); + } + + STORAGE_METRICS + .ratio + .observe(total_time_spent_in_storage.as_secs_f64() / vm_execution_took.as_secs_f64()); + + const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000; + + if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { + tracing::info!( + "Tx {tx_id} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \ + {} get_value_storage_invocations, {} set_value_storage_invocations, \ + vm execution took {vm_execution_took:?}, storage interaction took {total_time_spent_in_storage:?} \ + (missed: {:?} get: {:?} set: {:?})", + storage_metrics.storage_invocations_missed, + storage_metrics.get_value_storage_invocations, + storage_metrics.set_value_storage_invocations, + storage_metrics.time_spent_on_storage_missed, + storage_metrics.time_spent_on_get_value, + storage_metrics.time_spent_on_set_value, + ); + } +} diff --git a/core/node/api_server/src/execution_sandbox/testonly.rs b/core/lib/vm_executor/src/oneshot/mock.rs similarity index 58% rename from core/node/api_server/src/execution_sandbox/testonly.rs rename to core/lib/vm_executor/src/oneshot/mock.rs index d9d60f52415..8f3a12603c1 100644 --- a/core/node/api_server/src/execution_sandbox/testonly.rs +++ b/core/lib/vm_executor/src/oneshot/mock.rs @@ -1,18 +1,18 @@ use std::fmt; use async_trait::async_trait; -#[cfg(test)] -use zksync_multivm::interface::ExecutionResult; use zksync_multivm::interface::{ - storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TxExecutionMode, - VmExecutionResultAndLogs, + executor::{OneshotExecutor, TransactionValidator}, + storage::ReadStorage, + tracer::{ValidationError, ValidationParams}, + ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, + TxExecutionArgs, TxExecutionMode, VmExecutionResultAndLogs, }; -use zksync_types::Transaction; - -use super::{execute::TransactionExecutor, OneshotExecutor, 
TxExecutionArgs}; +use zksync_types::{l2::L2Tx, Transaction}; type TxResponseFn = dyn Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + Send + Sync; +/// Mock [`OneshotExecutor`] implementation. pub struct MockOneshotExecutor { call_responses: Box, tx_responses: Box, @@ -30,10 +30,7 @@ impl Default for MockOneshotExecutor { fn default() -> Self { Self { call_responses: Box::new(|tx, _| { - panic!( - "Unexpected call with data {}", - hex::encode(tx.execute.calldata()) - ); + panic!("Unexpected call with data {:?}", tx.execute.calldata()); }), tx_responses: Box::new(|tx, _| { panic!("Unexpect transaction call: {tx:?}"); @@ -43,23 +40,23 @@ impl Default for MockOneshotExecutor { } impl MockOneshotExecutor { - #[cfg(test)] - pub(crate) fn set_call_responses(&mut self, responses: F) + /// Sets call response closure used by this executor. + pub fn set_call_responses(&mut self, responses: F) where F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, { self.call_responses = self.wrap_responses(responses); } - #[cfg(test)] - pub(crate) fn set_tx_responses(&mut self, responses: F) + /// Sets transaction response closure used by this executor. The closure will be called both for transaction execution / validation, + /// and for gas estimation. + pub fn set_tx_responses(&mut self, responses: F) where F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, { self.tx_responses = self.wrap_responses(responses); } - #[cfg(test)] fn wrap_responses(&mut self, responses: F) -> Box where F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, @@ -76,8 +73,8 @@ impl MockOneshotExecutor { ) } - #[cfg(test)] - pub(crate) fn set_tx_responses_with_logs(&mut self, responses: F) + /// Same as [`Self::set_tx_responses()`], but allows to customize returned VM logs etc. + pub fn set_full_tx_responses(&mut self, responses: F) where F: Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + 'static + Send + Sync, { @@ -99,34 +96,41 @@ impl OneshotExecutor for MockOneshotExecutor where S: ReadStorage + Send + 'static, { - type Tracers = (); - - async fn inspect_transaction( + async fn inspect_transaction_with_bytecode_compression( &self, _storage: S, env: OneshotEnv, args: TxExecutionArgs, - (): Self::Tracers, - ) -> anyhow::Result { - Ok(self.mock_inspect(env, args)) + _params: OneshotTracingParams, + ) -> anyhow::Result { + Ok(OneshotTransactionExecutionResult { + tx_result: Box::new(self.mock_inspect(env, args)), + compression_result: Ok(()), + call_traces: vec![], + }) } +} - async fn inspect_transaction_with_bytecode_compression( +#[async_trait] +impl TransactionValidator for MockOneshotExecutor +where + S: ReadStorage + Send + 'static, +{ + async fn validate_transaction( &self, _storage: S, env: OneshotEnv, - args: TxExecutionArgs, - (): Self::Tracers, - ) -> anyhow::Result<( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - )> { - Ok((Ok(()), self.mock_inspect(env, args))) - } -} - -impl From for TransactionExecutor { - fn from(executor: MockOneshotExecutor) -> Self { - Self::Mock(executor) + tx: L2Tx, + _validation_params: ValidationParams, + ) -> anyhow::Result> { + Ok( + match self + .mock_inspect(env, TxExecutionArgs::for_validation(tx)) + .result + { + ExecutionResult::Halt { reason } => Err(ValidationError::FailedTx(reason)), + ExecutionResult::Success { .. } | ExecutionResult::Revert { .. 
} => Ok(()), + }, + ) } } diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs new file mode 100644 index 00000000000..cac8edfdfdf --- /dev/null +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -0,0 +1,291 @@ +//! Oneshot VM executor. + +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; + +use anyhow::Context; +use async_trait::async_trait; +use once_cell::sync::OnceCell; +use zksync_multivm::{ + interface::{ + executor::{OneshotExecutor, TransactionValidator}, + storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, + tracer::{ValidationError, ValidationParams}, + ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, + StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, VmInterface, + }, + tracers::{CallTracer, StorageInvocations, ValidationTracer}, + utils::adjust_pubdata_price_for_tx, + vm_latest::HistoryDisabled, + zk_evm_latest::ethereum_types::U256, + MultiVMTracer, VmInstance, +}; +use zksync_types::{ + block::pack_block_info, + get_nonce_key, + l2::L2Tx, + utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, + AccountTreeId, Nonce, StorageKey, Transaction, SYSTEM_CONTEXT_ADDRESS, + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +pub use self::mock::MockOneshotExecutor; + +mod metrics; +mod mock; + +/// Main [`OneshotExecutor`] implementation used by the API server. +#[derive(Debug, Default)] +pub struct MainOneshotExecutor { + missed_storage_invocation_limit: usize, + execution_latency_histogram: Option<&'static vise::Histogram>, +} + +impl MainOneshotExecutor { + /// Creates a new executor with the specified limit of cache misses for storage read operations (an anti-DoS measure). + /// The limit is applied for calls and gas estimations, but not during transaction validation. + pub fn new(missed_storage_invocation_limit: usize) -> Self { + Self { + missed_storage_invocation_limit, + execution_latency_histogram: None, + } + } + + /// Sets a histogram for measuring VM execution latency. 
+ pub fn set_execution_latency_histogram( + &mut self, + histogram: &'static vise::Histogram, + ) { + self.execution_latency_histogram = Some(histogram); + } +} + +#[async_trait] +impl OneshotExecutor for MainOneshotExecutor +where + S: ReadStorage + Send + 'static, +{ + async fn inspect_transaction_with_bytecode_compression( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + params: OneshotTracingParams, + ) -> anyhow::Result { + let missed_storage_invocation_limit = match env.system.execution_mode { + // storage accesses are not limited for tx validation + TxExecutionMode::VerifyExecute => usize::MAX, + TxExecutionMode::EthCall | TxExecutionMode::EstimateFee => { + self.missed_storage_invocation_limit + } + }; + let execution_latency_histogram = self.execution_latency_histogram; + + tokio::task::spawn_blocking(move || { + let mut tracers = vec![]; + let mut calls_result = Arc::>::default(); + if params.trace_calls { + tracers.push(CallTracer::new(calls_result.clone()).into_tracer_pointer()); + } + tracers.push( + StorageInvocations::new(missed_storage_invocation_limit).into_tracer_pointer(), + ); + + let executor = VmSandbox::new(storage, env, args, execution_latency_histogram); + let mut result = executor.apply(|vm, transaction| { + let (compression_result, tx_result) = vm + .inspect_transaction_with_bytecode_compression( + tracers.into(), + transaction, + true, + ); + OneshotTransactionExecutionResult { + tx_result: Box::new(tx_result), + compression_result: compression_result.map(drop), + call_traces: vec![], + } + }); + + result.call_traces = Arc::make_mut(&mut calls_result).take().unwrap_or_default(); + result + }) + .await + .context("VM execution panicked") + } +} + +#[async_trait] +impl TransactionValidator for MainOneshotExecutor +where + S: ReadStorage + Send + 'static, +{ + async fn validate_transaction( + &self, + storage: S, + env: OneshotEnv, + tx: L2Tx, + validation_params: ValidationParams, + ) -> anyhow::Result> { + anyhow::ensure!( + env.system.execution_mode == TxExecutionMode::VerifyExecute, + "Unexpected execution mode for tx validation: {:?} (expected `VerifyExecute`)", + env.system.execution_mode + ); + let execution_latency_histogram = self.execution_latency_histogram; + + tokio::task::spawn_blocking(move || { + let (validation_tracer, mut validation_result) = + ValidationTracer::::new( + validation_params, + env.system.version.into(), + ); + let tracers = vec![validation_tracer.into_tracer_pointer()]; + + let executor = VmSandbox::new( + storage, + env, + TxExecutionArgs::for_validation(tx), + execution_latency_histogram, + ); + let exec_result = executor.apply(|vm, transaction| { + vm.push_transaction(transaction); + vm.inspect(tracers.into(), VmExecutionMode::OneTx) + }); + let validation_result = Arc::make_mut(&mut validation_result) + .take() + .map_or(Ok(()), Err); + + match (exec_result.result, validation_result) { + (_, Err(violated_rule)) => Err(ValidationError::ViolatedRule(violated_rule)), + (ExecutionResult::Halt { reason }, _) => Err(ValidationError::FailedTx(reason)), + _ => Ok(()), + } + }) + .await + .context("VM execution panicked") + } +} + +#[derive(Debug)] +struct VmSandbox { + vm: Box>, + storage_view: StoragePtr>, + transaction: Transaction, + execution_latency_histogram: Option<&'static vise::Histogram>, +} + +impl VmSandbox { + /// This method is blocking. 
+ fn new( + storage: S, + mut env: OneshotEnv, + execution_args: TxExecutionArgs, + execution_latency_histogram: Option<&'static vise::Histogram>, + ) -> Self { + let mut storage_view = StorageView::new(storage); + Self::setup_storage_view(&mut storage_view, &execution_args, env.current_block); + + let protocol_version = env.system.version; + if execution_args.adjust_pubdata_price { + env.l1_batch.fee_input = adjust_pubdata_price_for_tx( + env.l1_batch.fee_input, + execution_args.transaction.gas_per_pubdata_byte_limit(), + env.l1_batch.enforced_base_fee.map(U256::from), + protocol_version.into(), + ); + }; + + let storage_view = storage_view.to_rc_ptr(); + let vm = Box::new(VmInstance::new_with_specific_version( + env.l1_batch, + env.system, + storage_view.clone(), + protocol_version.into_api_vm_version(), + )); + + Self { + vm, + storage_view, + transaction: execution_args.transaction, + execution_latency_histogram, + } + } + + /// This method is blocking. + fn setup_storage_view( + storage_view: &mut StorageView, + execution_args: &TxExecutionArgs, + current_block: Option, + ) { + let storage_view_setup_started_at = Instant::now(); + if let Some(nonce) = execution_args.enforced_nonce { + let nonce_key = get_nonce_key(&execution_args.transaction.initiator_account()); + let full_nonce = storage_view.read_value(&nonce_key); + let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); + let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); + storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); + } + + let payer = execution_args.transaction.payer(); + let balance_key = storage_key_for_eth_balance(&payer); + let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); + current_balance += execution_args.added_balance; + storage_view.set_value(balance_key, u256_to_h256(current_balance)); + + // Reset L2 block info if necessary. + if let Some(current_block) = current_block { + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let l2_block_info = + pack_block_info(current_block.number.into(), current_block.timestamp); + storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); + + let l2_block_txs_rolling_hash_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + ); + storage_view.set_value( + l2_block_txs_rolling_hash_key, + current_block.txs_rolling_hash, + ); + } + + let storage_view_setup_time = storage_view_setup_started_at.elapsed(); + // We don't want to emit too many logs. 
+        if storage_view_setup_time > Duration::from_millis(10) {
+            tracing::debug!("Prepared the storage view (took {storage_view_setup_time:?})",);
+        }
+    }
+
+    pub(super) fn apply<T, F>(mut self, apply_fn: F) -> T
+    where
+        F: FnOnce(&mut VmInstance<S, HistoryDisabled>, Transaction) -> T,
+    {
+        let tx_id = format!(
+            "{:?}-{}",
+            self.transaction.initiator_account(),
+            self.transaction.nonce().unwrap_or(Nonce(0))
+        );
+
+        let started_at = Instant::now();
+        let result = apply_fn(&mut *self.vm, self.transaction);
+        let vm_execution_took = started_at.elapsed();
+
+        if let Some(histogram) = self.execution_latency_histogram {
+            histogram.observe(vm_execution_took);
+        }
+        let memory_metrics = self.vm.record_vm_memory_metrics();
+        metrics::report_vm_memory_metrics(
+            &tx_id,
+            &memory_metrics,
+            vm_execution_took,
+            self.storage_view.as_ref().borrow_mut().metrics(),
+        );
+        result
+    }
+}
diff --git a/core/lib/vm_executor/src/shared.rs b/core/lib/vm_executor/src/shared.rs
new file mode 100644
index 00000000000..420005be05d
--- /dev/null
+++ b/core/lib/vm_executor/src/shared.rs
@@ -0,0 +1,12 @@
+//! Functionality shared among different types of executors.
+
+use vise::{EncodeLabelSet, EncodeLabelValue};
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
+#[metrics(label = "interaction", rename_all = "snake_case")]
+pub(crate) enum InteractionType {
+    Missed,
+    GetValue,
+    SetValue,
+    Total,
+}
diff --git a/core/lib/vm_interface/src/executor.rs b/core/lib/vm_interface/src/executor.rs
index ee6665abfcb..119f975fecd 100644
--- a/core/lib/vm_interface/src/executor.rs
+++ b/core/lib/vm_interface/src/executor.rs
@@ -3,11 +3,13 @@
 use std::fmt;
 
 use async_trait::async_trait;
-use zksync_types::Transaction;
+use zksync_types::{l2::L2Tx, Transaction};
 
 use crate::{
-    storage::StorageView, BatchTransactionExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv,
-    SystemEnv,
+    storage::{ReadStorage, StorageView},
+    tracer::{ValidationError, ValidationParams},
+    BatchTransactionExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, OneshotEnv,
+    OneshotTracingParams, OneshotTransactionExecutionResult, SystemEnv, TxExecutionArgs,
 };
 
 /// Factory of [`BatchExecutor`]s.
@@ -42,3 +44,29 @@
     /// Finished the current L1 batch.
     async fn finish_batch(self: Box<Self>) -> anyhow::Result<(FinishedL1Batch, StorageView<S>)>;
 }
+
+/// VM executor capable of executing isolated transactions / calls (as opposed to [batch execution](BatchExecutor)).
+#[async_trait]
+pub trait OneshotExecutor<S: ReadStorage> {
+    /// Executes a transaction or call with optional tracers.
+    async fn inspect_transaction_with_bytecode_compression(
+        &self,
+        storage: S,
+        env: OneshotEnv,
+        args: TxExecutionArgs,
+        tracing: OneshotTracingParams,
+    ) -> anyhow::Result<OneshotTransactionExecutionResult>;
+}
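For orientation, this is roughly how the new trait is meant to be driven (a hedged sketch: `storage`, `env`, and `args` construction is elided, since in practice the API server sandbox prepares them; `MainOneshotExecutor` is the implementation added earlier in this diff):

```rust
use zksync_multivm::interface::{
    executor::OneshotExecutor, storage::ReadStorage, ExecutionResult, OneshotEnv,
    OneshotTracingParams, TxExecutionArgs,
};
use zksync_vm_executor::oneshot::MainOneshotExecutor;

/// Runs a single call with call tracing enabled and reports whether it succeeded.
async fn run_oneshot_call<S: ReadStorage + Send + 'static>(
    storage: S,
    env: OneshotEnv,
    args: TxExecutionArgs,
) -> anyhow::Result<bool> {
    // The limit on missed (uncached) storage reads is an anti-DoS knob; 1_000 is illustrative.
    let executor = MainOneshotExecutor::new(1_000);
    let result = executor
        .inspect_transaction_with_bytecode_compression(
            storage,
            env,
            args,
            OneshotTracingParams { trace_calls: true },
        )
        .await?;
    Ok(matches!(result.tx_result.result, ExecutionResult::Success { .. }))
}
```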
+
+/// VM executor capable of validating transactions.
+#[async_trait]
+pub trait TransactionValidator<S: ReadStorage>: OneshotExecutor<S> {
+    /// Validates the provided transaction.
+    async fn validate_transaction(
+        &self,
+        storage: S,
+        env: OneshotEnv,
+        tx: L2Tx,
+        validation_params: ValidationParams,
+    ) -> anyhow::Result<Result<(), ValidationError>>;
+}
diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs
index 315eb2bb36a..2b30f82e0ce 100644
--- a/core/lib/vm_interface/src/lib.rs
+++ b/core/lib/vm_interface/src/lib.rs
@@ -24,15 +24,16 @@ pub use crate::{
         VmRevertReason, VmRevertReasonParsingError,
     },
     inputs::{
-        L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv, TxExecutionMode,
-        VmExecutionMode,
+        L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, StoredL2BlockEnv, SystemEnv,
+        TxExecutionArgs, TxExecutionMode, VmExecutionMode,
     },
     outputs::{
         BatchTransactionExecutionResult, BootloaderMemory, Call, CallType, CircuitStatistic,
         CompressedBytecodeInfo, CurrentExecutionState, DeduplicatedWritesMetrics,
-        ExecutionResult, FinishedL1Batch, L2Block, Refunds, TransactionExecutionMetrics,
-        TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionLogs,
-        VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics,
+        ExecutionResult, FinishedL1Batch, L2Block, OneshotTransactionExecutionResult, Refunds,
+        TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent,
+        VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics,
+        VmMemoryMetrics,
     },
     tracer,
 },
diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs
index 4801c4d88b5..24f58ae72f1 100644
--- a/core/lib/vm_interface/src/types/inputs/mod.rs
+++ b/core/lib/vm_interface/src/types/inputs/mod.rs
@@ -1,3 +1,7 @@
+use zksync_types::{
+    l2::L2Tx, ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256,
+};
+
 pub use self::{
     execution_mode::VmExecutionMode,
     l1_batch_env::L1BatchEnv,
@@ -21,3 +25,71 @@ pub struct OneshotEnv {
     /// in the system context contract, which are set from `L1BatchEnv.first_l2_block` by default.
     pub current_block: Option<StoredL2BlockEnv>,
 }
+
+/// Executor-independent arguments necessary for oneshot transaction execution.
+///
+/// # Developer guidelines
+///
+/// Please don't add fields that duplicate `SystemEnv` or `L1BatchEnv` information, since both of these
+/// are also provided to an executor.
+#[derive(Debug)]
+pub struct TxExecutionArgs {
+    /// Transaction / call itself.
+    pub transaction: Transaction,
+    /// Nonce override for the initiator account.
+    pub enforced_nonce: Option<Nonce>,
+    /// Balance added to the initiator account.
+    pub added_balance: U256,
+    /// If `true`, the batch's L1 / pubdata gas price will be adjusted so that the transaction's gas per pubdata limit
+    /// does not exceed the one in the block. This is often helpful in case we want the transaction validation to work
+    /// regardless of the current L1 prices for gas or pubdata.
+    pub adjust_pubdata_price: bool,
+}
+
+impl TxExecutionArgs {
+    pub fn for_validation(tx: L2Tx) -> Self {
+        Self {
+            enforced_nonce: Some(tx.nonce()),
+            added_balance: U256::zero(),
+            adjust_pubdata_price: true,
+            transaction: tx.into(),
+        }
+    }
+
+    pub fn for_eth_call(mut call: L2Tx) -> Self {
+        if call.common_data.signature.is_empty() {
+            call.common_data.signature = PackedEthSignature::default().serialize_packed().into();
+        }
+
+        Self {
+            enforced_nonce: None,
+            added_balance: U256::zero(),
+            adjust_pubdata_price: false,
+            transaction: call.into(),
+        }
+    }
+
+    pub fn for_gas_estimate(transaction: Transaction) -> Self {
+        // For L2 transactions we need to explicitly put enough balance into the user's account,
+        // while for L1->L2 transactions the `to_mint` field plays this role.
+        let added_balance = match &transaction.common_data {
+            ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas,
+            ExecuteTransactionCommon::L1(_) => U256::zero(),
+            ExecuteTransactionCommon::ProtocolUpgrade(_) => U256::zero(),
+        };
+
+        Self {
+            enforced_nonce: transaction.nonce(),
+            added_balance,
+            adjust_pubdata_price: true,
+            transaction,
+        }
+    }
+}
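The three constructors above encode the per-entry-point policies in one place. A sketch of how call sites pick between them (transaction construction is elided; only the knobs each constructor flips are of interest):

```rust
use zksync_multivm::interface::TxExecutionArgs;
use zksync_types::l2::L2Tx;

/// Builds execution args for the three sandbox entry points.
fn args_for(call: L2Tx, tx: L2Tx) -> (TxExecutionArgs, TxExecutionArgs, TxExecutionArgs) {
    // eth_call: no nonce enforcement, no balance top-up, batch pubdata price kept as is.
    let call_args = TxExecutionArgs::for_eth_call(call);
    // Validation: enforce the tx nonce and adjust the pubdata price so validation
    // does not depend on current L1 gas / pubdata prices.
    let validation_args = TxExecutionArgs::for_validation(tx.clone());
    // Gas estimation: additionally credit the initiator with `gas_limit * max_fee_per_gas`.
    let estimate_args = TxExecutionArgs::for_gas_estimate(tx.into());
    (call_args, validation_args, estimate_args)
}
```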
+
+/// Inputs and outputs for all tracers supported for oneshot transaction / call execution.
+#[derive(Debug, Default)]
+pub struct OneshotTracingParams {
+    /// Whether to trace contract calls.
+    pub trace_calls: bool,
+}
diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs
index d74d74652e2..6f9c02f0b58 100644
--- a/core/lib/vm_interface/src/types/outputs/execution_result.rs
+++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs
@@ -11,7 +11,8 @@ use zksync_types::{
 };
 
 use crate::{
-    CompressedBytecodeInfo, Halt, VmExecutionMetrics, VmExecutionStatistics, VmRevertReason,
+    BytecodeCompressionError, CompressedBytecodeInfo, Halt, VmExecutionMetrics,
+    VmExecutionStatistics, VmRevertReason,
 };
 
 const L1_MESSAGE_EVENT_SIGNATURE: H256 = H256([
@@ -297,11 +298,14 @@ impl Call {
     }
 }
 
-/// Mid-level transaction execution output returned by a batch executor.
+/// Mid-level transaction execution output returned by a [batch executor](crate::executor::BatchExecutor).
 #[derive(Debug, Clone)]
 pub struct BatchTransactionExecutionResult {
+    /// VM result.
     pub tx_result: Box<VmExecutionResultAndLogs>,
+    /// Compressed bytecodes used by the transaction.
     pub compressed_bytecodes: Vec<CompressedBytecodeInfo>,
+    /// Call traces (if requested; otherwise, empty).
     pub call_traces: Vec<Call>,
 }
 
@@ -311,6 +315,17 @@
     }
 }
 
+/// Mid-level transaction execution output returned by a [oneshot executor](crate::executor::OneshotExecutor).
+#[derive(Debug)]
+pub struct OneshotTransactionExecutionResult {
+    /// VM result.
+    pub tx_result: Box<VmExecutionResultAndLogs>,
+    /// Result of compressing bytecodes used by the transaction.
+    pub compression_result: Result<(), BytecodeCompressionError>,
+    /// Call traces (if requested; otherwise, empty).
+    pub call_traces: Vec<Call>,
+}
+
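Consumers typically fold the three outputs of this struct into one verdict. A hypothetical helper for illustration (the error texts are not from the codebase; it relies on `Display` being implemented for the revert / halt reasons):

```rust
use zksync_multivm::interface::{ExecutionResult, OneshotTransactionExecutionResult};

/// Collapses a oneshot execution result into a single user-facing outcome.
fn summarize(result: &OneshotTransactionExecutionResult) -> Result<(), String> {
    // Bytecode publication failures surface separately from the VM result.
    if result.compression_result.is_err() {
        return Err("failed to publish (compress) factory dependencies".into());
    }
    match &result.tx_result.result {
        ExecutionResult::Success { .. } => Ok(()),
        ExecutionResult::Revert { output } => Err(format!("reverted: {output}")),
        ExecutionResult::Halt { reason } => Err(format!("halted: {reason}")),
    }
}
```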
 /// High-level transaction execution result used by the API server sandbox etc.
 #[derive(Debug, Clone, PartialEq)]
 pub struct TransactionExecutionResult {
diff --git a/core/lib/vm_interface/src/types/outputs/mod.rs b/core/lib/vm_interface/src/types/outputs/mod.rs
index abefa59bbe7..1fa1cd5d168 100644
--- a/core/lib/vm_interface/src/types/outputs/mod.rs
+++ b/core/lib/vm_interface/src/types/outputs/mod.rs
@@ -1,9 +1,9 @@
 pub use self::{
     bytecode::CompressedBytecodeInfo,
     execution_result::{
-        BatchTransactionExecutionResult, Call, CallType, ExecutionResult, Refunds,
-        TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionLogs,
-        VmExecutionResultAndLogs,
+        BatchTransactionExecutionResult, Call, CallType, ExecutionResult,
+        OneshotTransactionExecutionResult, Refunds, TransactionExecutionResult, TxExecutionStatus,
+        VmEvent, VmExecutionLogs, VmExecutionResultAndLogs,
     },
     execution_state::{BootloaderMemory, CurrentExecutionState},
     finished_l1batch::FinishedL1Batch,
diff --git a/core/lib/vm_interface/src/types/tracer.rs b/core/lib/vm_interface/src/types/tracer.rs
index 1b42b2eabbb..ba07772c7f2 100644
--- a/core/lib/vm_interface/src/types/tracer.rs
+++ b/core/lib/vm_interface/src/types/tracer.rs
@@ -1,3 +1,7 @@
+use std::{collections::HashSet, fmt};
+
+use zksync_types::{Address, U256};
+
 use crate::Halt;
 
 #[derive(Debug, Clone, PartialEq)]
@@ -37,3 +41,78 @@ pub enum VmExecutionStopReason {
     VmFinished,
     TracerRequestedStop(TracerExecutionStopReason),
 }
+
+/// Transaction validation parameters.
+#[derive(Debug, Clone)]
+pub struct ValidationParams {
+    pub user_address: Address,
+    pub paymaster_address: Address,
+    /// Slots that are trusted (i.e., the user can access them).
+    pub trusted_slots: HashSet<(Address, U256)>,
+    /// Trusted addresses (the user can access any slots on these addresses).
+    pub trusted_addresses: HashSet<Address>,
+    /// Slots that are trusted and whose values are treated as new trusted addresses.
+    /// They are needed to work correctly with a beacon proxy, where the address of the implementation
+    /// is stored in the beacon.
+    pub trusted_address_slots: HashSet<(Address, U256)>,
+    /// Amount of computational gas that the validation step is allowed to use.
+    pub computational_gas_limit: u32,
+}
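To make the beacon-proxy remark concrete: marking the beacon's implementation slot as a trusted address slot lets validation read it and treat the stored address as trusted. A sketch under stated assumptions (the slot constant is the EIP-1967 beacon slot, `keccak256("eip1967.proxy.beacon") - 1`, and the gas limit is illustrative; verify both before relying on them):

```rust
use std::collections::HashSet;

use zksync_multivm::interface::tracer::ValidationParams;
use zksync_types::{Address, U256};

/// Validation params that additionally trust a beacon proxy's implementation slot.
fn params_for_beacon_proxy(user: Address, beacon: Address) -> ValidationParams {
    // EIP-1967 beacon slot (assumed constant; not taken from this diff).
    let slot_bytes =
        hex::decode("a3f0ad74e5423aebfd80d3ef4346578335a9a72aeaee59ff6cb3582b35133d50").unwrap();
    let beacon_slot = U256::from_big_endian(&slot_bytes);

    ValidationParams {
        user_address: user,
        paymaster_address: Address::zero(),
        trusted_slots: HashSet::new(),
        trusted_addresses: HashSet::new(),
        // The value stored at (beacon, beacon_slot) becomes a trusted address during validation.
        trusted_address_slots: HashSet::from([(beacon, beacon_slot)]),
        computational_gas_limit: 300_000,
    }
}
```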
+
+/// Rules that can be violated when validating a transaction.
+#[derive(Debug, Clone)]
+pub enum ViolatedValidationRule {
+    /// The transaction touched disallowed storage slots during validation.
+    TouchedDisallowedStorageSlots(Address, U256),
+    /// The transaction called a contract without attached bytecode.
+    CalledContractWithNoCode(Address),
+    /// The transaction touched disallowed context.
+    TouchedDisallowedContext,
+    /// The transaction used too much gas during validation.
+    TookTooManyComputationalGas(u32),
+}
+
+impl fmt::Display for ViolatedValidationRule {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ViolatedValidationRule::TouchedDisallowedStorageSlots(contract, key) => write!(
+                f,
+                "Touched disallowed storage slots: address {contract:x}, key: {key:x}",
+            ),
+            ViolatedValidationRule::CalledContractWithNoCode(contract) => {
+                write!(f, "Called contract with no code: {contract:x}")
+            }
+            ViolatedValidationRule::TouchedDisallowedContext => {
+                write!(f, "Touched disallowed context")
+            }
+            ViolatedValidationRule::TookTooManyComputationalGas(gas_limit) => {
+                write!(
+                    f,
+                    "Took too many computational gas, allowed limit: {gas_limit}"
+                )
+            }
+        }
+    }
+}
+
+/// Errors returned when validating a transaction.
+#[derive(Debug)]
+pub enum ValidationError {
+    /// VM execution was halted during validation.
+    FailedTx(Halt),
+    /// Transaction violated one of the account validation rules.
+    ViolatedRule(ViolatedValidationRule),
+}
+
+impl fmt::Display for ValidationError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::FailedTx(revert_reason) => {
+                write!(f, "Validation revert: {}", revert_reason)
+            }
+            Self::ViolatedRule(rule) => {
+                write!(f, "Violated validation rules: {}", rule)
+            }
+        }
+    }
+}
diff --git a/core/lib/web3_decl/src/error.rs b/core/lib/web3_decl/src/error.rs
index f42fe8de59d..3aa16a9ab77 100644
--- a/core/lib/web3_decl/src/error.rs
+++ b/core/lib/web3_decl/src/error.rs
@@ -60,6 +60,19 @@ pub struct EnrichedClientError {
     args: HashMap<&'static str, String>,
 }
 
+/// Whether the error should be considered retriable.
+pub fn is_retriable(err: &ClientError) -> bool {
+    match err {
+        ClientError::Transport(_) | ClientError::RequestTimeout => true,
+        ClientError::Call(err) => {
+            // At least some RPC providers use "internal error" in case of the server being overloaded
+            err.code() == ErrorCode::ServerIsBusy.code()
+                || err.code() == ErrorCode::InternalError.code()
+        }
+        _ => false,
+    }
+}
+
 /// Alias for a result with enriched client RPC error.
 pub type EnrichedClientResult<T> = Result<T, EnrichedClientError>;
 
@@ -87,15 +100,7 @@
     /// Whether the error should be considered retriable.
pub fn is_retriable(&self) -> bool { - match self.as_ref() { - ClientError::Transport(_) | ClientError::RequestTimeout => true, - ClientError::Call(err) => { - // At least some RPC providers use "internal error" in case of the server being overloaded - err.code() == ErrorCode::ServerIsBusy.code() - || err.code() == ErrorCode::InternalError.code() - } - _ => false, - } + is_retriable(&self.inner_error) } } diff --git a/core/lib/web3_decl/src/namespaces/en.rs b/core/lib/web3_decl/src/namespaces/en.rs index dac774dd7bd..8a4d2db8c6f 100644 --- a/core/lib/web3_decl/src/namespaces/en.rs +++ b/core/lib/web3_decl/src/namespaces/en.rs @@ -25,6 +25,9 @@ pub trait EnNamespace { #[method(name = "consensusGenesis")] async fn consensus_genesis(&self) -> RpcResult>; + #[method(name = "consensusGlobalConfig")] + async fn consensus_global_config(&self) -> RpcResult>; + /// Lists all tokens created at or before the specified `block_number`. /// /// This method is used by EN after snapshot recovery in order to recover token records. diff --git a/core/lib/zksync_core_leftovers/Cargo.toml b/core/lib/zksync_core_leftovers/Cargo.toml index 4eab8823474..6aa6e6a8b43 100644 --- a/core/lib/zksync_core_leftovers/Cargo.toml +++ b/core/lib/zksync_core_leftovers/Cargo.toml @@ -11,12 +11,10 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_dal.workspace = true zksync_config.workspace = true zksync_protobuf.workspace = true zksync_protobuf_config.workspace = true zksync_env_config.workspace = true -zksync_node_genesis.workspace = true anyhow.workspace = true tokio = { workspace = true, features = ["time"] } diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index f7d40210b48..040e2a94a11 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -29,6 +29,7 @@ zksync_utils.workspace = true zksync_protobuf.workspace = true zksync_mini_merkle_tree.workspace = true zksync_multivm.workspace = true +zksync_vm_executor.workspace = true vise.workspace = true anyhow.workspace = true diff --git a/core/node/api_server/src/execution_sandbox/apply.rs b/core/node/api_server/src/execution_sandbox/apply.rs index 8b5cf69822b..0fbf8abc3dd 100644 --- a/core/node/api_server/src/execution_sandbox/apply.rs +++ b/core/node/api_server/src/execution_sandbox/apply.rs @@ -9,19 +9,12 @@ use std::time::{Duration, Instant}; use anyhow::Context as _; -use async_trait::async_trait; use tokio::runtime::Handle; use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_multivm::{ - interface::{ - storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, - BytecodeCompressionError, L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv, - TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - }, - tracers::StorageInvocations, - utils::{adjust_pubdata_price_for_tx, get_eth_call_gas_limit}, - vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryDisabled}, - MultiVMTracer, MultiVmTracerPointer, VmInstance, + interface::{L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv}, + utils::get_eth_call_gas_limit, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_state::PostgresStorage; use zksync_system_constants::{ @@ -30,18 +23,15 @@ use zksync_system_constants::{ }; use zksync_types::{ api, - block::{pack_block_info, unpack_block_info, L2BlockHasher}, + block::{unpack_block_info, L2BlockHasher}, fee_model::BatchFeeInput, - get_nonce_key, - utils::{decompose_full_nonce, nonces_to_full_nonce, 
storage_key_for_eth_balance}, - AccountTreeId, L1BatchNumber, L2BlockNumber, Nonce, ProtocolVersionId, StorageKey, Transaction, - H256, U256, + AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, H256, U256, }; -use zksync_utils::{h256_to_u256, time::seconds_since_epoch, u256_to_h256}; +use zksync_utils::{h256_to_u256, time::seconds_since_epoch}; use super::{ - vm_metrics::{self, SandboxStage, SANDBOX_METRICS}, - ApiTracer, BlockArgs, OneshotExecutor, TxExecutionArgs, TxSetupArgs, + vm_metrics::{SandboxStage, SANDBOX_METRICS}, + BlockArgs, TxSetupArgs, }; pub(super) async fn prepare_env_and_storage( @@ -207,218 +197,6 @@ fn prepare_env( (system_env, l1_batch_env) } -// public for testing purposes -#[derive(Debug)] -pub(super) struct VmSandbox { - vm: Box>, - storage_view: StoragePtr>, - transaction: Transaction, -} - -impl VmSandbox { - /// This method is blocking. - pub fn new(storage: S, mut env: OneshotEnv, execution_args: TxExecutionArgs) -> Self { - let mut storage_view = StorageView::new(storage); - Self::setup_storage_view(&mut storage_view, &execution_args, env.current_block); - - let protocol_version = env.system.version; - if execution_args.adjust_pubdata_price { - env.l1_batch.fee_input = adjust_pubdata_price_for_tx( - env.l1_batch.fee_input, - execution_args.transaction.gas_per_pubdata_byte_limit(), - env.l1_batch.enforced_base_fee.map(U256::from), - protocol_version.into(), - ); - }; - - let storage_view = storage_view.to_rc_ptr(); - let vm = Box::new(VmInstance::new_with_specific_version( - env.l1_batch, - env.system, - storage_view.clone(), - protocol_version.into_api_vm_version(), - )); - - Self { - vm, - storage_view, - transaction: execution_args.transaction, - } - } - - /// This method is blocking. - fn setup_storage_view( - storage_view: &mut StorageView, - execution_args: &TxExecutionArgs, - current_block: Option, - ) { - let storage_view_setup_started_at = Instant::now(); - if let Some(nonce) = execution_args.enforced_nonce { - let nonce_key = get_nonce_key(&execution_args.transaction.initiator_account()); - let full_nonce = storage_view.read_value(&nonce_key); - let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); - let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); - storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); - } - - let payer = execution_args.transaction.payer(); - let balance_key = storage_key_for_eth_balance(&payer); - let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); - current_balance += execution_args.added_balance; - storage_view.set_value(balance_key, u256_to_h256(current_balance)); - - // Reset L2 block info if necessary. - if let Some(current_block) = current_block { - let l2_block_info_key = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let l2_block_info = - pack_block_info(current_block.number.into(), current_block.timestamp); - storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); - - let l2_block_txs_rolling_hash_key = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - storage_view.set_value( - l2_block_txs_rolling_hash_key, - current_block.txs_rolling_hash, - ); - } - - let storage_view_setup_time = storage_view_setup_started_at.elapsed(); - // We don't want to emit too many logs. 
- if storage_view_setup_time > Duration::from_millis(10) { - tracing::debug!("Prepared the storage view (took {storage_view_setup_time:?})",); - } - } - - fn wrap_tracers( - tracers: Vec, - env: &OneshotEnv, - missed_storage_invocation_limit: usize, - ) -> Vec, HistoryDisabled>> { - let storage_invocation_tracer = StorageInvocations::new(missed_storage_invocation_limit); - let protocol_version = env.system.version; - tracers - .into_iter() - .map(|tracer| tracer.into_boxed(protocol_version)) - .chain([storage_invocation_tracer.into_tracer_pointer()]) - .collect() - } - - pub(super) fn apply(mut self, apply_fn: F) -> T - where - F: FnOnce(&mut VmInstance, Transaction) -> T, - { - let tx_id = format!( - "{:?}-{}", - self.transaction.initiator_account(), - self.transaction.nonce().unwrap_or(Nonce(0)) - ); - - let execution_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Execution].start(); - let result = apply_fn(&mut *self.vm, self.transaction); - let vm_execution_took = execution_latency.observe(); - - let memory_metrics = self.vm.record_vm_memory_metrics(); - vm_metrics::report_vm_memory_metrics( - &tx_id, - &memory_metrics, - vm_execution_took, - self.storage_view.as_ref().borrow_mut().metrics(), - ); - result - } -} - -/// Main [`OneshotExecutor`] implementation used by the API server. -#[derive(Debug, Default)] -pub struct MainOneshotExecutor { - missed_storage_invocation_limit: usize, -} - -impl MainOneshotExecutor { - /// Creates a new executor with the specified limit of cache misses for storage read operations (an anti-DoS measure). - /// The limit is applied for calls and gas estimations, but not during transaction validation. - pub fn new(missed_storage_invocation_limit: usize) -> Self { - Self { - missed_storage_invocation_limit, - } - } -} - -#[async_trait] -impl OneshotExecutor for MainOneshotExecutor -where - S: ReadStorage + Send + 'static, -{ - type Tracers = Vec; - - async fn inspect_transaction( - &self, - storage: S, - env: OneshotEnv, - args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result { - let missed_storage_invocation_limit = match env.system.execution_mode { - // storage accesses are not limited for tx validation - TxExecutionMode::VerifyExecute => usize::MAX, - TxExecutionMode::EthCall | TxExecutionMode::EstimateFee => { - self.missed_storage_invocation_limit - } - }; - - tokio::task::spawn_blocking(move || { - let tracers = VmSandbox::wrap_tracers(tracers, &env, missed_storage_invocation_limit); - let executor = VmSandbox::new(storage, env, args); - executor.apply(|vm, transaction| { - vm.push_transaction(transaction); - vm.inspect(tracers.into(), VmExecutionMode::OneTx) - }) - }) - .await - .context("VM execution panicked") - } - - async fn inspect_transaction_with_bytecode_compression( - &self, - storage: S, - env: OneshotEnv, - args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result<( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - )> { - let missed_storage_invocation_limit = match env.system.execution_mode { - // storage accesses are not limited for tx validation - TxExecutionMode::VerifyExecute => usize::MAX, - TxExecutionMode::EthCall | TxExecutionMode::EstimateFee => { - self.missed_storage_invocation_limit - } - }; - - tokio::task::spawn_blocking(move || { - let tracers = VmSandbox::wrap_tracers(tracers, &env, missed_storage_invocation_limit); - let executor = VmSandbox::new(storage, env, args); - executor.apply(|vm, transaction| { - let (bytecodes_result, exec_result) = vm - 
.inspect_transaction_with_bytecode_compression( - tracers.into(), - transaction, - true, - ); - (bytecodes_result.map(drop), exec_result) - }) - }) - .await - .context("VM execution panicked") - } -} - async fn read_stored_l2_block( connection: &mut Connection<'_, Core>, l2_block_number: L2BlockNumber, @@ -467,15 +245,6 @@ impl BlockArgs { ) } - fn is_estimate_like(&self) -> bool { - matches!( - self.block_id, - api::BlockId::Number(api::BlockNumber::Pending) - | api::BlockId::Number(api::BlockNumber::Latest) - | api::BlockId::Number(api::BlockNumber::Committed) - ) - } - pub(crate) async fn default_eth_call_gas( &self, connection: &mut Connection<'_, Core>, @@ -529,7 +298,7 @@ impl BlockArgs { .context("resolved L2 block disappeared from storage")? }; - let historical_fee_input = if !self.is_estimate_like() { + let historical_fee_input = if !self.resolves_to_latest_sealed_l2_block() { let l2_block_header = connection .blocks_dal() .get_l2_block_header(self.resolved_block_number) diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 086a75c81de..d22d7de47d0 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -3,86 +3,27 @@ use async_trait::async_trait; use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{ - storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TransactionExecutionMetrics, - VmExecutionResultAndLogs, -}; -use zksync_types::{ - api::state_override::StateOverride, l2::L2Tx, ExecuteTransactionCommon, Nonce, - PackedEthSignature, Transaction, U256, + executor::{OneshotExecutor, TransactionValidator}, + storage::ReadStorage, + tracer::{ValidationError, ValidationParams}, + Call, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, + TransactionExecutionMetrics, TxExecutionArgs, VmExecutionResultAndLogs, }; +use zksync_types::{api::state_override::StateOverride, l2::L2Tx}; +use zksync_vm_executor::oneshot::{MainOneshotExecutor, MockOneshotExecutor}; use super::{ - apply::{self, MainOneshotExecutor}, - storage::StorageWithOverrides, - testonly::MockOneshotExecutor, - vm_metrics, ApiTracer, BlockArgs, OneshotExecutor, TxSetupArgs, VmPermit, + apply, storage::StorageWithOverrides, vm_metrics, BlockArgs, TxSetupArgs, VmPermit, + SANDBOX_METRICS, }; - -/// Executor-independent arguments necessary to for oneshot transaction execution. -/// -/// # Developer guidelines -/// -/// Please don't add fields that duplicate `SystemEnv` or `L1BatchEnv` information, since both of these -/// are also provided to an executor. -#[derive(Debug)] -pub(crate) struct TxExecutionArgs { - /// Transaction / call itself. - pub transaction: Transaction, - /// Nonce override for the initiator account. - pub enforced_nonce: Option, - /// Balance added to the initiator account. - pub added_balance: U256, - /// If `true`, then the batch's L1 / pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <= - /// to the one in the block. This is often helpful in case we want the transaction validation to work regardless of the - /// current L1 prices for gas or pubdata. 
- pub adjust_pubdata_price: bool, -} - -impl TxExecutionArgs { - pub fn for_validation(tx: L2Tx) -> Self { - Self { - enforced_nonce: Some(tx.nonce()), - added_balance: U256::zero(), - adjust_pubdata_price: true, - transaction: tx.into(), - } - } - - pub fn for_eth_call(mut call: L2Tx) -> Self { - if call.common_data.signature.is_empty() { - call.common_data.signature = PackedEthSignature::default().serialize_packed().into(); - } - - Self { - enforced_nonce: None, - added_balance: U256::zero(), - adjust_pubdata_price: false, - transaction: call.into(), - } - } - - pub fn for_gas_estimate(transaction: Transaction) -> Self { - // For L2 transactions we need to explicitly put enough balance into the account of the users - // while for L1->L2 transactions the `to_mint` field plays this role - let added_balance = match &transaction.common_data { - ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas, - ExecuteTransactionCommon::L1(_) => U256::zero(), - ExecuteTransactionCommon::ProtocolUpgrade(_) => U256::zero(), - }; - - Self { - enforced_nonce: transaction.nonce(), - added_balance, - adjust_pubdata_price: true, - transaction, - } - } -} +use crate::execution_sandbox::vm_metrics::SandboxStage; #[derive(Debug, Clone)] -pub(crate) struct TransactionExecutionOutput { +pub struct TransactionExecutionOutput { /// Output of the VM. pub vm: VmExecutionResultAndLogs, + /// Traced calls if requested. + pub call_traces: Vec, /// Execution metrics. pub metrics: TransactionExecutionMetrics, /// Were published bytecodes OK? @@ -91,7 +32,7 @@ pub(crate) struct TransactionExecutionOutput { /// Executor of transactions. #[derive(Debug)] -pub(crate) enum TransactionExecutor { +pub enum TransactionExecutor { Real(MainOneshotExecutor), #[doc(hidden)] // Intended for tests only Mock(MockOneshotExecutor), @@ -99,7 +40,10 @@ pub(crate) enum TransactionExecutor { impl TransactionExecutor { pub fn real(missed_storage_invocation_limit: usize) -> Self { - Self::Real(MainOneshotExecutor::new(missed_storage_invocation_limit)) + let mut executor = MainOneshotExecutor::new(missed_storage_invocation_limit); + executor + .set_execution_latency_histogram(&SANDBOX_METRICS.sandbox[&SandboxStage::Execution]); + Self::Real(executor) } /// This method assumes that (block with number `resolved_block_number` is present in DB) @@ -114,7 +58,7 @@ impl TransactionExecutor { connection: Connection<'static, Core>, block_args: BlockArgs, state_override: Option, - tracers: Vec, + tracing_params: OneshotTracingParams, ) -> anyhow::Result { let total_factory_deps = execution_args.transaction.execute.factory_deps.len() as u16; let (env, storage) = @@ -122,64 +66,91 @@ impl TransactionExecutor { let state_override = state_override.unwrap_or_default(); let storage = StorageWithOverrides::new(storage, &state_override); - let (published_bytecodes, execution_result) = self - .inspect_transaction_with_bytecode_compression(storage, env, execution_args, tracers) + let result = self + .inspect_transaction_with_bytecode_compression( + storage, + env, + execution_args, + tracing_params, + ) .await?; drop(vm_permit); let metrics = - vm_metrics::collect_tx_execution_metrics(total_factory_deps, &execution_result); + vm_metrics::collect_tx_execution_metrics(total_factory_deps, &result.tx_result); Ok(TransactionExecutionOutput { - vm: execution_result, + vm: *result.tx_result, + call_traces: result.call_traces, metrics, - are_published_bytecodes_ok: published_bytecodes.is_ok(), + are_published_bytecodes_ok: 
result.compression_result.is_ok(), }) } } +impl From<MockOneshotExecutor> for TransactionExecutor { + fn from(executor: MockOneshotExecutor) -> Self { + Self::Mock(executor) + } +} + #[async_trait] impl<S> OneshotExecutor<S> for TransactionExecutor where S: ReadStorage + Send + 'static, { - type Tracers = Vec<ApiTracer>; - - async fn inspect_transaction( + async fn inspect_transaction_with_bytecode_compression( &self, storage: S, env: OneshotEnv, args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result<VmExecutionResultAndLogs> { + tracing_params: OneshotTracingParams, + ) -> anyhow::Result<OneshotTransactionExecutionResult> { match self { Self::Real(executor) => { executor - .inspect_transaction(storage, env, args, tracers) + .inspect_transaction_with_bytecode_compression( + storage, + env, + args, + tracing_params, + ) + .await + } + Self::Mock(executor) => { + executor + .inspect_transaction_with_bytecode_compression( + storage, + env, + args, + tracing_params, + ) .await } - Self::Mock(executor) => executor.inspect_transaction(storage, env, args, ()).await, } } +} - async fn inspect_transaction_with_bytecode_compression( +#[async_trait] +impl<S> TransactionValidator<S> for TransactionExecutor +where + S: ReadStorage + Send + 'static, +{ + async fn validate_transaction( &self, storage: S, env: OneshotEnv, - args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result<( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - )> { + tx: L2Tx, + validation_params: ValidationParams, + ) -> anyhow::Result<Result<(), ValidationError>> { match self { Self::Real(executor) => { executor - .inspect_transaction_with_bytecode_compression(storage, env, args, tracers) + .validate_transaction(storage, env, tx, validation_params) .await } Self::Mock(executor) => { executor - .inspect_transaction_with_bytecode_compression(storage, env, args, ()) + .validate_transaction(storage, env, tx, validation_params) .await } } diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index f2a3f0e5f8c..79c6123642c 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -4,23 +4,18 @@ use std::{ }; use anyhow::Context as _; -use async_trait::async_trait; use rand::{thread_rng, Rng}; use zksync_dal::{pruning_dal::PruningInfo, Connection, Core, CoreDal, DalError}; -use zksync_multivm::interface::{ - storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TxExecutionMode, - VmExecutionResultAndLogs, -}; +use zksync_multivm::interface::TxExecutionMode; use zksync_state::PostgresStorageCaches; use zksync_types::{ api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, }; +pub use self::execute::TransactionExecutor; // FIXME (PLA-1018): remove use self::vm_metrics::SandboxStage; pub(super) use self::{ error::SandboxExecutionError, - execute::{TransactionExecutor, TxExecutionArgs}, - tracers::ApiTracer, validate::ValidationError, vm_metrics::{SubmitTxStage, SANDBOX_METRICS}, }; @@ -31,10 +26,8 @@ mod apply; mod error; mod execute; mod storage; -pub mod testonly; #[cfg(test)] mod tests; -mod tracers; mod validate; mod vm_metrics; @@ -158,7 +151,7 @@ async fn get_pending_state( /// Arguments for VM execution necessary to set up storage and environment.
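With the new `From<MockOneshotExecutor>` conversion above, tests can drop the mock in wherever a `TransactionExecutor` is expected. A minimal sketch of the intended wiring, assuming the mock keeps the `set_tx_responses` helper it had before the move to `zksync_vm_executor::oneshot`:

    use zksync_multivm::interface::ExecutionResult;
    use zksync_vm_executor::oneshot::MockOneshotExecutor;

    // Respond to every transaction with an empty success result (sketch only).
    let mut mock = MockOneshotExecutor::default();
    mock.set_tx_responses(|_tx, _env| ExecutionResult::Success { output: vec![] });
    let executor: TransactionExecutor = mock.into();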
#[derive(Debug, Clone)] -pub(crate) struct TxSetupArgs { +pub struct TxSetupArgs { pub execution_mode: TxExecutionMode, pub operator_account: AccountTreeId, pub fee_input: BatchFeeInput, @@ -184,7 +177,7 @@ impl TxSetupArgs { caches: PostgresStorageCaches::new(1, 1), validation_computational_gas_limit: u32::MAX, chain_id: L2ChainId::default(), - whitelisted_tokens_for_aa: Vec::new(), + whitelisted_tokens_for_aa: vec![], enforced_base_fee: None, } } @@ -215,7 +208,7 @@ impl BlockStartInfoInner { /// Information about first L1 batch / L2 block in the node storage. #[derive(Debug, Clone)] -pub(crate) struct BlockStartInfo { +pub struct BlockStartInfo { cached_pruning_info: Arc>, max_cache_age: Duration, } @@ -331,7 +324,7 @@ impl BlockStartInfo { } #[derive(Debug, thiserror::Error)] -pub(crate) enum BlockArgsError { +pub enum BlockArgsError { #[error("Block is pruned; first retained block is {0}")] Pruned(L2BlockNumber), #[error("Block is missing, but can appear in the future")] @@ -342,7 +335,7 @@ pub(crate) enum BlockArgsError { /// Information about a block provided to VM. #[derive(Debug, Clone, Copy)] -pub(crate) struct BlockArgs { +pub struct BlockArgs { block_id: api::BlockId, resolved_block_number: L2BlockNumber, l1_batch_timestamp_s: Option, @@ -417,28 +410,3 @@ impl BlockArgs { ) } } - -/// VM executor capable of executing isolated transactions / calls (as opposed to batch execution). -#[async_trait] -trait OneshotExecutor { - type Tracers: Default; - - async fn inspect_transaction( - &self, - storage: S, - env: OneshotEnv, - args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result; - - async fn inspect_transaction_with_bytecode_compression( - &self, - storage: S, - env: OneshotEnv, - args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result<( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - )>; -} diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index da593292e2e..35103779a49 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -1,16 +1,31 @@ //! Tests for the VM execution sandbox. 
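The local `OneshotExecutor` trait removed from `mod.rs` above now lives in `zksync_multivm::interface::executor`; its shape can be inferred from the impls earlier in this diff (a sketch, not the authoritative definition):

    #[async_trait]
    pub trait OneshotExecutor<S: ReadStorage> {
        /// Executes a single transaction or call, compressing bytecodes and
        /// collecting call traces as directed by `tracing_params`.
        async fn inspect_transaction_with_bytecode_compression(
            &self,
            storage: S,
            env: OneshotEnv,
            args: TxExecutionArgs,
            tracing_params: OneshotTracingParams,
        ) -> anyhow::Result<OneshotTransactionExecutionResult>;
    }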
+use std::collections::HashMap; + use assert_matches::assert_matches; +use test_casing::test_casing; use zksync_dal::ConnectionPool; +use zksync_multivm::{ + interface::{ + executor::{OneshotExecutor, TransactionValidator}, + tracer::ValidationError, + Halt, OneshotTracingParams, TxExecutionArgs, + }, + utils::derive_base_fee_and_gas_per_pubdata, +}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}; -use zksync_types::{api::state_override::StateOverride, Transaction}; - -use super::*; -use crate::{ - execution_sandbox::{apply::VmSandbox, storage::StorageWithOverrides}, - tx_sender::ApiContracts, +use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_types::{ + api::state_override::{OverrideAccount, StateOverride}, + fee::Fee, + l2::L2Tx, + transaction_request::PaymasterParams, + K256PrivateKey, Nonce, ProtocolVersionId, Transaction, U256, }; +use zksync_vm_executor::oneshot::MainOneshotExecutor; + +use super::{storage::StorageWithOverrides, *}; +use crate::tx_sender::ApiContracts; #[tokio::test] async fn creating_block_args() { @@ -167,7 +182,7 @@ async fn creating_block_args_after_snapshot_recovery() { } #[tokio::test] -async fn instantiating_vm() { +async fn estimating_gas() { let pool = ConnectionPool::::test_pool().await; let mut connection = pool.connection().await.unwrap(); insert_genesis_batch(&mut connection, &GenesisParams::mock()) @@ -188,24 +203,104 @@ async fn instantiating_vm() { } async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args: BlockArgs) { - let transaction = Transaction::from(create_l2_transaction(10, 100)); let estimate_gas_contracts = ApiContracts::load_from_disk().await.unwrap().estimate_gas; + let mut setup_args = TxSetupArgs::mock(TxExecutionMode::EstimateFee, estimate_gas_contracts); + let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + setup_args.fee_input, + ProtocolVersionId::latest().into(), + ); + setup_args.enforced_base_fee = Some(base_fee); + let transaction = Transaction::from(create_transfer(base_fee, gas_per_pubdata)); let execution_args = TxExecutionArgs::for_gas_estimate(transaction.clone()); - let (env, storage) = apply::prepare_env_and_storage( - connection, - TxSetupArgs::mock(TxExecutionMode::EstimateFee, estimate_gas_contracts), - &block_args, - ) - .await - .unwrap(); + let (env, storage) = apply::prepare_env_and_storage(connection, setup_args, &block_args) + .await + .unwrap(); let storage = StorageWithOverrides::new(storage, &StateOverride::default()); - tokio::task::spawn_blocking(move || { - VmSandbox::new(storage, env, execution_args).apply(|_, received_tx| { - assert_eq!(received_tx, transaction); - }); - }) - .await - .expect("VM execution panicked") + let tracing_params = OneshotTracingParams::default(); + let output = MainOneshotExecutor::new(usize::MAX) + .inspect_transaction_with_bytecode_compression(storage, env, execution_args, tracing_params) + .await + .unwrap(); + output.compression_result.unwrap(); + let tx_result = *output.tx_result; + assert!(!tx_result.result.is_failed(), "{tx_result:#?}"); +} + +fn create_transfer(fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { + let fee = Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: fee_per_gas.into(), + max_priority_fee_per_gas: 0_u64.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }; + L2Tx::new_signed( + Address::random(), + vec![], + Nonce(0), + fee, + U256::zero(), + 
L2ChainId::default(), + &K256PrivateKey::random(), + vec![], + PaymasterParams::default(), + ) + .unwrap() +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn validating_transaction(set_balance: bool) { + let pool = ConnectionPool::::test_pool().await; + let mut connection = pool.connection().await.unwrap(); + insert_genesis_batch(&mut connection, &GenesisParams::mock()) + .await + .unwrap(); + + let block_args = BlockArgs::pending(&mut connection).await.unwrap(); + + let call_contracts = ApiContracts::load_from_disk().await.unwrap().eth_call; + let mut setup_args = TxSetupArgs::mock(TxExecutionMode::VerifyExecute, call_contracts); + let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + setup_args.fee_input, + ProtocolVersionId::latest().into(), + ); + setup_args.enforced_base_fee = Some(base_fee); + let transaction = create_transfer(base_fee, gas_per_pubdata); + + let validation_params = + validate::get_validation_params(&mut connection, &transaction, u32::MAX, &[]) + .await + .unwrap(); + let (env, storage) = apply::prepare_env_and_storage(connection, setup_args, &block_args) + .await + .unwrap(); + let state_override = if set_balance { + let account_override = OverrideAccount { + balance: Some(U256::from(1) << 128), + ..OverrideAccount::default() + }; + StateOverride::new(HashMap::from([( + transaction.initiator_account(), + account_override, + )])) + } else { + StateOverride::default() + }; + let storage = StorageWithOverrides::new(storage, &state_override); + + let validation_result = MainOneshotExecutor::new(usize::MAX) + .validate_transaction(storage, env, transaction, validation_params) + .await + .unwrap(); + if set_balance { + validation_result.expect("validation failed"); + } else { + assert_matches!( + validation_result.unwrap_err(), + ValidationError::FailedTx(Halt::ValidationFailed(reason)) + if reason.to_string().contains("Not enough balance") + ); + } } diff --git a/core/node/api_server/src/execution_sandbox/tracers.rs b/core/node/api_server/src/execution_sandbox/tracers.rs deleted file mode 100644 index 31384b7a089..00000000000 --- a/core/node/api_server/src/execution_sandbox/tracers.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_multivm::{ - interface::{storage::WriteStorage, Call}, - tracers::{CallTracer, ValidationTracer, ValidationTracerParams, ViolatedValidationRule}, - vm_latest::HistoryDisabled, - MultiVMTracer, MultiVmTracerPointer, -}; -use zksync_types::ProtocolVersionId; - -/// Custom tracers supported by the API sandbox. 
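The `ApiTracer` enum deleted below was the old way to request call traces through a shared `OnceCell`; after this PR, callers pass `OneshotTracingParams` and read the traces off the result, as `debug.rs` does later in this diff. A minimal sketch, for any `executor` implementing the new trait:

    // Request call traces declaratively instead of wiring up an ApiTracer.
    let tracing_params = OneshotTracingParams { trace_calls: true };
    let result = executor
        .inspect_transaction_with_bytecode_compression(storage, env, args, tracing_params)
        .await?;
    let call_traces: Vec<Call> = result.call_traces; // empty when trace_calls == false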
-#[derive(Debug)] -pub(crate) enum ApiTracer { - CallTracer(Arc>>), - Validation { - params: ValidationTracerParams, - result: Arc>, - }, -} - -impl ApiTracer { - pub fn validation( - params: ValidationTracerParams, - ) -> (Self, Arc>) { - let result = Arc::>::default(); - let this = Self::Validation { - params, - result: result.clone(), - }; - (this, result) - } - - pub(super) fn into_boxed( - self, - protocol_version: ProtocolVersionId, - ) -> MultiVmTracerPointer - where - S: WriteStorage, - { - match self { - Self::CallTracer(traces) => CallTracer::new(traces).into_tracer_pointer(), - Self::Validation { params, result } => { - let (mut tracer, _) = - ValidationTracer::::new(params, protocol_version.into()); - tracer.result = result; - tracer.into_tracer_pointer() - } - } - } -} diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs index a95cf6c3a91..e9087e608ee 100644 --- a/core/node/api_server/src/execution_sandbox/validate.rs +++ b/core/node/api_server/src/execution_sandbox/validate.rs @@ -3,9 +3,9 @@ use std::collections::HashSet; use anyhow::Context as _; use tracing::Instrument; use zksync_dal::{Connection, Core, CoreDal}; -use zksync_multivm::{ - interface::ExecutionResult, - tracers::{ValidationError as RawValidationError, ValidationTracerParams}, +use zksync_multivm::interface::{ + executor::TransactionValidator, + tracer::{ValidationError as RawValidationError, ValidationParams}, }; use zksync_types::{ api::state_override::StateOverride, l2::L2Tx, Address, TRUSTED_ADDRESS_SLOTS, @@ -17,7 +17,7 @@ use super::{ execute::TransactionExecutor, storage::StorageWithOverrides, vm_metrics::{SandboxStage, EXECUTION_METRICS, SANDBOX_METRICS}, - ApiTracer, BlockArgs, OneshotExecutor, TxExecutionArgs, TxSetupArgs, VmPermit, + BlockArgs, TxSetupArgs, VmPermit, }; /// Validation error used by the sandbox. Besides validation errors returned by VM, it also includes an internal error @@ -42,7 +42,7 @@ impl TransactionExecutor { computational_gas_limit: u32, ) -> Result<(), ValidationError> { let total_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start(); - let params = get_validation_params( + let validation_params = get_validation_params( &mut connection, &tx, computational_gas_limit, @@ -55,21 +55,14 @@ impl TransactionExecutor { apply::prepare_env_and_storage(connection, setup_args, &block_args).await?; let storage = StorageWithOverrides::new(storage, &StateOverride::default()); - let execution_args = TxExecutionArgs::for_validation(tx); - let (tracer, validation_result) = ApiTracer::validation(params); let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); - let result = self - .inspect_transaction(storage, env, execution_args, vec![tracer]) + let validation_result = self + .validate_transaction(storage, env, tx, validation_params) .instrument(tracing::debug_span!("validation")) .await?; drop(vm_permit); stage_latency.observe(); - let validation_result = match (result.result, validation_result.get()) { - (_, Some(rule)) => Err(RawValidationError::ViolatedRule(rule.clone())), - (ExecutionResult::Halt { reason }, _) => Err(RawValidationError::FailedTx(reason)), - (_, None) => Ok(()), - }; total_latency.observe(); validation_result.map_err(ValidationError::Vm) } @@ -78,12 +71,12 @@ impl TransactionExecutor { /// Some slots can be marked as "trusted". 
That is needed for slots which can not be /// trusted to change between validation and execution in general case, but /// sometimes we can safely rely on them to not change often. -async fn get_validation_params( +pub(super) async fn get_validation_params( connection: &mut Connection<'_, Core>, tx: &L2Tx, computational_gas_limit: u32, whitelisted_tokens_for_aa: &[Address], -) -> anyhow::Result { +) -> anyhow::Result { let method_latency = EXECUTION_METRICS.get_validation_params.start(); let user_address = tx.common_data.initiator_address; let paymaster_address = tx.common_data.paymaster_params.paymaster; @@ -122,7 +115,7 @@ async fn get_validation_params( span.exit(); method_latency.observe(); - Ok(ValidationTracerParams { + Ok(ValidationParams { user_address, paymaster_address, trusted_slots, diff --git a/core/node/api_server/src/execution_sandbox/vm_metrics.rs b/core/node/api_server/src/execution_sandbox/vm_metrics.rs index ffe87be899b..cbfe7e90bd0 100644 --- a/core/node/api_server/src/execution_sandbox/vm_metrics.rs +++ b/core/node/api_server/src/execution_sandbox/vm_metrics.rs @@ -4,77 +4,14 @@ use vise::{ Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LatencyObserver, Metrics, }; use zksync_multivm::{ - interface::{ - storage::StorageViewMetrics, TransactionExecutionMetrics, VmEvent, - VmExecutionResultAndLogs, VmMemoryMetrics, - }, + interface::{TransactionExecutionMetrics, VmEvent, VmExecutionResultAndLogs}, utils::StorageWritesDeduplicator, }; -use zksync_shared_metrics::InteractionType; use zksync_types::H256; use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::utils::ReportFilter; -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] -#[metrics(label = "type", rename_all = "snake_case")] -enum SizeType { - Inner, - History, -} - -const MEMORY_SIZE_BUCKETS: Buckets = Buckets::values(&[ - 1_000.0, - 10_000.0, - 100_000.0, - 500_000.0, - 1_000_000.0, - 5_000_000.0, - 10_000_000.0, - 50_000_000.0, - 100_000_000.0, - 500_000_000.0, - 1_000_000_000.0, -]); - -#[derive(Debug, Metrics)] -#[metrics(prefix = "runtime_context_memory")] -struct RuntimeContextMemoryMetrics { - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - event_sink_size: Family>, - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - memory_size: Family>, - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - decommitter_size: Family>, - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - storage_size: Family>, - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - storage_view_cache_size: Histogram, - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - full: Histogram, -} - -#[vise::register] -static MEMORY_METRICS: vise::Global = vise::Global::new(); - -const INTERACTION_AMOUNT_BUCKETS: Buckets = Buckets::exponential(10.0..=10_000_000.0, 10.0); - -#[derive(Debug, Metrics)] -#[metrics(prefix = "runtime_context_storage_interaction")] -struct RuntimeContextStorageMetrics { - #[metrics(buckets = INTERACTION_AMOUNT_BUCKETS)] - amount: Family>, - #[metrics(buckets = Buckets::LATENCIES)] - duration: Family>, - #[metrics(buckets = Buckets::LATENCIES)] - duration_per_unit: Family>, - #[metrics(buckets = Buckets::ZERO_TO_ONE)] - ratio: Histogram, -} - -#[vise::register] -static STORAGE_METRICS: vise::Global = vise::Global::new(); - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "stage", rename_all = "snake_case")] pub(super) enum SandboxStage { @@ -185,84 +122,6 @@ pub(super) struct ExecutionMetrics { #[vise::register] pub(super) static EXECUTION_METRICS: 
vise::Global = vise::Global::new(); -pub(super) fn report_vm_memory_metrics( - tx_id: &str, - memory_metrics: &VmMemoryMetrics, - vm_execution_took: Duration, - storage_metrics: StorageViewMetrics, -) { - MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner); - MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history); - MEMORY_METRICS.memory_size[&SizeType::Inner].observe(memory_metrics.memory_inner); - MEMORY_METRICS.memory_size[&SizeType::History].observe(memory_metrics.memory_history); - MEMORY_METRICS.decommitter_size[&SizeType::Inner] - .observe(memory_metrics.decommittment_processor_inner); - MEMORY_METRICS.decommitter_size[&SizeType::History] - .observe(memory_metrics.decommittment_processor_history); - MEMORY_METRICS.storage_size[&SizeType::Inner].observe(memory_metrics.storage_inner); - MEMORY_METRICS.storage_size[&SizeType::History].observe(memory_metrics.storage_history); - - MEMORY_METRICS - .storage_view_cache_size - .observe(storage_metrics.cache_size); - MEMORY_METRICS - .full - .observe(memory_metrics.full_size() + storage_metrics.cache_size); - - let total_storage_invocations = storage_metrics.get_value_storage_invocations - + storage_metrics.set_value_storage_invocations; - let total_time_spent_in_storage = - storage_metrics.time_spent_on_get_value + storage_metrics.time_spent_on_set_value; - - STORAGE_METRICS.amount[&InteractionType::Missed] - .observe(storage_metrics.storage_invocations_missed); - STORAGE_METRICS.amount[&InteractionType::GetValue] - .observe(storage_metrics.get_value_storage_invocations); - STORAGE_METRICS.amount[&InteractionType::SetValue] - .observe(storage_metrics.set_value_storage_invocations); - STORAGE_METRICS.amount[&InteractionType::Total].observe(total_storage_invocations); - - STORAGE_METRICS.duration[&InteractionType::Missed] - .observe(storage_metrics.time_spent_on_storage_missed); - STORAGE_METRICS.duration[&InteractionType::GetValue] - .observe(storage_metrics.time_spent_on_get_value); - STORAGE_METRICS.duration[&InteractionType::SetValue] - .observe(storage_metrics.time_spent_on_set_value); - STORAGE_METRICS.duration[&InteractionType::Total].observe(total_time_spent_in_storage); - - if total_storage_invocations > 0 { - STORAGE_METRICS.duration_per_unit[&InteractionType::Total] - .observe(total_time_spent_in_storage.div_f64(total_storage_invocations as f64)); - } - if storage_metrics.storage_invocations_missed > 0 { - let duration_per_unit = storage_metrics - .time_spent_on_storage_missed - .div_f64(storage_metrics.storage_invocations_missed as f64); - STORAGE_METRICS.duration_per_unit[&InteractionType::Missed].observe(duration_per_unit); - } - - STORAGE_METRICS - .ratio - .observe(total_time_spent_in_storage.as_secs_f64() / vm_execution_took.as_secs_f64()); - - const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000; - - if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { - tracing::info!( - "Tx {tx_id} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \ - {} get_value_storage_invocations, {} set_value_storage_invocations, \ - vm execution took {vm_execution_took:?}, storage interaction took {total_time_spent_in_storage:?} \ - (missed: {:?} get: {:?} set: {:?})", - storage_metrics.storage_invocations_missed, - storage_metrics.get_value_storage_invocations, - storage_metrics.set_value_storage_invocations, - storage_metrics.time_spent_on_storage_missed, - storage_metrics.time_spent_on_get_value, - 
storage_metrics.time_spent_on_set_value, - ); - } -} - pub(super) fn collect_tx_execution_metrics( contracts_deployed: u16, result: &VmExecutionResultAndLogs, diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 5f913e305cd..44eaae2e3ee 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -10,7 +10,10 @@ use zksync_dal::{ transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, }; use zksync_multivm::{ - interface::{TransactionExecutionMetrics, TxExecutionMode, VmExecutionResultAndLogs}, + interface::{ + OneshotTracingParams, TransactionExecutionMetrics, TxExecutionArgs, TxExecutionMode, + VmExecutionResultAndLogs, + }, utils::{ adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead, get_max_batch_gas_limit, @@ -41,8 +44,8 @@ pub(super) use self::result::SubmitTxError; use self::{master_pool_sink::MasterPoolSink, tx_sink::TxSink}; use crate::{ execution_sandbox::{ - BlockArgs, SubmitTxStage, TransactionExecutor, TxExecutionArgs, TxSetupArgs, - VmConcurrencyBarrier, VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS, + BlockArgs, SubmitTxStage, TransactionExecutor, TxSetupArgs, VmConcurrencyBarrier, + VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS, }, tx_sender::result::ApiCallResult, }; @@ -140,6 +143,38 @@ impl MultiVMBaseSystemContracts { } } } + + pub fn load_estimate_gas_blocking() -> Self { + Self { + pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(), + post_boojum: BaseSystemContracts::estimate_gas_post_boojum(), + post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(), + post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(), + post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(), + vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), + vm_1_5_0_increased_memory: + BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), + } + } + + pub fn load_eth_call_blocking() -> Self { + Self { + pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(), + post_boojum: BaseSystemContracts::playground_post_boojum(), + post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(), + post_1_4_1: BaseSystemContracts::playground_post_1_4_1(), + post_1_4_2: BaseSystemContracts::playground_post_1_4_2(), + vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), + vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( + ), + } + } } /// Smart contracts to be used in the API sandbox requests, e.g. for estimating gas and @@ -169,32 +204,8 @@ impl ApiContracts { /// Blocking version of [`Self::load_from_disk()`]. 
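The two loaders extracted above deduplicate the contract tables that `load_from_disk_blocking` previously built inline. Presumably the async `load_from_disk` defers to the blocking variant via `spawn_blocking`; a sketch under that assumption (not the actual implementation):

    use anyhow::Context as _;

    // Hypothetical async wrapper over the blocking loader.
    pub async fn load_from_disk() -> anyhow::Result<Self> {
        tokio::task::spawn_blocking(Self::load_from_disk_blocking)
            .await
            .context("loading base system contracts panicked")
    }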
pub fn load_from_disk_blocking() -> Self { Self { - estimate_gas: MultiVMBaseSystemContracts { - pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), - post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), - post_virtual_blocks_finish_upgrade_fix: - BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(), - post_boojum: BaseSystemContracts::estimate_gas_post_boojum(), - post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(), - post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(), - post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(), - vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), - vm_1_5_0_increased_memory: - BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), - }, - eth_call: MultiVMBaseSystemContracts { - pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), - post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), - post_virtual_blocks_finish_upgrade_fix: - BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(), - post_boojum: BaseSystemContracts::playground_post_boojum(), - post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(), - post_1_4_1: BaseSystemContracts::playground_post_1_4_1(), - post_1_4_2: BaseSystemContracts::playground_post_1_4_2(), - vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), - vm_1_5_0_increased_memory: - BaseSystemContracts::playground_post_1_5_0_increased_memory(), - }, + estimate_gas: MultiVMBaseSystemContracts::load_estimate_gas_blocking(), + eth_call: MultiVMBaseSystemContracts::load_eth_call_blocking(), } } } @@ -388,7 +399,7 @@ impl TxSender { connection, block_args, None, - vec![], + OneshotTracingParams::default(), ) .await?; tracing::info!( @@ -725,7 +736,7 @@ impl TxSender { connection, block_args, state_override, - vec![], + OneshotTracingParams::default(), ) .await?; Ok((execution_output.vm, execution_output.metrics)) @@ -1003,7 +1014,7 @@ impl TxSender { .await } - pub(super) async fn eth_call( + pub async fn eth_call( &self, block_args: BlockArgs, call_overrides: CallOverrides, @@ -1025,7 +1036,7 @@ impl TxSender { connection, block_args, state_override, - vec![], + OneshotTracingParams::default(), ) .await?; result.vm.into_api_call_result() diff --git a/core/node/api_server/src/tx_sender/tests.rs b/core/node/api_server/src/tx_sender/tests.rs index 5b2ab0495da..0ac3eb0b4f3 100644 --- a/core/node/api_server/src/tx_sender/tests.rs +++ b/core/node/api_server/src/tx_sender/tests.rs @@ -9,12 +9,10 @@ use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}; use zksync_types::{api, get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog}; use zksync_utils::u256_to_h256; +use zksync_vm_executor::oneshot::MockOneshotExecutor; use super::*; -use crate::{ - execution_sandbox::{testonly::MockOneshotExecutor, BlockStartInfo}, - web3::testonly::create_test_tx_sender, -}; +use crate::{execution_sandbox::BlockStartInfo, web3::testonly::create_test_tx_sender}; #[tokio::test] async fn getting_nonce_for_account() { diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs index c3e116d3992..de763526373 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs +++ 
b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs @@ -19,6 +19,12 @@ impl EnNamespaceServer for EnNamespace { .map_err(|err| self.current_method().map_err(err)) } + async fn consensus_global_config(&self) -> RpcResult> { + self.consensus_global_config_impl() + .await + .map_err(|err| self.current_method().map_err(err)) + } + async fn consensus_genesis(&self) -> RpcResult> { self.consensus_genesis_impl() .await diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 473391476a3..ad00f6a878b 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -1,10 +1,9 @@ -use std::sync::Arc; - use anyhow::Context as _; -use once_cell::sync::OnceCell; use zksync_dal::{CoreDal, DalError}; use zksync_multivm::{ - interface::{Call, CallType, ExecutionResult, TxExecutionMode}, + interface::{ + Call, CallType, ExecutionResult, OneshotTracingParams, TxExecutionArgs, TxExecutionMode, + }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_system_constants::MAX_ENCODED_TX_SIZE; @@ -19,7 +18,7 @@ use zksync_types::{ use zksync_web3_decl::error::Web3Error; use crate::{ - execution_sandbox::{ApiTracer, TxExecutionArgs, TxSetupArgs}, + execution_sandbox::TxSetupArgs, tx_sender::{ApiContracts, TxSenderConfig}, web3::{backend_jsonrpsee::MethodTracer, state::RpcState}, }; @@ -190,11 +189,8 @@ impl DebugNamespace { let vm_permit = vm_permit.context("cannot acquire VM permit")?; // We don't need properly trace if we only need top call - let call_tracer_result = Arc::new(OnceCell::default()); - let custom_tracers = if only_top_call { - vec![] - } else { - vec![ApiTracer::CallTracer(call_tracer_result.clone())] + let tracing_params = OneshotTracingParams { + trace_calls: !only_top_call, }; let connection = self.state.acquire_connection().await?; @@ -207,12 +203,11 @@ impl DebugNamespace { connection, block_args, None, - custom_tracers, + tracing_params, ) - .await? - .vm; + .await?; - let (output, revert_reason) = match result.result { + let (output, revert_reason) = match result.vm.result { ExecutionResult::Success { output, .. } => (output, None), ExecutionResult::Revert { output } => (vec![], Some(output.to_string())), ExecutionResult::Halt { reason } => { @@ -223,19 +218,14 @@ impl DebugNamespace { } }; - // We had only one copy of Arc this arc is already dropped it's safe to unwrap - let trace = Arc::try_unwrap(call_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); let call = Call::new_high_level( tx.common_data.fee.gas_limit.as_u64(), - result.statistics.gas_used, + result.vm.statistics.gas_used, tx.execute.value, tx.execute.calldata, output, revert_reason, - trace, + result.call_traces, ); Ok(Self::map_call(call, false)) } diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index ca15352fd1a..26f4aa2b0b5 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -21,18 +21,35 @@ impl EnNamespace { Self { state } } + pub async fn consensus_global_config_impl( + &self, + ) -> Result, Web3Error> { + let mut conn = self.state.acquire_connection().await?; + let Some(cfg) = conn + .consensus_dal() + .global_config() + .await + .context("global_config()")? 
+ else { + return Ok(None); + }; + Ok(Some(en::ConsensusGlobalConfig( + zksync_protobuf::serde::serialize(&cfg, serde_json::value::Serializer).unwrap(), + ))) + } + pub async fn consensus_genesis_impl(&self) -> Result, Web3Error> { let mut conn = self.state.acquire_connection().await?; - let Some(genesis) = conn + let Some(cfg) = conn .consensus_dal() - .genesis() + .global_config() .await - .map_err(DalError::generalize)? + .context("global_config()")? else { return Ok(None); }; Ok(Some(en::ConsensusGenesis( - zksync_protobuf::serde::serialize(&genesis, serde_json::value::Serializer).unwrap(), + zksync_protobuf::serde::serialize(&cfg.genesis, serde_json::value::Serializer).unwrap(), ))) } @@ -40,7 +57,7 @@ impl EnNamespace { pub async fn attestation_status_impl( &self, ) -> Result, Web3Error> { - let status = self + let Some(status) = self .state .acquire_connection() .await? @@ -54,13 +71,13 @@ impl EnNamespace { .context("TransactionBuilder::build()")? .consensus_dal() .attestation_status() - .await?; - - Ok(status.map(|s| { - en::AttestationStatus( - zksync_protobuf::serde::serialize(&s, serde_json::value::Serializer).unwrap(), - ) - })) + .await? + else { + return Ok(None); + }; + Ok(Some(en::AttestationStatus( + zksync_protobuf::serde::serialize(&status, serde_json::value::Serializer).unwrap(), + ))) } pub(crate) fn current_method(&self) -> &MethodTracer { diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index 9f6b30b6026..a77498d4341 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -14,12 +14,10 @@ use zksync_types::{ fee_model::{BatchFeeInput, FeeParams}, L2ChainId, }; +use zksync_vm_executor::oneshot::MockOneshotExecutor; use super::{metrics::ApiTransportLabel, *}; -use crate::{ - execution_sandbox::{testonly::MockOneshotExecutor, TransactionExecutor}, - tx_sender::TxSenderConfig, -}; +use crate::{execution_sandbox::TransactionExecutor, tx_sender::TxSenderConfig}; const TEST_TIMEOUT: Duration = Duration::from_secs(90); const POLL_INTERVAL: Duration = Duration::from_millis(50); diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 5617b097c0c..635620e9c52 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -42,6 +42,7 @@ use zksync_types::{ U256, U64, }; use zksync_utils::u256_to_h256; +use zksync_vm_executor::oneshot::MockOneshotExecutor; use zksync_web3_decl::{ client::{Client, DynClient, L2}, jsonrpsee::{ @@ -57,10 +58,7 @@ use zksync_web3_decl::{ }; use super::*; -use crate::{ - execution_sandbox::testonly::MockOneshotExecutor, - web3::testonly::{spawn_http_server, spawn_ws_server}, -}; +use crate::web3::testonly::{spawn_http_server, spawn_ws_server}; mod debug; mod filters; diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 5b04250eebf..d8d1a2c7768 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -11,6 +11,7 @@ use zksync_types::{ L2ChainId, PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, U256, }; use zksync_utils::u256_to_h256; +use zksync_vm_executor::oneshot::MockOneshotExecutor; use zksync_web3_decl::namespaces::DebugNamespaceClient; use super::*; @@ -327,7 +328,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { total_log_queries_count: 0, }; - tx_executor.set_tx_responses_with_logs(move |tx, env| { + 
tx_executor.set_full_tx_responses(move |tx, env| { assert_eq!(tx.hash(), tx_bytes_and_hash.1); assert_eq!(env.l1_batch.first_l2_block.number, 1); diff --git a/core/node/consensus/Cargo.toml b/core/node/consensus/Cargo.toml index e82969dae6c..707bd957d81 100644 --- a/core/node/consensus/Cargo.toml +++ b/core/node/consensus/Cargo.toml @@ -20,6 +20,7 @@ zksync_consensus_storage.workspace = true zksync_consensus_executor.workspace = true zksync_consensus_bft.workspace = true zksync_consensus_utils.workspace = true +zksync_contracts.workspace = true zksync_protobuf.workspace = true zksync_dal.workspace = true zksync_l1_contract_interface.workspace = true @@ -31,22 +32,24 @@ zksync_system_constants.workspace = true zksync_types.workspace = true zksync_utils.workspace = true zksync_web3_decl.workspace = true - +zksync_node_api_server.workspace = true +zksync_state.workspace = true +zksync_vm_interface.workspace = true anyhow.workspace = true async-trait.workspace = true secrecy.workspace = true tempfile.workspace = true thiserror.workspace = true tracing.workspace = true +hex.workspace = true tokio.workspace = true +semver.workspace = true [dev-dependencies] zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true zksync_node_api_server.workspace = true zksync_test_account.workspace = true -zksync_contracts.workspace = true -tokio.workspace = true test-casing.workspace = true rand.workspace = true diff --git a/core/node/consensus/src/abi.rs b/core/node/consensus/src/abi.rs new file mode 100644 index 00000000000..0e2200e2803 --- /dev/null +++ b/core/node/consensus/src/abi.rs @@ -0,0 +1,133 @@ +//! Strongly-typed API for Consensus-related solidity contracts. +//! Placeholder until we can depend on alloy_sol_types. +use anyhow::Context as _; +use zksync_types::{ethabi, ethabi::Token}; + +/// Strongly typed representation of a contract function. +/// It also represents the inputs of the function. +pub trait Function { + /// Name of the solidity function. + const NAME: &'static str; + /// Type representing contract this function belongs to. + type Contract: AsRef<ethabi::Contract>; + /// Type representing outputs of this function. + type Outputs; + /// Encodes this struct to inputs of this function. + fn encode(&self) -> Vec<Token>; + /// Decodes outputs of this function. + fn decode_outputs(outputs: Vec<Token>) -> anyhow::Result<Self::Outputs>; +} + +/// Address of contract C. It is just a wrapper of ethabi::Address, +/// just additionally indicating what contract is deployed under this address. +#[derive(Debug)] +pub struct Address<C>(ethabi::Address, std::marker::PhantomData<C>); + +impl<C> Clone for Address<C> { + fn clone(&self) -> Self { + *self + } +} + +impl<C> Copy for Address<C> {} + +impl<C> PartialEq for Address<C> { + fn eq(&self, other: &Self) -> bool { + self.0.eq(&other.0) + } +} + +impl<C> Eq for Address<C> {} + +impl<C> Address<C> { + pub fn new(address: ethabi::Address) -> Self { + Self(address, std::marker::PhantomData) + } +} + +impl<C> std::ops::Deref for Address<C> { + type Target = ethabi::Address; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +/// Represents a call to the function F. +#[derive(Debug)] +pub struct Call<F: Function> { + /// Contract of the function. + pub contract: F::Contract, + /// Inputs to the function. + pub inputs: F, +} + +impl<F: Function> Call<F> { + pub(super) fn function(&self) -> &ethabi::Function { + self.contract.as_ref().function(F::NAME).unwrap() + } + /// Converts the call to raw calldata. + pub fn calldata(&self) -> ethabi::Result<ethabi::Bytes> { + self.function().encode_input(&self.inputs.encode()) + } + /// Parses the outputs of the call.
+ pub fn decode_outputs(&self, outputs: &[u8]) -> anyhow::Result { + F::decode_outputs( + self.function() + .decode_output(outputs) + .context("decode_output()")?, + ) + } +} + +pub(crate) fn into_fixed_bytes(t: Token) -> anyhow::Result<[u8; N]> { + match t { + Token::FixedBytes(b) => b.try_into().ok().context("bad size"), + bad => anyhow::bail!("want fixed_bytes, got {bad:?}"), + } +} + +pub(crate) fn into_tuple(t: Token) -> anyhow::Result<[Token; N]> { + match t { + Token::Tuple(ts) => ts.try_into().ok().context("bad size"), + bad => anyhow::bail!("want tuple, got {bad:?}"), + } +} + +pub(crate) fn into_uint>(t: Token) -> anyhow::Result { + match t { + Token::Uint(i) => i.try_into().ok().context("overflow"), + bad => anyhow::bail!("want uint, got {bad:?}"), + } +} + +#[cfg(test)] +fn example(t: ðabi::ParamType) -> Token { + use ethabi::ParamType as T; + match t { + T::Address => Token::Address(ethabi::Address::default()), + T::Bytes => Token::Bytes(ethabi::Bytes::default()), + T::Int(_) => Token::Int(ethabi::Int::default()), + T::Uint(_) => Token::Uint(ethabi::Uint::default()), + T::Bool => Token::Bool(bool::default()), + T::String => Token::String(String::default()), + T::Array(t) => Token::Array(vec![example(t)]), + T::FixedBytes(n) => Token::FixedBytes(vec![0; *n]), + T::FixedArray(t, n) => Token::FixedArray(vec![example(t); *n]), + T::Tuple(ts) => Token::Tuple(ts.iter().map(example).collect()), + } +} + +#[cfg(test)] +impl Call { + pub(crate) fn test(&self) -> anyhow::Result<()> { + self.calldata().context("calldata()")?; + F::decode_outputs( + self.function() + .outputs + .iter() + .map(|p| example(&p.kind)) + .collect(), + )?; + Ok(()) + } +} diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index c2fa1347206..22f8fc01192 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -11,6 +11,8 @@ use zksync_config::{ use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_executor as executor; use zksync_consensus_roles::{attester, node, validator}; +use zksync_dal::consensus_dal; +use zksync_types::ethabi; fn read_secret_text(text: Option<&Secret>) -> anyhow::Result> { text.map(|text| Text::new(text.expose_secret()).decode()) @@ -41,16 +43,18 @@ pub(super) struct GenesisSpec { pub(super) validators: validator::Committee, pub(super) attesters: Option, pub(super) leader_selection: validator::LeaderSelectionMode, + pub(super) registry_address: Option, } impl GenesisSpec { - pub(super) fn from_genesis(g: &validator::Genesis) -> Self { + pub(super) fn from_global_config(cfg: &consensus_dal::GlobalConfig) -> Self { Self { - chain_id: g.chain_id, - protocol_version: g.protocol_version, - validators: g.validators.clone(), - attesters: g.attesters.clone(), - leader_selection: g.leader_selection.clone(), + chain_id: cfg.genesis.chain_id, + protocol_version: cfg.genesis.protocol_version, + validators: cfg.genesis.validators.clone(), + attesters: cfg.genesis.attesters.clone(), + leader_selection: cfg.genesis.leader_selection.clone(), + registry_address: cfg.registry_address, } } @@ -93,6 +97,7 @@ impl GenesisSpec { } else { Some(attester::Committee::new(attesters).context("attesters")?) 
}, + registry_address: x.registry_address, }) } } @@ -104,6 +109,7 @@ pub(super) fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result, ) -> anyhow::Result { let mut gossip_static_outbound = HashMap::new(); { @@ -128,6 +134,7 @@ pub(super) fn executor( }; Ok(executor::Config { + build_version, server_addr: cfg.server_addr, public_addr: net::Host(cfg.public_addr.0.clone()), max_payload_size: cfg.max_payload_size, diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 259cac5d074..a52393c0f48 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -6,15 +6,20 @@ use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; use zksync_dal::consensus_dal; -use zksync_node_sync::{ - fetcher::FetchedBlock, sync_action::ActionQueueSender, MainNodeClient, SyncState, -}; -use zksync_protobuf::ProtoFmt as _; +use zksync_node_sync::{fetcher::FetchedBlock, sync_action::ActionQueueSender, SyncState}; use zksync_types::L2BlockNumber; -use zksync_web3_decl::client::{DynClient, L2}; +use zksync_web3_decl::{ + client::{DynClient, L2}, + error::is_retriable, + jsonrpsee::{core::ClientError, types::error::ErrorCode}, + namespaces::{EnNamespaceClient as _, EthNamespaceClient as _}, +}; use super::{config, storage::Store, ConsensusConfig, ConsensusSecrets}; -use crate::storage::{self, ConnectionPool}; +use crate::{ + registry, + storage::{self, ConnectionPool}, +}; /// External node. pub(super) struct EN { @@ -27,7 +32,7 @@ impl EN { /// Task running a consensus node for the external node. /// It may be a validator, but it cannot be a leader (cannot propose blocks). /// - /// NOTE: Before starting the consensus node if fetches all the blocks + /// NOTE: Before starting the consensus node it fetches all the blocks /// older than consensus genesis from the main node using json RPC. pub async fn run( self, @@ -35,6 +40,7 @@ impl EN { actions: ActionQueueSender, cfg: ConsensusConfig, secrets: ConsensusSecrets, + build_version: Option, ) -> anyhow::Result<()> { let attester = config::attester_key(&secrets).context("attester_key")?; @@ -47,13 +53,16 @@ impl EN { // Update sync state in the background. s.spawn_bg(self.fetch_state_loop(ctx)); - // Initialize genesis. - let genesis = self.fetch_genesis(ctx).await.wrap("fetch_genesis()")?; + // Initialize global config. + let global_config = self + .fetch_global_config(ctx) + .await + .wrap("fetch_global_config()")?; let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &genesis) + conn.try_update_global_config(ctx, &global_config) .await - .wrap("set_genesis()")?; + .wrap("try_update_global_config()")?; let mut payload_queue = conn .new_payload_queue(ctx, actions, self.sync_state.clone()) @@ -63,21 +72,25 @@ impl EN { drop(conn); // Fetch blocks before the genesis. - self.fetch_blocks(ctx, &mut payload_queue, Some(genesis.first_block)) - .await - .wrap("fetch_blocks()")?; + self.fetch_blocks( + ctx, + &mut payload_queue, + Some(global_config.genesis.first_block), + ) + .await + .wrap("fetch_blocks()")?; // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. 
s.spawn_bg::<()>({ - let old = genesis.clone(); + let old = global_config.clone(); async { let old = old; loop { - if let Ok(new) = self.fetch_genesis(ctx).await { + if let Ok(new) = self.fetch_global_config(ctx).await { if new != old { return Err(anyhow::format_err!( - "genesis changed: old {old:?}, new {new:?}" + "global config changed: old {old:?}, new {new:?}" ) .into()); } @@ -105,10 +118,14 @@ impl EN { s.spawn_bg(async { Ok(runner.run(ctx).await?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(self.run_attestation_updater(ctx, genesis.clone(), attestation.clone())); + s.spawn_bg(self.run_attestation_controller( + ctx, + global_config.clone(), + attestation.clone(), + )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets)?, + config: config::executor(&cfg, &secrets, build_version)?, block_store, batch_store, validator: config::validator_key(&secrets) @@ -164,24 +181,21 @@ impl EN { /// Monitors the `AttestationStatus` on the main node, /// and updates the attestation config accordingly. - async fn run_attestation_updater( + async fn run_attestation_controller( &self, ctx: &ctx::Ctx, - genesis: validator::Genesis, + cfg: consensus_dal::GlobalConfig, attestation: Arc, ) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); - let Some(committee) = &genesis.attesters else { - return Ok(()); - }; - let committee = Arc::new(committee.clone()); + let registry = registry::Registry::new(cfg.genesis.clone(), self.pool.clone()).await; let mut next = attester::BatchNumber(0); loop { let status = loop { match self.fetch_attestation_status(ctx).await { Err(err) => tracing::warn!("{err:#}"), Ok(status) => { - if status.genesis != genesis.hash() { + if status.genesis != cfg.genesis.hash() { return Err(anyhow::format_err!("genesis mismatch").into()); } if status.next_batch_to_attest >= next { @@ -191,6 +205,7 @@ impl EN { } ctx.sleep(POLL_INTERVAL).await?; }; + next = status.next_batch_to_attest.next(); tracing::info!( "waiting for hash of batch {:?}", status.next_batch_to_attest @@ -199,6 +214,27 @@ impl EN { .pool .wait_for_batch_hash(ctx, status.next_batch_to_attest) .await?; + let Some(committee) = registry + .attester_committee_for( + ctx, + cfg.registry_address.map(registry::Address::new), + status.next_batch_to_attest, + ) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + self.pool + .connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; tracing::info!( "attesting batch {:?} with hash {hash:?}", status.next_batch_to_attest @@ -214,7 +250,6 @@ impl EN { })) .await .context("start_attestation()")?; - next = status.next_batch_to_attest.next(); } } @@ -224,37 +259,52 @@ impl EN { const DELAY_INTERVAL: time::Duration = time::Duration::milliseconds(500); const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); loop { - match ctx.wait(self.client.fetch_l2_block_number()).await? { + match ctx.wait(self.client.get_block_number()).await? 
{ Ok(head) => { + let head = L2BlockNumber(head.try_into().ok().context("overflow")?); self.sync_state.set_main_node_block(head); ctx.sleep(DELAY_INTERVAL).await?; } Err(err) => { - tracing::warn!("main_node_client.fetch_l2_block_number(): {err}"); + tracing::warn!("get_block_number(): {err}"); ctx.sleep(RETRY_INTERVAL).await?; } } } } - /// Fetches genesis from the main node. + /// Fetches consensus global configuration from the main node. #[tracing::instrument(skip_all)] - async fn fetch_genesis(&self, ctx: &ctx::Ctx) -> ctx::Result<validator::Genesis> { + async fn fetch_global_config( + &self, + ctx: &ctx::Ctx, + ) -> ctx::Result<consensus_dal::GlobalConfig> { + match ctx.wait(self.client.consensus_global_config()).await? { + Ok(cfg) => { + let cfg = cfg.context("main node is not running consensus component")?; + return Ok(zksync_protobuf::serde::deserialize(&cfg.0).context("deserialize()")?); + } + // For non-whitelisted methods, proxyd returns HTTP 403 with MethodNotFound in the body. + // For some stupid reason ClientError doesn't expose HTTP error codes. + Err(ClientError::Transport(_)) => {} + // For missing methods, the API server returns HTTP 200 with MethodNotFound in the body. + Err(ClientError::Call(err)) if err.code() == ErrorCode::MethodNotFound.code() => {} + Err(err) => { + return Err(err) + .context("consensus_global_config()") + .map_err(|err| err.into()); + } + } + tracing::info!("consensus_global_config() not found, calling consensus_genesis() instead"); let genesis = ctx - .wait(self.client.fetch_consensus_genesis()) + .wait(self.client.consensus_genesis()) .await? - .context("fetch_consensus_genesis()")? + .context("consensus_genesis()")? .context("main node is not running consensus component")?; - // Deserialize the json, but don't allow for unknown fields. - // We need to compute the hash of the Genesis, so simply ignoring the unknown fields won't - // do. - Ok(validator::GenesisRaw::read( - &zksync_protobuf::serde::deserialize_proto_with_options( - &genesis.0, /*deny_unknown_fields=*/ true, - ) - .context("deserialize")?, - )? - .with_hash()) + Ok(consensus_dal::GlobalConfig { + genesis: zksync_protobuf::serde::deserialize(&genesis.0).context("deserialize()")?, + registry_address: None, + }) } #[tracing::instrument(skip_all)] @@ -262,15 +312,12 @@ impl EN { &self, ctx: &ctx::Ctx, ) -> ctx::Result<consensus_dal::AttestationStatus> { - match ctx.wait(self.client.fetch_attestation_status()).await? { - Ok(Some(status)) => Ok(zksync_protobuf::serde::deserialize(&status.0) - .context("deserialize(AttestationStatus")?), - Ok(None) => Err(anyhow::format_err!("empty response").into()), - Err(err) => Err(anyhow::format_err!( - "AttestationStatus call to main node HTTP RPC failed: {err:#}" - ) - .into()), - } + let status = ctx + .wait(self.client.attestation_status()) + .await? + .context("attestation_status()")? + .context("main node is not running consensus component")?; + Ok(zksync_protobuf::serde::deserialize(&status.0).context("deserialize()")?) } /// Fetches (with retries) the given block from the main node. @@ -278,14 +325,11 @@ impl EN { const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); loop { - let res = ctx.wait(self.client.fetch_l2_block(n, true)).await?; - match res { + match ctx.wait(self.client.sync_l2_block(n, true)).await?
{ Ok(Some(block)) => return Ok(block.try_into()?), Ok(None) => {} - Err(err) if err.is_retriable() => {} - Err(err) => { - return Err(anyhow::format_err!("client.fetch_l2_block({}): {err}", n).into()); - } + Err(err) if is_retriable(&err) => {} + Err(err) => Err(err).with_context(|| format!("client.sync_l2_block({n})"))?, } ctx.sleep(RETRY_INTERVAL).await?; } diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 574e496f4d1..3150f839680 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -45,6 +45,7 @@ pub async fn run_external_node( sync_state: SyncState, main_node_client: Box>, actions: ActionQueueSender, + build_version: semver::Version, ) -> anyhow::Result<()> { let en = en::EN { pool: ConnectionPool(pool), @@ -58,7 +59,8 @@ pub async fn run_external_node( is_validator = secrets.validator_key.is_some(), "running external node" ); - en.run(ctx, actions, cfg, secrets).await + en.run(ctx, actions, cfg, secrets, Some(build_version)) + .await } None => { tracing::info!("running fetcher"); diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index 13d918b5b6e..ff9cdf86528 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -5,6 +5,7 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; +mod abi; // Currently `batch` module is only used in tests, // but will be used in production once batch syncing is implemented in consensus. #[allow(unused)] @@ -13,8 +14,10 @@ mod config; mod en; pub mod era; mod mn; +mod registry; mod storage; #[cfg(test)] pub(crate) mod testonly; #[cfg(test)] mod tests; +mod vm; diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 7de86b4d8ba..4d428346ebe 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -6,9 +6,10 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_dal::consensus_dal; use crate::{ - config, + config, registry, storage::{ConnectionPool, InsertCertificateError, Store}, }; @@ -36,9 +37,9 @@ pub async fn run_main_node( pool.connection(ctx) .await .wrap("connection()")? - .adjust_genesis(ctx, &spec) + .adjust_global_config(ctx, &spec) .await - .wrap("adjust_genesis()")?; + .wrap("adjust_global_config()")?; } // The main node doesn't have a payload queue as it produces all the L2 blocks itself. @@ -47,33 +48,40 @@ pub async fn run_main_node( .wrap("Store::new()")?; s.spawn_bg(runner.run(ctx)); - let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) + let global_config = pool + .connection(ctx) .await - .wrap("BlockStore::new()")?; - s.spawn_bg(runner.run(ctx)); - - let genesis = block_store.genesis().clone(); + .wrap("connection()")? + .global_config(ctx) + .await + .wrap("global_config()")? 
+ .context("global_config() disappeared")?; anyhow::ensure!( - genesis.leader_selection + global_config.genesis.leader_selection == validator::LeaderSelectionMode::Sticky(validator_key.public()), "unsupported leader selection mode - main node has to be the leader" ); + let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) + .await + .wrap("BlockStore::new()")?; + s.spawn_bg(runner.run(ctx)); + let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) .await .wrap("BatchStore::new()")?; s.spawn_bg(runner.run(ctx)); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(run_attestation_updater( + s.spawn_bg(run_attestation_controller( ctx, &pool, - genesis, + global_config, attestation.clone(), )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets)?, + config: config::executor(&cfg, &secrets, None)?, block_store, batch_store, validator: Some(executor::Validator { @@ -93,18 +101,17 @@ pub async fn run_main_node( /// Manages attestation state by configuring the /// next batch to attest and storing the collected /// certificates. -async fn run_attestation_updater( +async fn run_attestation_controller( ctx: &ctx::Ctx, pool: &ConnectionPool, - genesis: validator::Genesis, + cfg: consensus_dal::GlobalConfig, attestation: Arc, ) -> anyhow::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); + let registry = registry::Registry::new(cfg.genesis, pool.clone()).await; + let registry_addr = cfg.registry_address.map(registry::Address::new); + let mut next = attester::BatchNumber(0); let res = async { - let Some(committee) = &genesis.attesters else { - return Ok(()); - }; - let committee = Arc::new(committee.clone()); loop { // After regenesis it might happen that the batch number for the first block // is not immediately known (the first block was not produced yet), @@ -118,10 +125,12 @@ async fn run_attestation_updater( .await .wrap("attestation_status()")? { - Some(status) => break status, - None => ctx.sleep(POLL_INTERVAL).await?, + Some(status) if status.next_batch_to_attest >= next => break status, + _ => {} } + ctx.sleep(POLL_INTERVAL).await?; }; + next = status.next_batch_to_attest.next(); tracing::info!( "waiting for hash of batch {:?}", status.next_batch_to_attest @@ -129,6 +138,22 @@ async fn run_attestation_updater( let hash = pool .wait_for_batch_hash(ctx, status.next_batch_to_attest) .await?; + let Some(committee) = registry + .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + pool.connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; tracing::info!( "attesting batch {:?} with hash {hash:?}", status.next_batch_to_attest @@ -140,7 +165,7 @@ async fn run_attestation_updater( number: status.next_batch_to_attest, genesis: status.genesis, }, - committee: committee.clone(), + committee, })) .await .context("start_attestation()")?; diff --git a/core/node/consensus/src/registry/abi.rs b/core/node/consensus/src/registry/abi.rs new file mode 100644 index 00000000000..55cc7f9264f --- /dev/null +++ b/core/node/consensus/src/registry/abi.rs @@ -0,0 +1,225 @@ +//! Strongly-typed API for ConsensusRegistry contract. 
+#![allow(dead_code)] + +use std::sync::Arc; + +use anyhow::Context as _; +use zksync_types::{ethabi, ethabi::Token}; + +use crate::abi; + +/// Represents the ConsensusRegistry contract. +#[derive(Debug, Clone)] +pub(crate) struct ConsensusRegistry(Arc<ethabi::Contract>); + +impl AsRef<ethabi::Contract> for ConsensusRegistry { + fn as_ref(&self) -> &ethabi::Contract { + &self.0 + } +} + +impl ConsensusRegistry { + const FILE: &'static str = "contracts/l2-contracts/artifacts-zk/contracts/ConsensusRegistry.sol/ConsensusRegistry.json"; + + /// Loads bytecode of the contract. + #[cfg(test)] + pub(crate) fn bytecode() -> Vec<u8> { + zksync_contracts::read_bytecode(Self::FILE) + } + + /// Loads the `ethabi` representation of the contract. + pub(crate) fn load() -> Self { + Self(zksync_contracts::load_contract(ConsensusRegistry::FILE).into()) + } + + /// Constructs a call to function `F` of this contract. + pub(crate) fn call<F: abi::Function<Contract = Self>>(&self, inputs: F) -> abi::Call<F> { + abi::Call { + contract: self.clone(), + inputs, + } + } +} + +/// ConsensusRegistry.getAttesterCommittee function. +#[derive(Debug, Default)] +pub(crate) struct GetAttesterCommittee; + +impl abi::Function for GetAttesterCommittee { + type Contract = ConsensusRegistry; + const NAME: &'static str = "getAttesterCommittee"; + + fn encode(&self) -> Vec<Token> { + vec![] + } + + type Outputs = Vec<Attester>; + fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<Self::Outputs> { + let [attesters] = tokens.try_into().ok().context("bad size")?; + let mut res = vec![]; + for token in attesters.into_array().context("not array")? { + res.push(Attester::from_token(token).context("attesters")?); + } + Ok(res) + } +} + +/// ConsensusRegistry.add function. +#[derive(Debug, Default)] +pub(crate) struct Add { + pub(crate) node_owner: ethabi::Address, + pub(crate) validator_weight: u32, + pub(crate) validator_pub_key: BLS12_381PublicKey, + pub(crate) validator_pop: BLS12_381Signature, + pub(crate) attester_weight: u32, + pub(crate) attester_pub_key: Secp256k1PublicKey, +} + +impl abi::Function for Add { + type Contract = ConsensusRegistry; + const NAME: &'static str = "add"; + fn encode(&self) -> Vec<Token> { + vec![ + Token::Address(self.node_owner), + Token::Uint(self.validator_weight.into()), + self.validator_pub_key.to_token(), + self.validator_pop.to_token(), + Token::Uint(self.attester_weight.into()), + self.attester_pub_key.to_token(), + ] + } + type Outputs = (); + fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> { + let [] = tokens.try_into().ok().context("bad size")?; + Ok(()) + } +} + +/// ConsensusRegistry.initialize function. +#[derive(Debug, Default)] +pub(crate) struct Initialize { + pub(crate) initial_owner: ethabi::Address, +} + +impl abi::Function for Initialize { + type Contract = ConsensusRegistry; + const NAME: &'static str = "initialize"; + fn encode(&self) -> Vec<Token> { + vec![Token::Address(self.initial_owner)] + } + type Outputs = (); + fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> { + let [] = tokens.try_into().ok().context("bad size")?; + Ok(()) + } +} + +/// ConsensusRegistry.commitAttesterCommittee function. +#[derive(Debug, Default)] +pub(crate) struct CommitAttesterCommittee; + +impl abi::Function for CommitAttesterCommittee { + type Contract = ConsensusRegistry; + const NAME: &'static str = "commitAttesterCommittee"; + fn encode(&self) -> Vec<Token> { + vec![] + } + type Outputs = (); + fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> { + let [] = tokens.try_into().ok().context("bad size")?; + Ok(()) + } +} + +/// ConsensusRegistry.owner function.
+#[derive(Debug, Default)] +pub(crate) struct Owner; + +impl abi::Function for Owner { + type Contract = ConsensusRegistry; + const NAME: &'static str = "owner"; + fn encode(&self) -> Vec { + vec![] + } + type Outputs = ethabi::Address; + fn decode_outputs(tokens: Vec) -> anyhow::Result { + let [owner] = tokens.try_into().ok().context("bad size")?; + owner.into_address().context("not an address") + } +} + +// Auxiliary structs. + +/// Raw representation of a secp256k1 public key. +#[derive(Debug, Default)] +pub(crate) struct Secp256k1PublicKey { + pub(crate) tag: [u8; 1], + pub(crate) x: [u8; 32], +} + +impl Secp256k1PublicKey { + fn from_token(token: Token) -> anyhow::Result { + let [tag, x] = abi::into_tuple(token)?; + Ok(Self { + tag: abi::into_fixed_bytes(tag).context("tag")?, + x: abi::into_fixed_bytes(x).context("x")?, + }) + } + + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.tag.into()), + Token::FixedBytes(self.x.into()), + ]) + } +} + +/// Raw representation of an attester committee member. +#[derive(Debug)] +pub(crate) struct Attester { + pub(crate) weight: u32, + pub(crate) pub_key: Secp256k1PublicKey, +} + +impl Attester { + fn from_token(token: Token) -> anyhow::Result { + let [weight, pub_key] = abi::into_tuple(token)?; + Ok(Self { + weight: abi::into_uint(weight).context("weight")?, + pub_key: Secp256k1PublicKey::from_token(pub_key).context("pub_key")?, + }) + } +} + +/// Raw representation of a BLS12_381 public key. +#[derive(Debug, Default)] +pub(crate) struct BLS12_381PublicKey { + pub(crate) a: [u8; 32], + pub(crate) b: [u8; 32], + pub(crate) c: [u8; 32], +} + +impl BLS12_381PublicKey { + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.a.into()), + Token::FixedBytes(self.b.into()), + Token::FixedBytes(self.c.into()), + ]) + } +} + +#[derive(Debug, Default)] +pub(crate) struct BLS12_381Signature { + pub(crate) a: [u8; 32], + pub(crate) b: [u8; 16], +} + +impl BLS12_381Signature { + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.a.into()), + Token::FixedBytes(self.b.into()), + ]) + } +} diff --git a/core/node/consensus/src/registry/mod.rs b/core/node/consensus/src/registry/mod.rs new file mode 100644 index 00000000000..74da4130957 --- /dev/null +++ b/core/node/consensus/src/registry/mod.rs @@ -0,0 +1,80 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _}; +use zksync_consensus_crypto::ByteFmt; +use zksync_consensus_roles::{attester, validator}; + +use crate::{storage::ConnectionPool, vm::VM}; + +mod abi; +#[cfg(test)] +pub(crate) mod testonly; +#[cfg(test)] +mod tests; + +fn decode_attester_key(k: &abi::Secp256k1PublicKey) -> anyhow::Result { + let mut x = vec![]; + x.extend(k.tag); + x.extend(k.x); + ByteFmt::decode(&x) +} + +fn decode_weighted_attester(a: &abi::Attester) -> anyhow::Result { + Ok(attester::WeightedAttester { + weight: a.weight.into(), + key: decode_attester_key(&a.pub_key).context("key")?, + }) +} + +pub type Address = crate::abi::Address; + +#[derive(Debug)] +pub(crate) struct Registry { + contract: abi::ConsensusRegistry, + genesis: validator::Genesis, + vm: VM, +} + +impl Registry { + pub async fn new(genesis: validator::Genesis, pool: ConnectionPool) -> Self { + Self { + contract: abi::ConsensusRegistry::load(), + genesis, + vm: VM::new(pool).await, + } + } + + /// Attester committee for the given batch. + /// It reads committee from the contract. + /// Falls back to committee specified in the genesis. 
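+    /// Returns `Ok(None)` for batch 0, which does not require attestation.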
+    pub async fn attester_committee_for( + &self, + ctx: &ctx::Ctx, + address: Option<Address>
, + attested_batch: attester::BatchNumber, + ) -> ctx::Result<Option<attester::Committee>> { + let Some(batch_defining_committee) = attested_batch.prev() else { + // Batch 0 doesn't need attestation. + return Ok(None); + }; + let Some(address) = address else { + return Ok(self.genesis.attesters.clone()); + }; + let raw = self + .vm + .call( + ctx, + batch_defining_committee, + address, + self.contract.call(abi::GetAttesterCommittee), + ) + .await + .wrap("vm.call()")?; + let mut attesters = vec![]; + for a in raw { + attesters.push(decode_weighted_attester(&a).context("decode_weighted_attester()")?); + } + Ok(Some( + attester::Committee::new(attesters.into_iter()).context("Committee::new()")?, + )) + } +} diff --git a/core/node/consensus/src/registry/testonly.rs b/core/node/consensus/src/registry/testonly.rs new file mode 100644 index 00000000000..a0c55a557fe --- /dev/null +++ b/core/node/consensus/src/registry/testonly.rs @@ -0,0 +1,118 @@ +use rand::Rng; +use zksync_consensus_crypto::ByteFmt; +use zksync_consensus_roles::{attester, validator}; +use zksync_test_account::Account; +use zksync_types::{ethabi, Execute, Transaction, U256}; + +use super::*; + +pub(crate) fn make_tx<F: crate::abi::Function>( + account: &mut Account, + address: crate::abi::Address, + call: crate::abi::Call<F>, +) -> Transaction { + account.get_l2_tx_for_execute( + Execute { + contract_address: *address, + calldata: call.calldata().unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ) +} + +pub(crate) struct WeightedValidator { + weight: validator::Weight, + key: validator::PublicKey, + pop: validator::ProofOfPossession, +} + +fn encode_attester_key(k: &attester::PublicKey) -> abi::Secp256k1PublicKey { + let b: [u8; 33] = ByteFmt::encode(k).try_into().unwrap(); + abi::Secp256k1PublicKey { + tag: b[0..1].try_into().unwrap(), + x: b[1..33].try_into().unwrap(), + } +} + +fn encode_validator_key(k: &validator::PublicKey) -> abi::BLS12_381PublicKey { + let b: [u8; 96] = ByteFmt::encode(k).try_into().unwrap(); + abi::BLS12_381PublicKey { + a: b[0..32].try_into().unwrap(), + b: b[32..64].try_into().unwrap(), + c: b[64..96].try_into().unwrap(), + } +} + +fn encode_validator_pop(pop: &validator::ProofOfPossession) -> abi::BLS12_381Signature { + let b: [u8; 48] = ByteFmt::encode(pop).try_into().unwrap(); + abi::BLS12_381Signature { + a: b[0..32].try_into().unwrap(), + b: b[32..48].try_into().unwrap(), + } +} + +pub(crate) fn gen_validator(rng: &mut impl Rng) -> WeightedValidator { + let k: validator::SecretKey = rng.gen(); + WeightedValidator { + key: k.public(), + weight: rng.gen_range(1..100), + pop: k.sign_pop(), + } +} + +pub(crate) fn gen_attester(rng: &mut impl Rng) -> attester::WeightedAttester { + attester::WeightedAttester { + key: rng.gen(), + weight: rng.gen_range(1..100), + } +} + +impl Registry { + pub(crate) fn deploy(&self, account: &mut Account) -> (Address, Transaction) { + let tx = account.get_deploy_tx( + &abi::ConsensusRegistry::bytecode(), + None, + zksync_test_account::TxType::L2, + ); + (Address::new(tx.address), tx.tx) + } + + pub(crate) fn add( + &self, + node_owner: ethabi::Address, + validator: WeightedValidator, + attester: attester::WeightedAttester, + ) -> anyhow::Result<crate::abi::Call<abi::Add>> { + Ok(self.contract.call(abi::Add { + node_owner, + validator_pub_key: encode_validator_key(&validator.key), + validator_weight: validator + .weight + .try_into() + .context("overflow") + .context("validator_weight")?, + validator_pop: encode_validator_pop(&validator.pop), + attester_pub_key: encode_attester_key(&attester.key), + attester_weight: attester
.weight + .try_into() + .context("overflow") + .context("attester_weight")?, + })) + } + + pub(crate) fn initialize( + &self, + initial_owner: ethabi::Address, + ) -> crate::abi::Call { + self.contract.call(abi::Initialize { initial_owner }) + } + + pub(crate) fn commit_attester_committee( + &self, + ) -> crate::abi::Call { + self.contract.call(abi::CommitAttesterCommittee) + } +} diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs new file mode 100644 index 00000000000..935cd673891 --- /dev/null +++ b/core/node/consensus/src/registry/tests.rs @@ -0,0 +1,91 @@ +use rand::Rng as _; +use zksync_concurrency::{ctx, scope}; +use zksync_consensus_roles::{attester, validator::testonly::Setup}; +use zksync_test_account::Account; +use zksync_types::ProtocolVersionId; + +use super::*; +use crate::storage::ConnectionPool; + +/// Test checking that parsing logic matches the abi specified in the json file. +#[test] +fn test_consensus_registry_abi() { + zksync_concurrency::testonly::abort_on_panic(); + let c = abi::ConsensusRegistry::load(); + c.call(abi::GetAttesterCommittee).test().unwrap(); + c.call(abi::Add::default()).test().unwrap(); + c.call(abi::Initialize::default()).test().unwrap(); + c.call(abi::CommitAttesterCommittee).test().unwrap(); + c.call(abi::Owner).test().unwrap(); +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_attester_committee() { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 10); + let account = &mut Account::random(); + let to_fund = &[account.address]; + + scope::run!(ctx, |ctx, s| async { + let pool = ConnectionPool::test(false, ProtocolVersionId::latest()).await; + let registry = Registry::new(setup.genesis.clone(), pool.clone()).await; + + // If the registry contract address is not specified, + // then the committee from genesis should be returned. + let got = registry + .attester_committee_for(ctx, None, attester::BatchNumber(10)) + .await + .unwrap(); + assert_eq!(setup.genesis.attesters, got); + + let (mut node, runner) = crate::testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run_real(ctx, to_fund)); + + // Deploy registry contract and initialize it. + let committee = + attester::Committee::new((0..5).map(|_| testonly::gen_attester(rng))).unwrap(); + let (registry_addr, tx) = registry.deploy(account); + let mut txs = vec![tx]; + let account_addr = account.address(); + txs.push(testonly::make_tx( + account, + registry_addr, + registry.initialize(account_addr), + )); + // Add attesters. + for a in committee.iter() { + txs.push(testonly::make_tx( + account, + registry_addr, + registry + .add(rng.gen(), testonly::gen_validator(rng), a.clone()) + .unwrap(), + )); + } + // Commit the update. + txs.push(testonly::make_tx( + account, + registry_addr, + registry.commit_attester_committee(), + )); + + node.push_block(&txs).await; + node.seal_batch().await; + pool.wait_for_batch(ctx, node.last_batch()).await?; + + // Read the attester committee using the vm. 
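+        // The registry state sealed in batch `n` defines the committee for batch
+        // `n + 1` (`attester_committee_for` reads the contract at `attested_batch.prev()`),
+        // hence the `batch + 1` query below.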
+ let batch = attester::BatchNumber(node.last_batch().0.into()); + assert_eq!( + Some(committee), + registry + .attester_committee_for(ctx, Some(registry_addr), batch + 1) + .await + .unwrap() + ); + Ok(()) + }) + .await + .unwrap(); +} diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 6ff2fb1ce0a..512b37e81a1 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -1,13 +1,14 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; use zksync_consensus_crypto::keccak256::Keccak256; -use zksync_consensus_roles::{attester, validator}; +use zksync_consensus_roles::{attester, attester::BatchNumber, validator}; use zksync_consensus_storage::{self as storage, BatchStoreState}; use zksync_dal::{consensus_dal, consensus_dal::Payload, Core, CoreDal, DalError}; use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; +use zksync_node_api_server::execution_sandbox::{BlockArgs, BlockStartInfo}; use zksync_node_sync::{fetcher::IoCursorExt as _, ActionQueueSender, SyncState}; use zksync_state_keeper::io::common::IoCursor; -use zksync_types::{commitment::L1BatchWithMetadata, L1BatchNumber}; +use zksync_types::{api, commitment::L1BatchWithMetadata, L1BatchNumber}; use super::{InsertCertificateError, PayloadQueue}; use crate::config; @@ -18,7 +19,7 @@ pub(crate) struct ConnectionPool(pub(crate) zksync_dal::ConnectionPool); impl ConnectionPool { /// Wrapper for `connection_tagged()`. - pub(crate) async fn connection<'a>(&'a self, ctx: &ctx::Ctx) -> ctx::Result> { + pub(crate) async fn connection(&self, ctx: &ctx::Ctx) -> ctx::Result> { Ok(Connection( ctx.wait(self.0.connection_tagged("consensus")) .await? @@ -164,6 +165,22 @@ impl<'a> Connection<'a> { .map_err(E::Other)?) } + /// Wrapper for `consensus_dal().upsert_attester_committee()`. + pub async fn upsert_attester_committee( + &mut self, + ctx: &ctx::Ctx, + number: BatchNumber, + committee: &attester::Committee, + ) -> ctx::Result<()> { + ctx.wait( + self.0 + .consensus_dal() + .upsert_attester_committee(number, committee), + ) + .await??; + Ok(()) + } + /// Wrapper for `consensus_dal().replica_state()`. pub async fn replica_state(&mut self, ctx: &ctx::Ctx) -> ctx::Result { Ok(ctx @@ -229,22 +246,22 @@ impl<'a> Connection<'a> { }) } - /// Wrapper for `consensus_dal().genesis()`. - pub async fn genesis(&mut self, ctx: &ctx::Ctx) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().genesis()) - .await? - .map_err(DalError::generalize)?) + /// Wrapper for `consensus_dal().global_config()`. + pub async fn global_config( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result> { + Ok(ctx.wait(self.0.consensus_dal().global_config()).await??) } - /// Wrapper for `consensus_dal().try_update_genesis()`. - pub async fn try_update_genesis( + /// Wrapper for `consensus_dal().try_update_global_config()`. + pub async fn try_update_global_config( &mut self, ctx: &ctx::Ctx, - genesis: &validator::Genesis, + cfg: &consensus_dal::GlobalConfig, ) -> ctx::Result<()> { Ok(ctx - .wait(self.0.consensus_dal().try_update_genesis(genesis)) + .wait(self.0.consensus_dal().try_update_global_config(cfg)) .await??) } @@ -267,7 +284,7 @@ impl<'a> Connection<'a> { /// (Re)initializes consensus genesis to start at the last L2 block in storage. /// Noop if `spec` matches the current genesis. 
- pub(crate) async fn adjust_genesis( + pub(crate) async fn adjust_global_config( &mut self, ctx: &ctx::Ctx, spec: &config::GenesisSpec, @@ -277,31 +294,34 @@ impl<'a> Connection<'a> { .await .wrap("start_transaction()")?; - let old = txn.genesis(ctx).await.wrap("genesis()")?; + let old = txn.global_config(ctx).await.wrap("genesis()")?; if let Some(old) = &old { - if &config::GenesisSpec::from_genesis(old) == spec { + if &config::GenesisSpec::from_global_config(old) == spec { // Hard fork is not needed. return Ok(()); } } tracing::info!("Performing a hard fork of consensus."); - let genesis = validator::GenesisRaw { - chain_id: spec.chain_id, - fork_number: old - .as_ref() - .map_or(validator::ForkNumber(0), |old| old.fork_number.next()), - first_block: txn.next_block(ctx).await.context("next_block()")?, - protocol_version: spec.protocol_version, - validators: spec.validators.clone(), - attesters: spec.attesters.clone(), - leader_selection: spec.leader_selection.clone(), - } - .with_hash(); + let new = consensus_dal::GlobalConfig { + genesis: validator::GenesisRaw { + chain_id: spec.chain_id, + fork_number: old.as_ref().map_or(validator::ForkNumber(0), |old| { + old.genesis.fork_number.next() + }), + first_block: txn.next_block(ctx).await.context("next_block()")?, + protocol_version: spec.protocol_version, + validators: spec.validators.clone(), + attesters: spec.attesters.clone(), + leader_selection: spec.leader_selection.clone(), + } + .with_hash(), + registry_address: spec.registry_address, + }; - txn.try_update_genesis(ctx, &genesis) + txn.try_update_global_config(ctx, &new) .await - .wrap("try_update_genesis()")?; + .wrap("try_update_global_config()")?; txn.commit(ctx).await.wrap("commit()")?; Ok(()) } @@ -447,4 +467,29 @@ impl<'a> Connection<'a> { .await? .context("attestation_status()")?) } + + /// Constructs `BlockArgs` for the last block of the batch. + pub async fn vm_block_args( + &mut self, + ctx: &ctx::Ctx, + batch: attester::BatchNumber, + ) -> ctx::Result { + let (_, block) = self + .get_l2_block_range_of_l1_batch(ctx, batch) + .await + .wrap("get_l2_block_range_of_l1_batch()")? + .context("batch not sealed")?; + let block = api::BlockId::Number(api::BlockNumber::Number(block.0.into())); + let start_info = ctx + .wait(BlockStartInfo::new( + &mut self.0, + /*max_cache_age=*/ std::time::Duration::from_secs(10), + )) + .await? + .context("BlockStartInfo::new()")?; + Ok(ctx + .wait(BlockArgs::new(&mut self.0, block, &start_info)) + .await? + .context("BlockArgs::new")?) + } } diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 6a96812ae40..cb8e039d7d0 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -325,9 +325,10 @@ impl storage::PersistentBlockStore for Store { Ok(self .conn(ctx) .await? - .genesis(ctx) + .global_config(ctx) .await? - .context("not found")?) + .context("not found")? + .genesis) } fn persisted(&self) -> sync::watch::Receiver { diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index 5d1279afbbf..65c464d98b9 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -1,5 +1,4 @@ //! Storage test helpers. 
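+//! Certificate checks below derive the attester committee via `registry::Registry` instead of taking it from the genesis.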
- use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; use zksync_consensus_roles::{attester, validator}; @@ -13,6 +12,7 @@ use zksync_types::{ }; use super::{Connection, ConnectionPool}; +use crate::registry; impl Connection<'_> { /// Wrapper for `consensus_dal().batch_of_block()`. @@ -181,16 +181,16 @@ impl ConnectionPool { want_last: validator::BlockNumber, ) -> ctx::Result> { let blocks = self.wait_for_block_certificates(ctx, want_last).await?; - let genesis = self + let cfg = self .connection(ctx) .await .wrap("connection()")? - .genesis(ctx) + .global_config(ctx) .await .wrap("genesis()")? .context("genesis is missing")?; for block in &blocks { - block.verify(&genesis).context(block.number())?; + block.verify(&cfg.genesis).context(block.number())?; } Ok(blocks) } @@ -199,6 +199,7 @@ impl ConnectionPool { &self, ctx: &ctx::Ctx, want_last: attester::BatchNumber, + registry_addr: Option, ) -> ctx::Result<()> { // Wait for the last batch to be attested. const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); @@ -214,17 +215,17 @@ impl ConnectionPool { ctx.sleep(POLL_INTERVAL).await?; } let mut conn = self.connection(ctx).await.wrap("connection()")?; - let genesis = conn - .genesis(ctx) + let cfg = conn + .global_config(ctx) .await - .wrap("genesis()")? - .context("genesis is missing")?; + .wrap("global_config()")? + .context("global config is missing")?; let first = conn - .batch_of_block(ctx, genesis.first_block) + .batch_of_block(ctx, cfg.genesis.first_block) .await .wrap("batch_of_block()")? .context("batch of first_block is missing")?; - let committee = genesis.attesters.as_ref().unwrap(); + let registry = registry::Registry::new(cfg.genesis.clone(), self.clone()).await; for i in first.0..want_last.0 { let i = attester::BatchNumber(i); let hash = conn @@ -240,8 +241,13 @@ impl ConnectionPool { if cert.message.hash != hash { return Err(anyhow::format_err!("cert[{i:?}]: hash mismatch").into()); } - cert.verify(genesis.hash(), committee) - .context("cert[{i:?}].verify()")?; + let committee = registry + .attester_committee_for(ctx, registry_addr, i) + .await + .context("attester_committee_for()")? + .context("committee not specified")?; + cert.verify(cfg.genesis.hash(), &committee) + .with_context(|| format!("cert[{i:?}].verify()"))?; } Ok(()) } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 90063772da9..241998f2692 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -42,8 +42,9 @@ use zksync_state_keeper::{ }; use zksync_test_account::Account; use zksync_types::{ + ethabi, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput}, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, + L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, Transaction, }; use zksync_web3_decl::client::{Client, DynClient, L2}; @@ -54,6 +55,7 @@ use crate::{ }; /// Fake StateKeeper for tests. +#[derive(Debug)] pub(super) struct StateKeeper { protocol_version: ProtocolVersionId, // Batch of the `last_block`. @@ -62,8 +64,6 @@ pub(super) struct StateKeeper { // timestamp of the last block. 
last_timestamp: u64, batch_sealed: bool, - // test L2 account - account: Account, next_priority_op: PriorityOpId, actions_sender: ActionQueueSender, @@ -116,6 +116,7 @@ pub(super) fn new_configs( }) .collect(), leader: config::ValidatorPublicKey(setup.validator_keys[0].public().encode()), + registry_address: None, }; network::testonly::new_configs(rng, setup, gossip_peers) .into_iter() @@ -183,7 +184,6 @@ pub(super) struct StateKeeperRunner { addr: sync::watch::Sender>, rocksdb_dir: tempfile::TempDir, metadata_calculator: MetadataCalculator, - account: Account, } impl StateKeeper { @@ -242,7 +242,6 @@ impl StateKeeper { .await .context("MetadataCalculator::new()")?; let tree_reader = metadata_calculator.tree_reader(); - let account = Account::random(); Ok(( Self { protocol_version, @@ -256,7 +255,6 @@ impl StateKeeper { addr: addr.subscribe(), pool: pool.clone(), tree_reader, - account: account.clone(), }, StateKeeperRunner { actions_queue, @@ -265,7 +263,6 @@ impl StateKeeper { addr, rocksdb_dir, metadata_calculator, - account, }, )) } @@ -306,22 +303,29 @@ impl StateKeeper { } } - /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`. - pub async fn push_random_block(&mut self, rng: &mut impl Rng) { + pub async fn push_block(&mut self, txs: &[Transaction]) { let mut actions = vec![self.open_block()]; - for _ in 0..rng.gen_range(3..8) { - let tx = match rng.gen() { - true => l2_transaction(&mut self.account, 1_000_000), + actions.extend( + txs.iter() + .map(|tx| FetchedTransaction::new(tx.clone()).into()), + ); + actions.push(SyncAction::SealL2Block); + self.actions_sender.push_actions(actions).await.unwrap(); + } + + /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`. + pub async fn push_random_block(&mut self, rng: &mut impl Rng, account: &mut Account) { + let txs: Vec<_> = (0..rng.gen_range(3..8)) + .map(|_| match rng.gen() { + true => l2_transaction(account, 1_000_000), false => { - let tx = l1_transaction(&mut self.account, self.next_priority_op); + let tx = l1_transaction(account, self.next_priority_op); self.next_priority_op += 1; tx } - }; - actions.push(FetchedTransaction::new(tx).into()); - } - actions.push(SyncAction::SealL2Block); - self.actions_sender.push_actions(actions).await.unwrap(); + }) + .collect(); + self.push_block(&txs).await; } /// Pushes `SealBatch` command to the `StateKeeper`. @@ -334,14 +338,19 @@ impl StateKeeper { } /// Pushes `count` random L2 blocks to the StateKeeper. - pub async fn push_random_blocks(&mut self, rng: &mut impl Rng, count: usize) { + pub async fn push_random_blocks( + &mut self, + rng: &mut impl Rng, + account: &mut Account, + count: usize, + ) { for _ in 0..count { // 20% chance to seal an L1 batch. // `seal_batch()` also produces a (fictive) block. if rng.gen_range(0..100) < 20 { self.seal_batch().await; } else { - self.push_random_block(rng).await; + self.push_random_block(rng, account).await; } } } @@ -451,7 +460,13 @@ impl StateKeeper { client, sync_state: self.sync_state.clone(), } - .run(ctx, self.actions_sender, cfgs.config, cfgs.secrets) + .run( + ctx, + self.actions_sender, + cfgs.config, + cfgs.secrets, + cfgs.net.build_version, + ) .await } } @@ -534,14 +549,21 @@ async fn mock_metadata_calculator_step(ctx: &ctx::Ctx, pool: &ConnectionPool) -> impl StateKeeperRunner { // Executes the state keeper task with real metadata calculator task // and fake commitment generator (because real one is too slow). 
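+    // `addrs_to_fund` lists the accounts to pre-fund; registry tests use this to
+    // fund the account that deploys and drives the ConsensusRegistry contract.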
- pub async fn run_real(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { + pub async fn run_real( + self, + ctx: &ctx::Ctx, + addrs_to_fund: &[ethabi::Address], + ) -> anyhow::Result<()> { let res = scope::run!(ctx, |ctx, s| async { - // Fund the test account. Required for L2 transactions to succeed. - fund(&self.pool.0, &[self.account.address]).await; + // Fund the test accounts. Required for L2 transactions to succeed. + fund(&self.pool.0, addrs_to_fund).await; let (stop_send, stop_recv) = sync::watch::channel(false); - let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(self.pool.0.clone(), Address::repeat_byte(11), 5); + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + self.pool.0.clone(), + ethabi::Address::repeat_byte(11), + 5, + ); let io = ExternalIO::new( self.pool.0.clone(), @@ -649,8 +671,11 @@ impl StateKeeperRunner { pub async fn run(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { let res = scope::run!(ctx, |ctx, s| async { let (stop_send, stop_recv) = sync::watch::channel(false); - let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(self.pool.0.clone(), Address::repeat_byte(11), 5); + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + self.pool.0.clone(), + ethabi::Address::repeat_byte(11), + 5, + ); let tree_writes_persistence = TreeWritesPersistence::new(self.pool.0.clone()); let io = ExternalIO::new( diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index b245d0524aa..abd35508c7f 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -1,17 +1,24 @@ use anyhow::Context as _; -use test_casing::{test_casing, Product}; +use rand::Rng as _; +use test_casing::test_casing; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; use zksync_consensus_roles::{ attester, validator::testonly::{Setup, SetupSpec}, }; -use zksync_dal::consensus_dal::AttestationStatus; -use zksync_node_sync::MainNodeClient; +use zksync_dal::consensus_dal; +use zksync_test_account::Account; use zksync_types::{L1BatchNumber, ProtocolVersionId}; +use zksync_web3_decl::namespaces::EnNamespaceClient as _; -use super::{FROM_SNAPSHOT, VERSIONS}; -use crate::{mn::run_main_node, storage::ConnectionPool, testonly}; +use super::VERSIONS; +use crate::{ + mn::run_main_node, + registry::{testonly, Registry}, + storage::ConnectionPool, + testonly::{new_configs, StateKeeper}, +}; #[test_casing(2, VERSIONS)] #[tokio::test] @@ -19,24 +26,31 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::test(false, version).await; - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + let (mut sk, runner) = StateKeeper::new(ctx, pool.clone()).await?; s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); // Setup nontrivial genesis. 
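+        // Produce a few batches first so that `setup.first_block` (taken from
+        // `sk.last_block()` below) is non-zero, i.e. the genesis is nontrivial.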
while sk.last_sealed_batch() < L1BatchNumber(3) { - sk.push_random_blocks(rng, 10).await; + sk.push_random_blocks(rng, account, 10).await; } let mut setup = SetupSpec::new(rng, 3); setup.first_block = sk.last_block(); let first_batch = sk.last_batch(); let setup = Setup::from(setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &setup.genesis) - .await - .wrap("try_update_genesis()")?; + conn.try_update_global_config( + ctx, + &consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + }, + ) + .await + .wrap("try_update_global_config()")?; // Make sure that the first_batch is actually sealed. sk.seal_batch().await; pool.wait_for_batch(ctx, first_batch).await?; @@ -44,11 +58,11 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Connect to API endpoint. let api = sk.connect(ctx).await?; let fetch_status = || async { - let s = api - .fetch_attestation_status() - .await? + let s = ctx + .wait(api.attestation_status()) + .await?? .context("no attestation_status")?; - let s: AttestationStatus = + let s: consensus_dal::AttestationStatus = zksync_protobuf::serde::deserialize(&s.0).context("deserialize()")?; anyhow::ensure!(s.genesis == setup.genesis.hash(), "genesis hash mismatch"); Ok(s) @@ -62,24 +76,37 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { attester::BatchNumber(first_batch.0.into()) ); - // Insert a (fake) cert, then check again. + tracing::info!("Insert a cert"); { let mut conn = pool.connection(ctx).await?; let number = status.next_batch_to_attest; let hash = conn.batch_hash(ctx, number).await?.unwrap(); - let genesis = conn.genesis(ctx).await?.unwrap().hash(); + let gcfg = conn.global_config(ctx).await?.unwrap(); + let m = attester::Batch { + number, + hash, + genesis: gcfg.genesis.hash(), + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } let cert = attester::BatchQC { - signatures: attester::MultiSig::default(), - message: attester::Batch { - number, - hash, - genesis, - }, + signatures: sigs, + message: m, }; + conn.upsert_attester_committee( + ctx, + cert.message.number, + setup.genesis.attesters.as_ref().unwrap(), + ) + .await + .context("upsert_attester_committee")?; conn.insert_batch_certificate(ctx, &cert) .await .context("insert_batch_certificate()")?; } + tracing::info!("Check again."); let want = status.next_batch_to_attest.next(); let got = fetch_status().await?; assert_eq!(want, got.next_batch_to_attest); @@ -93,34 +120,65 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Test running a couple of attesters (which are also validators). // Main node is expected to collect all certificates. // External nodes are expected to just vote for the batch. -// -// TODO: it would be nice to use `StateKeeperRunner::run_real()` in this test, -// however as of now it doesn't work with ENs and it doesn't work with -// `ConnectionPool::from_snapshot`. 
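+// The attester committee is sourced from the ConsensusRegistry contract: the test
+// deploys it, registers a single attester before consensus starts, then adds the
+// remaining attesters one by one, committing the committee after each change.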
-#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(2, VERSIONS)] #[tokio::test] -async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_multiple_attesters(version: ProtocolVersionId) { const NODES: usize = 4; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let to_fund = &[account.address]; let setup = Setup::new(rng, 4); - let cfgs = testonly::new_configs(rng, &setup, NODES); - + let mut cfgs = new_configs(rng, &setup, NODES); scope::run!(ctx, |ctx, s| async { - let validator_pool = ConnectionPool::test(from_snapshot, version).await; - let (mut validator, runner) = - testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + let validator_pool = ConnectionPool::test(false, version).await; + let (mut validator, runner) = StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(async { runner - .run(ctx) + .run_real(ctx, to_fund) .instrument(tracing::info_span!("validator")) .await .context("validator") }); - // API server needs at least 1 L1 batch to start. + + tracing::info!("deploy registry with 1 attester"); + let attesters: Vec<_> = setup.genesis.attesters.as_ref().unwrap().iter().collect(); + let registry = Registry::new(setup.genesis.clone(), validator_pool.clone()).await; + let (registry_addr, tx) = registry.deploy(account); + cfgs[0] + .config + .genesis_spec + .as_mut() + .unwrap() + .registry_address = Some(*registry_addr); + let mut txs = vec![tx]; + txs.push(testonly::make_tx( + account, + registry_addr, + registry.initialize(account.address), + )); + txs.push(testonly::make_tx( + account, + registry_addr, + registry + .add( + rng.gen(), + testonly::gen_validator(rng), + attesters[0].clone(), + ) + .unwrap(), + )); + txs.push(testonly::make_tx( + account, + registry_addr, + registry.commit_attester_committee(), + )); + validator.push_block(&txs).await; validator.seal_batch().await; + + tracing::info!("wait for the batch to be processed before starting consensus"); validator_pool .wait_for_payload(ctx, validator.last_block()) .await?; @@ -137,13 +195,13 @@ async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId let mut node_pools = vec![]; for (i, cfg) in cfgs[1..].iter().enumerate() { let i = ctx::NoCopy(i); - let pool = ConnectionPool::test(from_snapshot, version).await; - let (node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + let pool = ConnectionPool::test(false, version).await; + let (node, runner) = StateKeeper::new(ctx, pool.clone()).await?; node_pools.push(pool.clone()); s.spawn_bg(async { let i = i; runner - .run(ctx) + .run_real(ctx, to_fund) .instrument(tracing::info_span!("node", i = *i)) .await .with_context(|| format!("node{}", *i)) @@ -151,13 +209,31 @@ async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, cfg.clone())); } - tracing::info!("Create some batches"); - validator.push_random_blocks(rng, 20).await; - validator.seal_batch().await; + tracing::info!("add attesters one by one"); + #[allow(clippy::needless_range_loop)] + for i in 1..attesters.len() { + let txs = vec![ + testonly::make_tx( + account, + registry_addr, + registry + .add( + rng.gen(), + testonly::gen_validator(rng), + attesters[i].clone(), + ) + .unwrap(), + ), + testonly::make_tx(account, registry_addr, registry.commit_attester_committee()), + ]; + 
validator.push_block(&txs).await; + validator.seal_batch().await; + } + tracing::info!("Wait for the batches to be attested"); let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into()); validator_pool - .wait_for_batch_certificates_and_verify(ctx, want_last) + .wait_for_batch_certificates_and_verify(ctx, want_last, Some(registry_addr)) .await?; Ok(()) }) diff --git a/core/node/consensus/src/tests/batch.rs b/core/node/consensus/src/tests/batch.rs index 41d73fdb87c..f0cae7f2c02 100644 --- a/core/node/consensus/src/tests/batch.rs +++ b/core/node/consensus/src/tests/batch.rs @@ -1,6 +1,7 @@ use test_casing::{test_casing, Product}; use zksync_concurrency::{ctx, scope}; use zksync_consensus_roles::validator; +use zksync_test_account::Account; use zksync_types::{L1BatchNumber, ProtocolVersionId}; use super::{FROM_SNAPSHOT, VERSIONS}; @@ -13,6 +14,7 @@ async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersion let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let pool = ConnectionPool::test(from_snapshot, version).await; + let account = &mut Account::random(); // Fill storage with unsigned L2 blocks and L1 batches in a way that the // last L1 batch is guaranteed to have some L2 blocks executed in it. @@ -23,11 +25,11 @@ async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersion for _ in 0..3 { for _ in 0..2 { - sk.push_random_block(rng).await; + sk.push_random_block(rng, account).await; } sk.seal_batch().await; } - sk.push_random_block(rng).await; + sk.push_random_block(rng, account).await; pool.wait_for_payload(ctx, sk.last_block()).await?; @@ -84,11 +86,13 @@ async fn test_batch_witness(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let to_fund = &[account.address]; scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::from_genesis(version).await; let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run_real(ctx)); + s.spawn_bg(runner.run_real(ctx, to_fund)); tracing::info!("analyzing storage"); { @@ -101,7 +105,7 @@ async fn test_batch_witness(version: ProtocolVersionId) { } // Seal a bunch of batches. - node.push_random_blocks(rng, 10).await; + node.push_random_blocks(rng, account, 10).await; node.seal_batch().await; pool.wait_for_batch(ctx, node.last_sealed_batch()).await?; // We can verify only 2nd batch onward, because diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 0b611d55f06..91f01f865a2 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -7,6 +7,8 @@ use zksync_consensus_roles::{ validator::testonly::{Setup, SetupSpec}, }; use zksync_consensus_storage::BlockStore; +use zksync_dal::consensus_dal; +use zksync_test_account::Account; use zksync_types::ProtocolVersionId; use crate::{ @@ -28,6 +30,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let pool = ConnectionPool::test(false, version).await; + let account = &mut Account::random(); // Fill storage with unsigned L2 blocks. // Fetch a suffix of blocks that we will generate (fake) certs for. @@ -35,15 +38,21 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // Start state keeper. 
let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; s.spawn_bg(runner.run(ctx)); - sk.push_random_blocks(rng, 10).await; + sk.push_random_blocks(rng, account, 10).await; pool.wait_for_payload(ctx, sk.last_block()).await?; let mut setup = SetupSpec::new(rng, 3); setup.first_block = validator::BlockNumber(4); let mut setup = Setup::from(setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &setup.genesis) - .await - .wrap("try_update_genesis()")?; + conn.try_update_global_config( + ctx, + &consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + }, + ) + .await + .wrap("try_update_global_config()")?; for i in setup.genesis.first_block.0..sk.last_block().next().0 { let i = validator::BlockNumber(i); let payload = conn @@ -95,6 +104,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Start state keeper."); @@ -103,7 +113,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx)); tracing::info!("Populate storage with a bunch of blocks."); - sk.push_random_blocks(rng, 5).await; + sk.push_random_blocks(rng, account, 5).await; pool .wait_for_payload(ctx, sk.last_block()) .await @@ -118,7 +128,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { s.spawn_bg(run_main_node(ctx, cfg.config.clone(), cfg.secrets.clone(), pool.clone())); tracing::info!("Generate couple more blocks and wait for consensus to catch up."); - sk.push_random_blocks(rng, 3).await; + sk.push_random_blocks(rng, account, 3).await; pool .wait_for_block_certificate(ctx, sk.last_block()) .await @@ -126,7 +136,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Synchronously produce blocks one by one, and wait for consensus."); for _ in 0..2 { - sk.push_random_blocks(rng, 1).await; + sk.push_random_blocks(rng, account, 1).await; pool .wait_for_block_certificate(ctx, sk.last_block()) .await @@ -158,6 +168,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("spawn validator"); @@ -173,7 +184,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { )); tracing::info!("produce some batches"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool .wait_for_block_certificate(ctx, validator.last_block()) @@ -191,7 +202,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { }); tracing::info!("produce more batches"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; node_pool .wait_for_block_certificate(ctx, validator.last_block()) @@ -209,7 +220,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { }); tracing::info!("produce more blocks and compare storages"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; let want = validator_pool 
.wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?; @@ -243,6 +254,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); // topology: // validator <-> node <-> node <-> ... @@ -264,7 +276,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { .context("validator") }); tracing::info!("Generate a couple of blocks, before initializing consensus genesis."); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; // API server needs at least 1 L1 batch to start. validator.seal_batch().await; validator_pool @@ -299,7 +311,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Make validator produce blocks and wait for fetchers to get them."); // Note that block from before and after genesis have to be fetched. - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); let want = validator_pool .wait_for_block_certificates_and_verify(ctx, want_last) @@ -328,6 +340,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); let cfgs = testonly::new_configs(rng, &setup, 1); + let account = &mut Account::random(); // Run all nodes in parallel. scope::run!(ctx, |ctx, s| async { @@ -342,7 +355,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { .context("main_node") }); tracing::info!("Generate a couple of blocks, before initializing consensus genesis."); - main_node.push_random_blocks(rng, 5).await; + main_node.push_random_blocks(rng, account, 5).await; // API server needs at least 1 L1 batch to start. 
main_node.seal_batch().await; main_node_pool @@ -381,7 +394,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { } tracing::info!("Make the main node produce blocks and wait for consensus to finalize them"); - main_node.push_random_blocks(rng, 5).await; + main_node.push_random_blocks(rng, account, 5).await; let want_last = main_node.last_block(); let want = main_node_pool .wait_for_block_certificates_and_verify(ctx, want_last) @@ -409,6 +422,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn validator."); @@ -433,7 +447,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; node_pool .wait_for_block_certificate(ctx, validator.last_block()) .await?; @@ -447,7 +461,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_fetcher(ctx, client.clone())); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; node_pool .wait_for_payload(ctx, validator.last_block()) .await?; @@ -461,7 +475,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg)); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; let want = validator_pool .wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?; @@ -488,6 +502,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; @@ -535,7 +550,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { }); tracing::info!("Sync some blocks"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; let to_prune = validator.last_sealed_batch(); tracing::info!( @@ -546,7 +561,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { tracing::info!( "Seal another batch to make sure that there is at least 1 sealed batch after pruning." 
); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool .wait_for_batch(ctx, validator.last_sealed_batch()) @@ -565,7 +580,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { .prune_batches(ctx, to_prune) .await .context("prune_batches")?; - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; node_pool .wait_for_block_certificates(ctx, validator.last_block()) .await @@ -582,6 +597,7 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn a validator."); @@ -601,7 +617,7 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI s.spawn_bg(node.run_fetcher(ctx, validator.connect(ctx).await?)); tracing::info!("Produce some blocks and wait for node to fetch them"); - validator.push_random_blocks(rng, 10).await; + validator.push_random_blocks(rng, account, 10).await; let want = validator_pool .wait_for_payload(ctx, validator.last_block()) .await?; diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs new file mode 100644 index 00000000000..11b6b5c67e3 --- /dev/null +++ b/core/node/consensus/src/vm.rs @@ -0,0 +1,98 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_consensus_roles::attester; +use zksync_node_api_server::{ + execution_sandbox::{TransactionExecutor, TxSetupArgs, VmConcurrencyLimiter}, + tx_sender::MultiVMBaseSystemContracts, +}; +use zksync_state::PostgresStorageCaches; +use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; +use zksync_types::{ + ethabi, fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256, +}; +use zksync_vm_interface::{ + ExecutionResult, OneshotTracingParams, TxExecutionArgs, TxExecutionMode, +}; + +use crate::{abi, storage::ConnectionPool}; + +/// VM executes eth_calls on the db. +#[derive(Debug)] +pub(crate) struct VM { + pool: ConnectionPool, + setup_args: TxSetupArgs, + limiter: VmConcurrencyLimiter, +} + +impl VM { + /// Constructs a new `VM` instance. 
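+    /// The instance is configured for `eth_call`-style execution
+    /// (`TxExecutionMode::EthCall`) with a single-permit concurrency limiter.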
+ pub async fn new(pool: ConnectionPool) -> Self { + Self { + pool, + setup_args: TxSetupArgs { + execution_mode: TxExecutionMode::EthCall, + operator_account: AccountTreeId::default(), + fee_input: BatchFeeInput::sensible_l1_pegged_default(), + base_system_contracts: scope::wait_blocking( + MultiVMBaseSystemContracts::load_eth_call_blocking, + ) + .await, + caches: PostgresStorageCaches::new(1, 1), + validation_computational_gas_limit: u32::MAX, + chain_id: L2ChainId::default(), + whitelisted_tokens_for_aa: vec![], + enforced_base_fee: None, + }, + limiter: VmConcurrencyLimiter::new(1).0, + } + } + + // FIXME (PLA-1018): switch to oneshot executor + pub async fn call( + &self, + ctx: &ctx::Ctx, + batch: attester::BatchNumber, + address: abi::Address, + call: abi::Call, + ) -> ctx::Result { + let tx = L2Tx::new( + *address, + call.calldata().context("call.calldata()")?, + Nonce(0), + Fee { + gas_limit: U256::from(2000000000u32), + max_fee_per_gas: U256::zero(), + max_priority_fee_per_gas: U256::zero(), + gas_per_pubdata_limit: U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE), + }, + ethabi::Address::zero(), + U256::zero(), + vec![], + Default::default(), + ); + let permit = ctx.wait(self.limiter.acquire()).await?.unwrap(); + let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; + let args = conn + .vm_block_args(ctx, batch) + .await + .wrap("vm_block_args()")?; + let output = ctx + .wait(TransactionExecutor::real(usize::MAX).execute_tx_in_sandbox( + permit, + self.setup_args.clone(), + TxExecutionArgs::for_eth_call(tx.clone()), + conn.0, + args, + None, + OneshotTracingParams::default(), + )) + .await? + .context("execute_tx_in_sandbox()")?; + match output.vm.result { + ExecutionResult::Success { output } => { + Ok(call.decode_outputs(&output).context("decode_output()")?) 
+ } + other => Err(anyhow::format_err!("unsuccessful execution: {other:?}").into()), + } + } +} diff --git a/core/node/consistency_checker/Cargo.toml b/core/node/consistency_checker/Cargo.toml index 769690b493a..ed2cbd5bbd7 100644 --- a/core/node/consistency_checker/Cargo.toml +++ b/core/node/consistency_checker/Cargo.toml @@ -14,7 +14,6 @@ categories.workspace = true zksync_contracts.workspace = true zksync_dal.workspace = true zksync_eth_client.workspace = true -zksync_eth_sender.workspace = true zksync_health_check.workspace = true zksync_l1_contract_interface.workspace = true zksync_shared_metrics.workspace = true diff --git a/core/node/db_pruner/Cargo.toml b/core/node/db_pruner/Cargo.toml index eb21e3e476d..98eba1b6c0e 100644 --- a/core/node/db_pruner/Cargo.toml +++ b/core/node/db_pruner/Cargo.toml @@ -26,7 +26,6 @@ serde_json.workspace = true [dev-dependencies] assert_matches.workspace = true -test-casing.workspace = true test-log.workspace = true zksync_node_genesis.workspace = true diff --git a/core/node/fee_model/Cargo.toml b/core/node/fee_model/Cargo.toml index 09048515e7a..8760b97d9db 100644 --- a/core/node/fee_model/Cargo.toml +++ b/core/node/fee_model/Cargo.toml @@ -27,4 +27,3 @@ tracing.workspace = true [dev-dependencies] test-casing.workspace = true -zksync_node_test_utils.workspace = true diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index fe488922567..2288c0ddbe8 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -18,7 +18,6 @@ zksync_health_check.workspace = true zksync_dal.workspace = true zksync_db_connection.workspace = true zksync_config.workspace = true -zksync_protobuf_config.workspace = true zksync_state.workspace = true zksync_object_store.workspace = true zksync_storage.workspace = true @@ -64,9 +63,9 @@ futures.workspace = true anyhow.workspace = true tokio = { workspace = true, features = ["rt"] } ctrlc.workspace = true +semver.workspace = true [dev-dependencies] -zksync_env_config.workspace = true assert_matches.workspace = true # For running UI tests for proc macro trybuild.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs index 14365384c1a..5acdab568e7 100644 --- a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs +++ b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs @@ -23,6 +23,7 @@ use crate::{ /// Wiring layer for external node consensus component. 
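+/// The layer also carries the node's build version, which is forwarded to the consensus task.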
 #[derive(Debug)]
 pub struct ExternalNodeConsensusLayer {
+    pub build_version: semver::Version,
     pub config: Option<ConsensusConfig>,
     pub secrets: Option<ConsensusSecrets>,
 }
@@ -78,6 +79,7 @@ impl WiringLayer for ExternalNodeConsensusLayer {
         };
         let consensus_task = ExternalNodeTask {
+            build_version: self.build_version,
             config,
             pool,
             main_node_client,
@@ -90,6 +92,7 @@ impl WiringLayer for ExternalNodeConsensusLayer {
 #[derive(Debug)]
 pub struct ExternalNodeTask {
+    build_version: semver::Version,
     config: Option<(ConsensusConfig, ConsensusSecrets)>,
     pool: ConnectionPool,
     main_node_client: Box<DynClient<L2>>,
@@ -118,6 +121,7 @@ impl Task for ExternalNodeTask {
             self.sync_state,
             self.main_node_client,
             self.action_queue_sender,
+            self.build_version,
         ));
         // `run_external_node` might return an error or panic,
         // in which case we need to return immediately,
diff --git a/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs b/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs
index 317f0b197d8..bdd69214de9 100644
--- a/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs
+++ b/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs
@@ -76,16 +76,18 @@ impl WiringLayer for ExternalNodeInitStrategyLayer {
         });
         let snapshot_recovery = match self.snapshot_recovery_config {
             Some(recovery_config) => {
+                // Add a connection for checking whether the storage is initialized.
                 let recovery_pool = input
                     .master_pool
-                    .get_custom(self.max_postgres_concurrency.get() as u32)
+                    .get_custom(self.max_postgres_concurrency.get() as u32 + 1)
                     .await?;
-                let recovery = Arc::new(ExternalNodeSnapshotRecovery {
+                let recovery: Arc<dyn InitializeStorage> = Arc::new(ExternalNodeSnapshotRecovery {
                     client: client.clone(),
                     pool: recovery_pool,
+                    max_concurrency: self.max_postgres_concurrency,
                     recovery_config,
                     app_health,
-                }) as Arc<dyn InitializeStorage>;
+                });
                 Some(recovery)
             }
             None => None,
diff --git a/core/node/node_framework/src/service/mod.rs b/core/node/node_framework/src/service/mod.rs
index 9e3555f22c2..b6d42009354 100644
--- a/core/node/node_framework/src/service/mod.rs
+++ b/core/node/node_framework/src/service/mod.rs
@@ -200,7 +200,7 @@ impl ZkStackService {
         // Report all the errors we've met during the init.
         if !errors.is_empty() {
             for (layer, error) in &errors {
-                tracing::error!("Wiring layer {layer} can't be initialized: {error}");
+                tracing::error!("Wiring layer {layer} can't be initialized: {error:?}");
             }
             return Err(ZkStackServiceError::Wiring(errors));
         }
@@ -302,7 +302,7 @@ impl ZkStackService {
                 tracing::info!("Shutdown hook {name} completed");
             }
             Ok(Err(err)) => {
-                tracing::error!("Shutdown hook {name} failed: {err}");
+                tracing::error!("Shutdown hook {name} failed: {err:?}");
                 self.errors.push(TaskError::ShutdownHookFailed(name, err));
             }
             Err(_) => {
@@ -324,7 +324,7 @@ impl ZkStackService {
                 tracing::info!("Task {task_name} finished");
             }
             Ok(Err(err)) => {
-                tracing::error!("Task {task_name} failed: {err}");
+                tracing::error!("Task {task_name} failed: {err:?}");
                 self.errors.push(TaskError::TaskFailed(task_name, err));
             }
             Err(panic_err) => {
diff --git a/core/node/node_storage_init/src/external_node/snapshot_recovery.rs b/core/node/node_storage_init/src/external_node/snapshot_recovery.rs
index d9ba60a1bcb..9bc065b939c 100644
--- a/core/node/node_storage_init/src/external_node/snapshot_recovery.rs
+++ b/core/node/node_storage_init/src/external_node/snapshot_recovery.rs
@@ -1,4 +1,4 @@
-use std::{sync::Arc, time::Instant};
+use std::{num::NonZeroUsize, sync::Arc, time::Instant};
 
 use anyhow::Context as _;
 use tokio::sync::watch;
@@ -17,6 +17,7 @@ use crate::{InitializeStorage, SnapshotRecoveryConfig};
 pub struct ExternalNodeSnapshotRecovery {
     pub client: Box<DynClient<L2>>,
     pub pool: ConnectionPool,
+    pub max_concurrency: NonZeroUsize,
     pub recovery_config: SnapshotRecoveryConfig,
     pub app_health: Arc<AppHealthCheck>,
 }
@@ -24,8 +25,17 @@ pub struct ExternalNodeSnapshotRecovery {
 #[async_trait::async_trait]
 impl InitializeStorage for ExternalNodeSnapshotRecovery {
     async fn initialize_storage(&self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
-        let pool = self.pool.clone();
         tracing::warn!("Proceeding with snapshot recovery. This is an experimental feature; use at your own risk");
+
+        let pool_size = self.pool.max_size() as usize;
+        if pool_size < self.max_concurrency.get() + 1 {
+            tracing::error!(
+                "Connection pool has insufficient number of connections ({pool_size} vs concurrency {} + 1 connection for checks). \
+                 This will likely lead to pool starvation during recovery.",
+                self.max_concurrency
+            );
+        }
+
         let object_store_config = self.recovery_config.object_store_config.clone().context(
             "Snapshot object store must be presented if snapshot recovery is activated",
@@ -34,10 +44,13 @@ impl InitializeStorage for ExternalNodeSnapshotRecovery {
             .create_store()
             .await?;
 
-        let config = SnapshotsApplierConfig::default();
+        let config = SnapshotsApplierConfig {
+            max_concurrency: self.max_concurrency,
+            ..SnapshotsApplierConfig::default()
+        };
         let mut snapshots_applier_task = SnapshotsApplierTask::new(
             config,
-            pool,
+            self.pool.clone(),
             Box::new(self.client.clone().for_component("snapshot_recovery")),
             object_store,
         );
@@ -80,3 +93,60 @@ impl InitializeStorage for ExternalNodeSnapshotRecovery {
         Ok(completed)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use std::future;
+
+    use zksync_types::{
+        tokens::{TokenInfo, TokenMetadata},
+        Address, L2BlockNumber,
+    };
+    use zksync_web3_decl::client::MockClient;
+
+    use super::*;
+
+    #[tokio::test]
+    async fn recovery_does_not_starve_pool_connections() {
+        let pool = ConnectionPool::constrained_test_pool(5).await;
+        let app_health = Arc::new(AppHealthCheck::new(None, None));
+        let client = MockClient::builder(L2::default())
+            .method("en_syncTokens", |_number: Option<L2BlockNumber>| {
+                Ok(vec![TokenInfo {
+                    l1_address: Address::repeat_byte(1),
+                    l2_address: Address::repeat_byte(2),
+                    metadata: TokenMetadata {
+                        name: "test".to_string(),
+                        symbol: "TEST".to_string(),
+                        decimals: 18,
+                    },
+                }])
+            })
+            .build();
+        let recovery = ExternalNodeSnapshotRecovery {
+            client: Box::new(client),
+            pool,
+            max_concurrency: NonZeroUsize::new(4).unwrap(),
+            recovery_config: SnapshotRecoveryConfig {
+                snapshot_l1_batch_override: None,
+                drop_storage_key_preimages: false,
+                object_store_config: None,
+            },
+            app_health,
+        };
+
+        // Emulate recovery by indefinitely holding onto `max_concurrency` connections. In practice,
+        // the snapshot applier will release connections eventually, but it may require more time than the connection
+        // acquisition timeout configured for the DB pool.
+        for _ in 0..recovery.max_concurrency.get() {
+            let connection = recovery.pool.connection().await.unwrap();
+            tokio::spawn(async move {
+                future::pending::<()>().await;
+                drop(connection);
+            });
+        }
+
+        // The only token reported by the mock client isn't recovered.
+        assert!(!recovery.is_initialized().await.unwrap());
+    }
+}
diff --git a/core/node/node_sync/src/client.rs b/core/node/node_sync/src/client.rs
index d064803eab5..ee89db10ddd 100644
--- a/core/node/node_sync/src/client.rs
+++ b/core/node/node_sync/src/client.rs
@@ -42,12 +42,7 @@ pub trait MainNodeClient: 'static + Send + Sync + fmt::Debug {
         with_transactions: bool,
     ) -> EnrichedClientResult>;
 
-    async fn fetch_consensus_genesis(&self) -> EnrichedClientResult>;
-
     async fn fetch_genesis_config(&self) -> EnrichedClientResult<GenesisConfig>;
-
-    async fn fetch_attestation_status(&self)
-        -> EnrichedClientResult>;
 }
 
 #[async_trait]
@@ -133,20 +128,6 @@ impl MainNodeClient for Box<DynClient<L2>> {
             .with_arg("with_transactions", &with_transactions)
             .await
     }
-
-    async fn fetch_consensus_genesis(&self) -> EnrichedClientResult> {
-        self.consensus_genesis()
-            .rpc_context("consensus_genesis")
-            .await
-    }
-
-    async fn fetch_attestation_status(
-        &self,
-    ) -> EnrichedClientResult> {
-        self.attestation_status()
-            .rpc_context("attestation_status")
-            .await
-    }
 }
 
 /// Main node health check.
diff --git a/core/node/node_sync/src/testonly.rs b/core/node/node_sync/src/testonly.rs
index b9e1adc995a..16027a71a25 100644
--- a/core/node/node_sync/src/testonly.rs
+++ b/core/node/node_sync/src/testonly.rs
@@ -71,18 +71,6 @@ impl MainNodeClient for MockMainNodeClient {
         Ok(Some(block))
     }
 
-    async fn fetch_consensus_genesis(
-        &self,
-    ) -> EnrichedClientResult> {
-        unimplemented!()
-    }
-
-    async fn fetch_attestation_status(
-        &self,
-    ) -> EnrichedClientResult> {
-        unimplemented!()
-    }
-
     async fn fetch_genesis_config(&self) -> EnrichedClientResult<GenesisConfig> {
         Ok(mock_genesis_config())
     }
diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml
index 31a0e8437ba..82063b23fdb 100644
--- a/core/node/proof_data_handler/Cargo.toml
+++ b/core/node/proof_data_handler/Cargo.toml
@@ -16,7 +16,6 @@ zksync_config.workspace = true
 zksync_dal.workspace = true
 zksync_object_store.workspace = true
 zksync_prover_interface.workspace = true
-zksync_tee_verifier.workspace = true
 zksync_types.workspace = true
 anyhow.workspace = true
 axum.workspace = true
diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs
index 0ce8c06be0e..23aec8af49f 100644
--- a/core/node/state_keeper/src/testonly/mod.rs
+++ b/core/node/state_keeper/src/testonly/mod.rs
@@ -117,7 +117,7 @@ pub async fn fund(pool: &ConnectionPool, addresses: &[Address]) {
 
 pub(crate) const DEFAULT_GAS_PER_PUBDATA: u32 = 10000;
 
-pub(crate) fn fee(gas_limit: u32) -> Fee {
+pub fn fee(gas_limit: u32) -> Fee {
     Fee {
         gas_limit: U256::from(gas_limit),
         max_fee_per_gas: SYSTEM_CONTEXT_MINIMAL_BASE_FEE.into(),
diff --git a/core/tests/revert-test/tests/revert-and-restart-en.test.ts b/core/tests/revert-test/tests/revert-and-restart-en.test.ts
index e1694418db1..42fa01a02c9 100644
--- a/core/tests/revert-test/tests/revert-and-restart-en.test.ts
+++ b/core/tests/revert-test/tests/revert-and-restart-en.test.ts
@@ -4,79 +4,34 @@
 // main_contract.getTotalBatchesCommitted actually checks the number of batches committed.
 // main_contract.getTotalBatchesExecuted actually checks the number of batches executed.
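The rewritten test below delegates node lifecycle management to the `NodeSpawner` and `Node` helpers added to `core/tests/revert-test/tests/utils.ts` later in this diff. For orientation, here is a condensed sketch of the flow those helpers enable; the wiring and option plumbing are illustrative, not the test's exact code (the real test obtains spawners, URLs, and the operator address in its `before` hook):

```ts
import { NodeSpawner, executeRevert, waitToCommitBatchesWithoutExecution } from './utils';

// Illustrative outline of the EN revert flow; `pathToHome`, `chain`, and
// `operatorAddress` are assumed to come from the test configuration.
async function revertFlowSketch(
    mainSpawner: NodeSpawner,
    extSpawner: NodeSpawner,
    pathToHome: string,
    chain: string | undefined,
    operatorAddress: string
) {
    // Start both nodes with batch execution enabled.
    let mainNode = await mainSpawner.spawnMainNode(true);
    let extNode = await extSpawner.spawnExtNode();
    const mainContract = await mainNode.tester.syncWallet.getMainContract();

    // Restart the main node with execution off, so new batches get committed
    // on L1 but are never executed.
    await mainNode.killAndWaitForShutdown();
    mainNode = await mainSpawner.spawnMainNode(false);

    // Once a committed-but-unexecuted batch exists, stop the node and revert it.
    const committed = await waitToCommitBatchesWithoutExecution(mainContract);
    await mainNode.killAndWaitForShutdown();
    await executeRevert(pathToHome, chain, operatorAddress, committed, mainContract);

    // Restart the main node; the EN detects the reorg, exits, and is respawned.
    mainNode = await mainSpawner.spawnMainNode(true);
    await extNode.waitForExit();
    extNode = await extSpawner.spawnExtNode();
}
```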
import * as utils from 'utils'; -import { Tester } from './tester'; -import { exec, runServerInBackground, runExternalNodeInBackground } from './utils'; +import { + checkRandomTransfer, + executeDepositAfterRevert, + executeRevert, + Node, + NodeSpawner, + NodeType, + waitToCommitBatchesWithoutExecution, + waitToExecuteBatch +} from './utils'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; -import { expect, assert } from 'chai'; -import fs from 'fs'; +import { assert, expect } from 'chai'; +import fs from 'node:fs/promises'; import * as child_process from 'child_process'; import * as dotenv from 'dotenv'; -import { - getAllConfigsPath, - loadConfig, - shouldLoadConfigFromFile, - replaceAggregatedBlockExecuteDeadline -} from 'utils/build/file-configs'; +import { loadConfig, replaceAggregatedBlockExecuteDeadline, shouldLoadConfigFromFile } from 'utils/build/file-configs'; import path from 'path'; -import { ChildProcessWithoutNullStreams } from 'child_process'; import { logsTestPath } from 'utils/build/logs'; -import { killPidWithAllChilds } from 'utils/build/kill'; +import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain'; const pathToHome = path.join(__dirname, '../../../..'); const fileConfig = shouldLoadConfigFromFile(); -let mainEnv: string; -let extEnv: string; - -let deploymentMode: string; - -if (fileConfig.loadFromFile) { - const genesisConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'genesis.yaml' }); - deploymentMode = genesisConfig.deploymentMode; -} else { - if (!process.env.DEPLOYMENT_MODE) { - throw new Error('DEPLOYMENT_MODE is not set'); - } - if (!['Validium', 'Rollup'].includes(process.env.DEPLOYMENT_MODE)) { - throw new Error(`Unknown deployment mode: ${process.env.DEPLOYMENT_MODE}`); - } - deploymentMode = process.env.DEPLOYMENT_MODE; -} - -if (deploymentMode == 'Validium') { - mainEnv = process.env.IN_DOCKER ? 'dev_validium_docker' : 'dev_validium'; - extEnv = process.env.IN_DOCKER ? 'ext-node-validium-docker' : 'ext-node-validium'; -} else { - // Rollup deployment mode - mainEnv = process.env.IN_DOCKER ? 'docker' : 'dev'; - extEnv = process.env.IN_DOCKER ? 'ext-node-docker' : 'ext-node'; -} - async function logsPath(name: string): Promise { return await logsTestPath(fileConfig.chain, 'logs/revert/en', name); } -interface SuggestedValues { - lastExecutedL1BatchNumber: bigint; - nonce: number; - priorityFee: number; -} - -// Parses output of "print-suggested-values" command of the revert block tool. -function parseSuggestedValues(jsonString: string): SuggestedValues { - const json = JSON.parse(jsonString); - assert(json && typeof json === 'object'); - assert(Number.isInteger(json.last_executed_l1_batch_number)); - assert(Number.isInteger(json.nonce)); - assert(Number.isInteger(json.priority_fee)); - return { - lastExecutedL1BatchNumber: BigInt(json.last_executed_l1_batch_number), - nonce: json.nonce, - priorityFee: json.priority_fee - }; -} - function run(cmd: string, args: string[], options: child_process.SpawnOptions): child_process.SpawnSyncReturns { let res = child_process.spawnSync(cmd, args, options); expect(res.error).to.be.undefined; @@ -94,7 +49,7 @@ function compileBinaries() { // Fetches env vars for the given environment (like 'dev', 'ext-node'). // TODO: it would be better to import zk tool code directly. 
-function fetchEnv(zksyncEnv: string): any { +function fetchEnv(zksyncEnv: string): Record { let res = run('./bin/zk', ['f', 'env'], { cwd: process.env.ZKSYNC_HOME, env: { @@ -106,218 +61,62 @@ function fetchEnv(zksyncEnv: string): any { return { ...process.env, ...dotenv.parse(res.stdout) }; } -async function runBlockReverter(args: string[]): Promise { - let env = fetchEnv(mainEnv); - - let fileConfigFlags = ''; +/** Loads env profiles for the main and external nodes */ +function loadEnvs() { + let deploymentMode: string; if (fileConfig.loadFromFile) { - const configPaths = getAllConfigsPath({ pathToHome, chain: fileConfig.chain }); - fileConfigFlags = ` - --config-path=${configPaths['general.yaml']} - --contracts-config-path=${configPaths['contracts.yaml']} - --secrets-path=${configPaths['secrets.yaml']} - --wallets-path=${configPaths['wallets.yaml']} - --genesis-path=${configPaths['genesis.yaml']} - `; - } - - const cmd = `cd ${pathToHome} && RUST_LOG=off cargo run --bin block_reverter --release -- ${args.join( - ' ' - )} ${fileConfigFlags}`; - const executedProcess = await exec(cmd, { - cwd: env.ZKSYNC_HOME, - env: { - ...env, - PATH: process.env.PATH - } - }); - - return executedProcess.stdout; -} - -async function killServerAndWaitForShutdown(proc: MainNode | ExtNode) { - await proc.terminate(); - // Wait until it's really stopped. - let iter = 0; - while (iter < 30) { - try { - await proc.tester.syncWallet.provider.getBlockNumber(); - await utils.sleep(2); - iter += 1; - } catch (_) { - // When exception happens, we assume that server died. - return; + const genesisConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'genesis.yaml' }); + deploymentMode = genesisConfig.deploymentMode; + } else { + deploymentMode = process.env.DEPLOYMENT_MODE ?? 'Rollup'; + if (!['Validium', 'Rollup'].includes(deploymentMode)) { + throw new Error(`Unknown deployment mode: ${deploymentMode}`); } } - // It's going to panic anyway, since the server is a singleton entity, so better to exit early. - throw new Error("Server didn't stop after a kill request"); -} - -class MainNode { - constructor(public tester: Tester, public proc: ChildProcessWithoutNullStreams, public zkInception: boolean) {} - - public async terminate() { - try { - await killPidWithAllChilds(this.proc.pid!, 9); - } catch (err) { - console.log(`ignored error: ${err}`); - } - } - - // Terminates all main node processes running. - // - // WARNING: This is not safe to use when running nodes on multiple chains. - public static async terminateAll() { - try { - await utils.exec('killall -INT zksync_server'); - } catch (err) { - console.log(`ignored error: ${err}`); - } + console.log(`Using deployment mode: ${deploymentMode}`); + + let mainEnvName: string; + let extEnvName: string; + if (deploymentMode === 'Validium') { + mainEnvName = process.env.IN_DOCKER ? 'dev_validium_docker' : 'dev_validium'; + extEnvName = process.env.IN_DOCKER ? 'ext-node-validium-docker' : 'ext-node-validium'; + } else { + // Rollup deployment mode + mainEnvName = process.env.IN_DOCKER ? 'docker' : 'dev'; + extEnvName = process.env.IN_DOCKER ? 'ext-node-docker' : 'ext-node'; } - // Spawns a main node. - // if enableConsensus is set, consensus component will be started in the main node. - // if enableExecute is NOT set, main node will NOT send L1 transactions to execute L1 batches. 
- public static async spawn( - logs: fs.WriteStream, - enableConsensus: boolean, - enableExecute: boolean, - ethClientWeb3Url: string, - apiWeb3JsonRpcHttpUrl: string, - baseTokenAddress: string - ): Promise { - let env = fetchEnv(mainEnv); - env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = enableExecute ? '1' : '10000'; - // Set full mode for the Merkle tree as it is required to get blocks committed. - env.DATABASE_MERKLE_TREE_MODE = 'full'; - - if (fileConfig.loadFromFile) { - replaceAggregatedBlockExecuteDeadline(pathToHome, fileConfig, enableExecute ? 1 : 10000); - } - - let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads'; - if (enableConsensus) { - components += ',consensus'; - } - if (baseTokenAddress != zksync.utils.LEGACY_ETH_ADDRESS) { - components += ',base_token_ratio_persister'; - } - let proc = runServerInBackground({ - components: [components], - stdio: ['ignore', logs, logs], - cwd: pathToHome, - env: env, - useZkInception: fileConfig.loadFromFile, - chain: fileConfig.chain - }); - - // Wait until the main node starts responding. - let tester: Tester = await Tester.init(ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress); - while (true) { - try { - console.log(`Web3 ${apiWeb3JsonRpcHttpUrl}`); - await tester.syncWallet.provider.getBridgehubContractAddress(); - break; - } catch (err) { - if (proc.exitCode != null) { - assert.fail(`server failed to start, exitCode = ${proc.exitCode}`); - } - console.log('MainNode waiting for api endpoint'); - await utils.sleep(1); - } - } - return new MainNode(tester, proc, fileConfig.loadFromFile); - } -} - -class ExtNode { - constructor(public tester: Tester, private proc: child_process.ChildProcess, public zkInception: boolean) {} - - public async terminate() { - try { - await killPidWithAllChilds(this.proc.pid!, 9); - } catch (err) { - console.log(`ignored error: ${err}`); - } - } - - // Terminates all main node processes running. - // - // WARNING: This is not safe to use when running nodes on multiple chains. - public static async terminateAll() { - try { - await utils.exec('killall -INT zksync_external_node'); - } catch (err) { - console.log(`ignored error: ${err}`); - } - } - - // Spawns an external node. - // If enableConsensus is set, the node will use consensus P2P network to fetch blocks. - public static async spawn( - logs: fs.WriteStream, - enableConsensus: boolean, - ethClientWeb3Url: string, - enEthClientUrl: string, - baseTokenAddress: string - ): Promise { - let env = fetchEnv(extEnv); - let args = []; - if (enableConsensus) { - args.push('--enable-consensus'); - } - - // Run server in background. - let proc = runExternalNodeInBackground({ - stdio: ['ignore', logs, logs], - cwd: pathToHome, - env: env, - useZkInception: fileConfig.loadFromFile, - chain: fileConfig.chain - }); - - // Wait until the node starts responding. - let tester: Tester = await Tester.init(ethClientWeb3Url, enEthClientUrl, baseTokenAddress); - while (true) { - try { - await tester.syncWallet.provider.getBlockNumber(); - break; - } catch (err) { - if (proc.exitCode != null) { - assert.fail(`node failed to start, exitCode = ${proc.exitCode}`); - } - console.log('ExtNode waiting for api endpoint'); - await utils.sleep(1); - } - } - return new ExtNode(tester, proc, fileConfig.loadFromFile); - } - - // Waits for the node process to exit. 
- public async waitForExit(): Promise { - while (this.proc.exitCode === null) { - await utils.sleep(1); - } - return this.proc.exitCode; - } + console.log(`Fetching main node env: ${mainEnvName}`); + const mainEnv = fetchEnv(mainEnvName); + console.log(`Fetching EN env: ${extEnvName}`); + const extEnv = fetchEnv(extEnvName); + return [mainEnv, extEnv]; } describe('Block reverting test', function () { - let ethClientWeb3Url: string; - let apiWeb3JsonRpcHttpUrl: string; - let baseTokenAddress: string; - let enEthClientUrl: string; let operatorAddress: string; - let mainLogs: fs.WriteStream; - let extLogs: fs.WriteStream; let depositAmount: bigint; - let enableConsensus: boolean; - let mainNode: MainNode; - let extNode: ExtNode; + let mainNodeSpawner: NodeSpawner; + let mainEnv: Record; + let mainNode: Node; + let extNodeSpawner: NodeSpawner; + let extNode: Node; + let mainContract: IZkSyncHyperchain; + let alice: zksync.Wallet; + let depositL1BatchNumber: number; + let batchesCommittedBeforeRevert: bigint; const autoKill: boolean = !fileConfig.loadFromFile || !process.env.NO_KILL; before('initialize test', async () => { + let ethClientWeb3Url: string; + let apiWeb3JsonRpcHttpUrl: string; + let baseTokenAddress: string; + let enEthClientUrl: string; + + let extEnv; + [mainEnv, extEnv] = loadEnvs(); + if (fileConfig.loadFromFile) { const secretsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'secrets.yaml' }); const generalConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'general.yaml' }); @@ -336,223 +135,143 @@ describe('Block reverting test', function () { enEthClientUrl = externalNodeGeneralConfig.api.web3_json_rpc.http_url; operatorAddress = walletsConfig.operator.address; } else { - let env = fetchEnv(mainEnv); - ethClientWeb3Url = env.ETH_CLIENT_WEB3_URL; - apiWeb3JsonRpcHttpUrl = env.API_WEB3_JSON_RPC_HTTP_URL; - baseTokenAddress = env.CONTRACTS_BASE_TOKEN_ADDR; - enEthClientUrl = `http://127.0.0.1:${env.EN_HTTP_PORT}`; + ethClientWeb3Url = mainEnv.ETH_CLIENT_WEB3_URL!; + apiWeb3JsonRpcHttpUrl = mainEnv.API_WEB3_JSON_RPC_HTTP_URL!; + baseTokenAddress = mainEnv.CONTRACTS_BASE_TOKEN_ADDR!; + enEthClientUrl = `http://127.0.0.1:${extEnv.EN_HTTP_PORT!}`; // TODO use env variable for this? 
operatorAddress = '0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7'; } - mainLogs = fs.createWriteStream(await logsPath('server.log'), { flags: 'a' }); - extLogs = fs.createWriteStream(await logsPath('external_node.log'), { flags: 'a' }); + + const pathToMainLogs = await logsPath('server.log'); + const mainLogs = await fs.open(pathToMainLogs, 'a'); + console.log(`Writing main node logs to ${pathToMainLogs}`); + + const pathToEnLogs = await logsPath('external_node.log'); + const extLogs = await fs.open(pathToEnLogs, 'a'); + console.log(`Writing EN logs to ${pathToEnLogs}`); + if (process.env.SKIP_COMPILATION !== 'true' && !fileConfig.loadFromFile) { compileBinaries(); } - enableConsensus = process.env.ENABLE_CONSENSUS === 'true'; + const enableConsensus = process.env.ENABLE_CONSENSUS === 'true'; console.log(`enableConsensus = ${enableConsensus}`); depositAmount = ethers.parseEther('0.001'); - }); - - step('run', async () => { - if (autoKill) { - console.log('Make sure that nodes are not running'); - await ExtNode.terminateAll(); - await MainNode.terminateAll(); - } - console.log('Start main node'); - mainNode = await MainNode.spawn( - mainLogs, + const mainNodeSpawnOptions = { enableConsensus, - true, ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress - ); - console.log('Start ext node'); - extNode = await ExtNode.spawn(extLogs, enableConsensus, ethClientWeb3Url, enEthClientUrl, baseTokenAddress); + }; + mainNodeSpawner = new NodeSpawner(pathToHome, mainLogs, fileConfig, mainNodeSpawnOptions, mainEnv); + const extNodeSpawnOptions = { + enableConsensus, + ethClientWeb3Url, + apiWeb3JsonRpcHttpUrl: enEthClientUrl, + baseTokenAddress + }; + extNodeSpawner = new NodeSpawner(pathToHome, extLogs, fileConfig, extNodeSpawnOptions, extEnv); + }); + step('Make sure that nodes are not running', async () => { + if (autoKill) { + await Node.killAll(NodeType.MAIN); + await Node.killAll(NodeType.EXT); + } + }); + + step('Start main node', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); + + step('Start external node', async () => { + extNode = await extNodeSpawner.spawnExtNode(); + }); + + step('Fund wallets', async () => { await mainNode.tester.fundSyncWallet(); + mainContract = await mainNode.tester.syncWallet.getMainContract(); await extNode.tester.fundSyncWallet(); + alice = extNode.tester.emptyWallet(); + }); - const main_contract = await mainNode.tester.syncWallet.getMainContract(); - const baseToken = await mainNode.tester.syncWallet.getBaseToken(); - const isETHBasedChain = baseToken === zksync.utils.ETH_ADDRESS_IN_CONTRACTS; - const alice: zksync.Wallet = extNode.tester.emptyWallet(); + step('Seal L1 batch', async () => { + depositL1BatchNumber = await extNode.createBatchWithDeposit(alice.address, depositAmount); + }); - console.log( - 'Finalize an L1 transaction to ensure at least 1 executed L1 batch and that all transactions are processed' - ); + step('wait for L1 batch to get executed', async () => { + await waitToExecuteBatch(mainContract, depositL1BatchNumber); + }); - for (let iter = 0; iter < 30; iter++) { - try { - const h: zksync.types.PriorityOpResponse = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await h.waitFinalize(); - break; - } catch (error: any) { - if (error.message == 'server shutting down') { - await utils.sleep(2); - continue; - } - } - } + step('Restart main node with batch execution turned off', async () => { + await mainNode.killAndWaitForShutdown(); + mainNode = await mainNodeSpawner.spawnMainNode(false); + }); - console.log('Restart the main node with L1 batch execution disabled.'); - await killServerAndWaitForShutdown(mainNode); - mainNode = await MainNode.spawn( - mainLogs, - enableConsensus, - false, - ethClientWeb3Url, - apiWeb3JsonRpcHttpUrl, - baseTokenAddress - ); + // FIXME: need 2 batches? + step('seal another L1 batch', async () => { + await extNode.createBatchWithDeposit(alice.address, depositAmount); + }); - console.log('Commit at least 2 L1 batches which are not executed'); - const lastExecuted = await main_contract.getTotalBatchesExecuted(); - // One is not enough to test the reversion of sk cache because - // it gets updated with some batch logs only at the start of the next batch. - const initialL1BatchNumber = await main_contract.getTotalBatchesCommitted(); - const firstDepositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - - await firstDepositHandle.wait(); - while ((await extNode.tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber) { - await utils.sleep(0.1); - } + step('check wallet balance', async () => { + const balance = await alice.getBalance(); + console.log(`Balance before revert: ${balance}`); + assert(balance === depositAmount * 2n, 'Incorrect balance after deposits'); + }); - const secondDepositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await secondDepositHandle.wait(); - while ((await extNode.tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber + 1n) { - await utils.sleep(0.3); - } + step('wait for the new batch to be committed', async () => { + batchesCommittedBeforeRevert = await waitToCommitBatchesWithoutExecution(mainContract); + }); - const alice2 = await alice.getBalance(); - while (true) { - const lastCommitted = await main_contract.getTotalBatchesCommitted(); - console.log(`lastExecuted = ${lastExecuted}, lastCommitted = ${lastCommitted}`); - if (lastCommitted - lastExecuted >= 2n) { - console.log('Terminate the main node'); - await killServerAndWaitForShutdown(mainNode); - break; - } - await utils.sleep(0.3); - } + step('stop server', async () => { + await mainNode.killAndWaitForShutdown(); + }); - console.log('Ask block_reverter to suggest to which L1 batch we should revert'); - const values_json = await runBlockReverter([ - 'print-suggested-values', - '--json', - '--operator-address', - operatorAddress - ]); - console.log(`values = ${values_json}`); - const values = parseSuggestedValues(values_json); - assert(lastExecuted === values.lastExecutedL1BatchNumber); - - console.log('Send reverting transaction to L1'); - await runBlockReverter([ - 'send-eth-transaction', - '--l1-batch-number', - values.lastExecutedL1BatchNumber.toString(), - '--nonce', - values.nonce.toString(), - '--priority-fee-per-gas', - values.priorityFee.toString() - ]); - - console.log('Check that batches are reverted on L1'); - const lastCommitted2 = await main_contract.getTotalBatchesCommitted(); - console.log(`lastCommitted = ${lastCommitted2}, want ${lastExecuted}`); - assert(lastCommitted2 === lastExecuted); - - console.log('Rollback db'); - await runBlockReverter([ - 'rollback-db', - '--l1-batch-number', - values.lastExecutedL1BatchNumber.toString(), - '--rollback-postgres', - '--rollback-tree', - '--rollback-sk-cache', - '--rollback-vm-runners-cache' - ]); - - console.log('Start main node.'); - mainNode = await MainNode.spawn( - mainLogs, - enableConsensus, - true, - ethClientWeb3Url, - apiWeb3JsonRpcHttpUrl, - baseTokenAddress + step('revert batches', async () => { + await executeRevert( + pathToHome, + fileConfig.chain, + operatorAddress, + batchesCommittedBeforeRevert, + mainContract, + mainEnv ); + }); - console.log('Wait for the external node to detect reorg and terminate'); + step('restart server', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); + + step('Wait for EN to detect reorg and terminate', async () => { await extNode.waitForExit(); + }); - console.log('Restart external node and wait for it to revert.'); - extNode = await ExtNode.spawn(extLogs, enableConsensus, ethClientWeb3Url, enEthClientUrl, baseTokenAddress); - - console.log('Execute an L1 transaction'); - const depositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - - let l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); - while (!l1TxResponse) { - console.log(`Deposit ${depositHandle.hash} is not visible to the L1 network; sleeping`); - await utils.sleep(1); - l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); - } + step('Restart EN', async () => { + extNode = await extNodeSpawner.spawnExtNode(); + }); - // TODO: it would be nice to know WHY it "doesn't work well with block reversions" and what it actually means. - console.log( - "ethers doesn't work well with block reversions, so wait for the receipt before calling `.waitFinalize()`." - ); - const l2Tx = await alice._providerL2().getL2TransactionFromPriorityOp(l1TxResponse); - let receipt = null; - while (true) { - receipt = await extNode.tester.syncWallet.provider.getTransactionReceipt(l2Tx.hash); - if (receipt != null) { - break; - } + step('wait until last deposit is re-executed', async () => { + let balanceBefore; + let tryCount = 0; + while ((balanceBefore = await alice.getBalance()) !== 2n * depositAmount && tryCount < 30) { + console.log(`Balance after revert: ${balanceBefore}`); + tryCount++; await utils.sleep(1); } - await depositHandle.waitFinalize(); - expect(receipt.status).to.be.eql(1); - - // The reverted transactions are expected to be reexecuted before the next transaction is applied. - // Hence we compare the state against the alice2, rather than against alice3. - const alice4want = alice2 + depositAmount; - const alice4 = await alice.getBalance(); - console.log(`Alice's balance is ${alice4}, want ${alice4want}`); - assert(alice4 === alice4want); - - console.log('Execute an L2 transaction'); - await checkedRandomTransfer(alice, 1n); + assert(balanceBefore === 2n * depositAmount, 'Incorrect balance after revert'); + }); + + step('execute transaction after revert', async () => { + await executeDepositAfterRevert(extNode.tester, alice, depositAmount); + const balanceAfter = await alice.getBalance(); + console.log(`Balance after another deposit: ${balanceAfter}`); + assert(balanceAfter === depositAmount * 3n, 'Incorrect balance after another deposit'); + }); + + step('check random transfer', async () => { + await checkRandomTransfer(alice, 1n); }); after('terminate nodes', async () => { @@ -564,25 +283,3 @@ describe('Block reverting test', function () { } }); }); - -// Transfers amount from sender to a random wallet in an L2 transaction. -async function checkedRandomTransfer(sender: zksync.Wallet, amount: bigint) { - const senderBalanceBefore = await sender.getBalance(); - const receiver = zksync.Wallet.createRandom().connect(sender.provider); - const transferHandle = await sender.sendTransaction({ to: receiver.address, value: amount, type: 0 }); - - // ethers doesn't work well with block reversions, so we poll for the receipt manually. - let txReceipt = null; - do { - txReceipt = await sender.provider.getTransactionReceipt(transferHandle.hash); - await utils.sleep(1); - } while (txReceipt === null); - - const senderBalance = await sender.getBalance(); - const receiverBalance = await receiver.provider!.getBalance(receiver.address); - - expect(receiverBalance === amount, 'Failed updated the balance of the receiver').to.be.true; - - const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! 
+ amount; - expect(senderBalance + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender').to.be.true; -} diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts index a01788284d2..163a7294b5f 100644 --- a/core/tests/revert-test/tests/revert-and-restart.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart.test.ts @@ -1,104 +1,52 @@ import * as utils from 'utils'; -import { loadConfig, shouldLoadConfigFromFile, getAllConfigsPath } from 'utils/build/file-configs'; -import { runServerInBackground } from './utils'; -import { Tester } from './tester'; +import { loadConfig, shouldLoadConfigFromFile } from 'utils/build/file-configs'; +import { + checkRandomTransfer, + executeDepositAfterRevert, + executeRevert, + Node, + NodeSpawner, + NodeType, + waitToCommitBatchesWithoutExecution, + waitToExecuteBatch +} from './utils'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; -import { expect } from 'chai'; +import { assert } from 'chai'; import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain'; import path from 'path'; -import { ChildProcessWithoutNullStreams } from 'child_process'; import fs from 'node:fs/promises'; import { logsTestPath } from 'utils/build/logs'; -import { killPidWithAllChilds } from 'utils/build/kill'; - -// Parses output of "print-suggested-values" command of the revert block tool. -function parseSuggestedValues(suggestedValuesString: string): { - lastL1BatchNumber: bigint; - nonce: bigint; - priorityFee: bigint; -} { - const json = JSON.parse(suggestedValuesString); - if (!json || typeof json !== 'object') { - throw new TypeError('suggested values are not an object'); - } - - const lastL1BatchNumber = json.last_executed_l1_batch_number; - if (!Number.isInteger(lastL1BatchNumber)) { - throw new TypeError('suggested `lastL1BatchNumber` is not an integer'); - } - const nonce = json.nonce; - if (!Number.isInteger(nonce)) { - throw new TypeError('suggested `nonce` is not an integer'); - } - const priorityFee = json.priority_fee; - if (!Number.isInteger(priorityFee)) { - throw new TypeError('suggested `priorityFee` is not an integer'); - } - - return { - lastL1BatchNumber: BigInt(lastL1BatchNumber), - nonce: BigInt(nonce), - priorityFee: BigInt(priorityFee) - }; -} - -async function killServerAndWaitForShutdown(tester: Tester, serverProcess?: ChildProcessWithoutNullStreams) { - if (!serverProcess) { - await utils.exec('killall -9 zksync_server').catch(ignoreError); - return; - } - await killPidWithAllChilds(serverProcess.pid!, 9).catch(ignoreError); - // Wait until it's really stopped. - let iter = 0; - while (iter < 30) { - try { - await tester.syncWallet.provider.getBlockNumber(); - await utils.sleep(2); - iter += 1; - } catch (_) { - // When exception happens, we assume that server died. - return; - } - } - // It's going to panic anyway, since the server is a singleton entity, so better to exit early. - throw new Error("Server didn't stop after a kill request"); -} function ignoreError(_err: any, context?: string) { const message = context ? 
`Error ignored (context: ${context}).` : 'Error ignored.'; console.info(message); } -const fileConfig = shouldLoadConfigFromFile(); -const depositAmount = ethers.parseEther('0.001'); - -async function logsPath(name: string): Promise { - return await logsTestPath(fileConfig.chain, 'logs/revert/', name); -} - describe('Block reverting test', function () { - let tester: Tester; let alice: zksync.Wallet; let mainContract: IZkSyncHyperchain; - let blocksCommittedBeforeRevert: bigint; - let logs: fs.FileHandle; + let depositL1BatchNumber: number; + let batchesCommittedBeforeRevert: bigint; + let mainLogs: fs.FileHandle; let operatorAddress: string; + let baseTokenAddress: string; let ethClientWeb3Url: string; let apiWeb3JsonRpcHttpUrl: string; - let serverProcess: ChildProcessWithoutNullStreams | undefined; + let mainNodeSpawner: NodeSpawner; + let mainNode: Node; + + const fileConfig = shouldLoadConfigFromFile(); const pathToHome = path.join(__dirname, '../../../..'); const autoKill: boolean = !fileConfig.loadFromFile || !process.env.NO_KILL; const enableConsensus = process.env.ENABLE_CONSENSUS == 'true'; - let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads'; - if (enableConsensus) { - components += ',consensus'; + const depositAmount = ethers.parseEther('0.001'); + + async function logsPath(name: string): Promise { + return await logsTestPath(fileConfig.chain, 'logs/revert/', name); } before('initialize test', async () => { - // Clone file configs if necessary - let baseTokenAddress: string; - if (!fileConfig.loadFromFile) { operatorAddress = process.env.ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR!; ethClientWeb3Url = process.env.ETH_CLIENT_WEB3_URL!; @@ -132,198 +80,107 @@ describe('Block reverting test', function () { baseTokenAddress = contractsConfig.l1.base_token_addr; } - // Create test wallets - tester = await Tester.init(ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress); - alice = tester.emptyWallet(); + const pathToMainLogs = await logsPath('server.log'); + mainLogs = await fs.open(pathToMainLogs, 'a'); + console.log(`Writing server logs to ${pathToMainLogs}`); + + mainNodeSpawner = new NodeSpawner(pathToHome, mainLogs, fileConfig, { + enableConsensus, + ethClientWeb3Url, + apiWeb3JsonRpcHttpUrl, + baseTokenAddress + }); }); - step('run server and execute some transactions', async () => { + step('Make sure that the server is not running', async () => { if (autoKill) { // Make sure server isn't running. - await killServerAndWaitForShutdown(tester); - } - - // Run server in background. - logs = await fs.open(await logsPath('server.log'), 'a'); - serverProcess = runServerInBackground({ - components: [components], - stdio: ['ignore', logs, logs], - cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, - chain: fileConfig.chain - }); - - // Server may need some time to recompile if it's a cold run, so wait for it. - let iter = 0; - while (iter < 30 && !mainContract) { - try { - mainContract = await tester.syncWallet.getMainContract(); - } catch (err) { - ignoreError(err, 'waiting for server HTTP JSON-RPC to start'); - await utils.sleep(2); - iter += 1; - } - } - if (!mainContract) { - throw new Error('Server did not start'); - } - - await tester.fundSyncWallet(); - - // Seal 2 L1 batches. - // One is not enough to test the reversion of sk cache because - // it gets updated with some batch logs only at the start of the next batch. 
- const initialL1BatchNumber = await tester.web3Provider.getL1BatchNumber(); - const firstDepositHandle = await tester.syncWallet.deposit({ - token: tester.isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await firstDepositHandle.wait(); - while ((await tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber) { - await utils.sleep(1); + await Node.killAll(NodeType.MAIN); } - const secondDepositHandle = await tester.syncWallet.deposit({ - token: tester.isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await secondDepositHandle.wait(); - while ((await tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber + 1) { - await utils.sleep(1); - } - - const balance = await alice.getBalance(); - expect(balance === depositAmount * 2n, 'Incorrect balance after deposits').to.be.true; + }); - // Check L1 committed and executed blocks. - let blocksCommitted = await mainContract.getTotalBatchesCommitted(); - let blocksExecuted = await mainContract.getTotalBatchesExecuted(); - let tryCount = 0; - while (blocksCommitted === blocksExecuted && tryCount < 100) { - blocksCommitted = await mainContract.getTotalBatchesCommitted(); - blocksExecuted = await mainContract.getTotalBatchesExecuted(); - tryCount += 1; - await utils.sleep(1); - } - expect(blocksCommitted > blocksExecuted, 'There is no committed but not executed block').to.be.true; - blocksCommittedBeforeRevert = blocksCommitted; + step('start server', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); - // Stop server. - await killServerAndWaitForShutdown(tester, serverProcess!); + step('fund wallet', async () => { + await mainNode.tester.fundSyncWallet(); + mainContract = await mainNode.tester.syncWallet.getMainContract(); + alice = mainNode.tester.emptyWallet(); }); - step('revert blocks', async () => { - let fileConfigFlags = ''; - if (fileConfig.loadFromFile) { - const configPaths = getAllConfigsPath({ - pathToHome, - chain: fileConfig.chain - }); - fileConfigFlags = ` - --config-path=${configPaths['general.yaml']} - --contracts-config-path=${configPaths['contracts.yaml']} - --secrets-path=${configPaths['secrets.yaml']} - --wallets-path=${configPaths['wallets.yaml']} - --genesis-path=${configPaths['genesis.yaml']} - `; - } + // Seal 2 L1 batches. + // One is not enough to test the reversion of sk cache because + // it gets updated with some batch logs only at the start of the next batch. 
+ step('seal L1 batch', async () => { + depositL1BatchNumber = await mainNode.createBatchWithDeposit(alice.address, depositAmount); + }); - const executedProcess = await utils.exec( - `cd ${pathToHome} && RUST_LOG=off cargo run --bin block_reverter --release -- print-suggested-values --json --operator-address ${operatorAddress} ${fileConfigFlags}` - // ^ Switch off logs to not pollute the output JSON - ); - const suggestedValuesOutput = executedProcess.stdout; - const { lastL1BatchNumber, nonce, priorityFee } = parseSuggestedValues(suggestedValuesOutput); - expect(lastL1BatchNumber < blocksCommittedBeforeRevert, 'There should be at least one block for revert').to.be - .true; + step('wait for an L1 batch to get executed', async () => { + await waitToExecuteBatch(mainContract, depositL1BatchNumber); + }); - console.log( - `Reverting with parameters: last unreverted L1 batch number: ${lastL1BatchNumber}, nonce: ${nonce}, priorityFee: ${priorityFee}` - ); + step('restart server with batch execution turned off', async () => { + await mainNode.killAndWaitForShutdown(); + mainNode = await mainNodeSpawner.spawnMainNode(false); + }); - console.log('Sending ETH transaction..'); - await utils.spawn( - `cd ${pathToHome} && cargo run --bin block_reverter --release -- send-eth-transaction --l1-batch-number ${lastL1BatchNumber} --nonce ${nonce} --priority-fee-per-gas ${priorityFee} ${fileConfigFlags}` - ); + step('seal another L1 batch', async () => { + await mainNode.createBatchWithDeposit(alice.address, depositAmount); + }); - console.log('Rolling back DB..'); - await utils.spawn( - `cd ${pathToHome} && cargo run --bin block_reverter --release -- rollback-db --l1-batch-number ${lastL1BatchNumber} --rollback-postgres --rollback-tree --rollback-sk-cache --rollback-vm-runners-cache ${fileConfigFlags}` - ); + step('check wallet balance', async () => { + const balance = await alice.getBalance(); + console.log(`Balance before revert: ${balance}`); + assert(balance === depositAmount * 2n, 'Incorrect balance after deposits'); + }); - let blocksCommitted = await mainContract.getTotalBatchesCommitted(); - expect(blocksCommitted === lastL1BatchNumber, 'Revert on contract was unsuccessful').to.be.true; + step('wait for the new batch to be committed', async () => { + batchesCommittedBeforeRevert = await waitToCommitBatchesWithoutExecution(mainContract); }); - step('execute transaction after revert', async () => { - // Run server. - serverProcess = runServerInBackground({ - components: [components], - stdio: ['ignore', logs, logs], - cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, - chain: fileConfig.chain - }); - await utils.sleep(30); + step('stop server', async () => { + await mainNode.killAndWaitForShutdown(); + }); - const balanceBefore = await alice.getBalance(); - expect(balanceBefore === depositAmount * 2n, 'Incorrect balance after revert').to.be.true; + step('revert batches', async () => { + await executeRevert(pathToHome, fileConfig.chain, operatorAddress, batchesCommittedBeforeRevert, mainContract); + }); - // Execute a transaction - const depositHandle = await tester.syncWallet.deposit({ - token: tester.isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); + step('restart server', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); - let l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); - while (!l1TxResponse) { - console.log(`Deposit ${depositHandle.hash} is not visible to the L1 network; sleeping`); + step('wait until last deposit is re-executed', async () => { + let balanceBefore; + let tryCount = 0; + while ((balanceBefore = await alice.getBalance()) !== 2n * depositAmount && tryCount < 30) { + console.log(`Balance after revert: ${balanceBefore}`); + tryCount++; await utils.sleep(1); - l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); } + assert(balanceBefore === 2n * depositAmount, 'Incorrect balance after revert'); + }); - // ethers doesn't work well with block reversions, so wait for the receipt before calling `.waitFinalize()`. - const l2Tx = await alice._providerL2().getL2TransactionFromPriorityOp(l1TxResponse); - let receipt = null; - do { - receipt = await tester.syncWallet.provider.getTransactionReceipt(l2Tx.hash); - await utils.sleep(1); - } while (receipt == null); - - await depositHandle.waitFinalize(); - expect(receipt.status).to.be.eql(1); - + step('execute transaction after revert', async () => { + await executeDepositAfterRevert(mainNode.tester, alice, depositAmount); const balanceAfter = await alice.getBalance(); - expect(balanceAfter === depositAmount * 3n, 'Incorrect balance after another deposit').to.be.true; + console.log(`Balance after another deposit: ${balanceAfter}`); + assert(balanceAfter === depositAmount * 3n, 'Incorrect balance after another deposit'); }); step('execute transactions after simple restart', async () => { // Execute an L2 transaction - await checkedRandomTransfer(alice, 1n); + await checkRandomTransfer(alice, 1n); // Stop server. - await killServerAndWaitForShutdown(tester, serverProcess!); + await mainNode.killAndWaitForShutdown(); // Run again. - serverProcess = runServerInBackground({ - components: [components], - stdio: ['ignore', logs, logs], - cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, - chain: fileConfig.chain - }); - await utils.sleep(30); + mainNode = await mainNodeSpawner.spawnMainNode(true); // Trying to send a transaction from the same address again - await checkedRandomTransfer(alice, 1n); + await checkRandomTransfer(alice, 1n); }); after('Try killing server', async () => { @@ -332,29 +189,3 @@ describe('Block reverting test', function () { } }); }); - -async function checkedRandomTransfer(sender: zksync.Wallet, amount: bigint) { - const senderBalanceBefore = await sender.getBalance(); - const receiverHD = zksync.Wallet.createRandom(); - const receiver = new zksync.Wallet(receiverHD.privateKey, sender.provider); - const transferHandle = await sender.sendTransaction({ - to: receiver.address, - value: amount, - type: 0 - }); - - // ethers doesn't work well with block reversions, so we poll for the receipt manually. 
- let txReceipt = null; - do { - txReceipt = await sender.provider.getTransactionReceipt(transferHandle.hash); - await utils.sleep(1); - } while (txReceipt == null); - - const senderBalance = await sender.getBalance(); - const receiverBalance = await receiver.getBalance(); - - expect(receiverBalance === amount, 'Failed updated the balance of the receiver').to.be.true; - - const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! + amount; - expect(senderBalance + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender').to.be.true; -} diff --git a/core/tests/revert-test/tests/utils.ts b/core/tests/revert-test/tests/utils.ts index 4e3e292da65..ea8a45b97c3 100644 --- a/core/tests/revert-test/tests/utils.ts +++ b/core/tests/revert-test/tests/utils.ts @@ -1,5 +1,13 @@ import { exec as _exec, spawn as _spawn, ChildProcessWithoutNullStreams, type ProcessEnvOptions } from 'child_process'; import { promisify } from 'util'; +import { assert, expect } from 'chai'; +import { FileConfig, getAllConfigsPath, replaceAggregatedBlockExecuteDeadline } from 'utils/build/file-configs'; +import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain'; +import { Tester } from './tester'; +import { killPidWithAllChilds } from 'utils/build/kill'; +import * as utils from 'utils'; +import fs from 'node:fs/promises'; +import * as zksync from 'zksync-ethers'; // executes a command in background and returns a child process handle // by default pipes data to parent's stdio but this can be overridden @@ -100,3 +108,389 @@ export function exec(command: string, options: ProcessEnvOptions) { command = command.replace(/\n/g, ' '); return promisified(command, options); } + +export interface SuggestedValues { + lastExecutedL1BatchNumber: bigint; + nonce: number; + priorityFee: number; +} + +/** Parses output of "print-suggested-values" command of the revert block tool. */ +export function parseSuggestedValues(jsonString: string): SuggestedValues { + const json = JSON.parse(jsonString); + assert(json && typeof json === 'object'); + assert(Number.isInteger(json.last_executed_l1_batch_number)); + assert(Number.isInteger(json.nonce)); + assert(Number.isInteger(json.priority_fee)); + return { + lastExecutedL1BatchNumber: BigInt(json.last_executed_l1_batch_number), + nonce: json.nonce, + priorityFee: json.priority_fee + }; +} + +async function runBlockReverter( + pathToHome: string, + chain: string | undefined, + env: ProcessEnvOptions['env'] | undefined, + args: string[] +): Promise { + let fileConfigFlags = ''; + if (chain) { + const configPaths = getAllConfigsPath({ pathToHome, chain }); + fileConfigFlags = ` + --config-path=${configPaths['general.yaml']} + --contracts-config-path=${configPaths['contracts.yaml']} + --secrets-path=${configPaths['secrets.yaml']} + --wallets-path=${configPaths['wallets.yaml']} + --genesis-path=${configPaths['genesis.yaml']} + `; + } + + const cmd = `cd ${pathToHome} && RUST_LOG=off cargo run --bin block_reverter --release -- ${args.join( + ' ' + )} ${fileConfigFlags}`; + + const options = env + ? 
+          {
+              cwd: env.ZKSYNC_HOME,
+              env: {
+                  ...env,
+                  PATH: process.env.PATH
+              }
+          }
+        : {};
+    const executedProcess = await exec(cmd, options);
+    return executedProcess.stdout;
+}
+
+export async function executeRevert(
+    pathToHome: string,
+    chain: string | undefined,
+    operatorAddress: string,
+    batchesCommittedBeforeRevert: bigint,
+    mainContract: IZkSyncHyperchain,
+    env?: ProcessEnvOptions['env']
+) {
+    const suggestedValuesOutput = await runBlockReverter(pathToHome, chain, env, [
+        'print-suggested-values',
+        '--json',
+        '--operator-address',
+        operatorAddress
+    ]);
+    const values = parseSuggestedValues(suggestedValuesOutput);
+    assert(
+        values.lastExecutedL1BatchNumber < batchesCommittedBeforeRevert,
+        'There should be at least one block for revert'
+    );
+
+    console.log('Reverting with parameters', values);
+
+    console.log('Sending ETH transaction...');
+    await runBlockReverter(pathToHome, chain, env, [
+        'send-eth-transaction',
+        '--l1-batch-number',
+        values.lastExecutedL1BatchNumber.toString(),
+        '--nonce',
+        values.nonce.toString(),
+        '--priority-fee-per-gas',
+        values.priorityFee.toString()
+    ]);
+
+    console.log('Rolling back DB...');
+    await runBlockReverter(pathToHome, chain, env, [
+        'rollback-db',
+        '--l1-batch-number',
+        values.lastExecutedL1BatchNumber.toString(),
+        '--rollback-postgres',
+        '--rollback-tree',
+        '--rollback-sk-cache',
+        '--rollback-vm-runners-cache'
+    ]);
+
+    const blocksCommitted = await mainContract.getTotalBatchesCommitted();
+    assert(blocksCommitted === values.lastExecutedL1BatchNumber, 'Revert on contract was unsuccessful');
+}
+
+export interface MainNodeSpawnOptions {
+    enableConsensus: boolean;
+    ethClientWeb3Url: string;
+    apiWeb3JsonRpcHttpUrl: string;
+    baseTokenAddress: string;
+}
+
+export enum NodeType {
+    MAIN = 'zksync_server',
+    EXT = 'zksync_external_node'
+}
+
+export class Node<TYPE extends NodeType> {
+    constructor(
+        public readonly tester: Tester,
+        private readonly proc: ChildProcessWithoutNullStreams,
+        private readonly type: TYPE
+    ) {}
+
+    public async terminate() {
+        try {
+            await killPidWithAllChilds(this.proc.pid!, 9);
+        } catch (err) {
+            console.log(`ignored error: ${err}`);
+        }
+    }
+
+    /**
+     * Terminates all running processes of the given node type.
+     *
+     * WARNING: This is not safe to use when running nodes on multiple chains.
+     */
+    public static async killAll(type: NodeType) {
+        try {
+            await utils.exec(`killall -KILL ${type}`);
+        } catch (err) {
+            console.log(`ignored error: ${err}`);
+        }
+    }
+
+    /** Waits for the node process to exit. */
+    public async waitForExit(): Promise<number> {
+        while (this.proc.exitCode === null) {
+            await utils.sleep(1);
+        }
+        return this.proc.exitCode;
+    }
+
+    public async killAndWaitForShutdown() {
+        await this.terminate();
+        // Wait until it's really stopped.
+        let iter = 0;
+        while (iter < 30) {
+            try {
+                await this.tester.syncWallet.provider.getBlockNumber();
+                await utils.sleep(2);
+                iter += 1;
+            } catch (_) {
+                // When exception happens, we assume that server died.
+                return;
+            }
+        }
+        // It's going to panic anyway, since the server is a singleton entity, so better to exit early.
+        throw new Error(`${this.type} didn't stop after a kill request`);
+    }
+
+    public async createBatchWithDeposit(to: string, amount: bigint) {
+        const initialL1BatchNumber = await this.tester.web3Provider.getL1BatchNumber();
+        console.log(`Initial L1 batch: ${initialL1BatchNumber}`);
+
+        const depositHandle = await this.tester.syncWallet.deposit({
+            token: this.tester.isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : this.tester.baseTokenAddress,
+            amount,
+            to,
+            approveBaseERC20: true,
+            approveERC20: true
+        });
+
+        let depositBatchNumber;
+        while (!(depositBatchNumber = (await depositHandle.wait()).l1BatchNumber)) {
+            console.log('Deposit is not included in L1 batch; sleeping');
+            await utils.sleep(1);
+        }
+        console.log(`Deposit was included into L1 batch ${depositBatchNumber}`);
+        expect(depositBatchNumber).to.be.greaterThan(initialL1BatchNumber);
+        return depositBatchNumber;
+    }
+}
+
+export class NodeSpawner {
+    public constructor(
+        private readonly pathToHome: string,
+        private readonly logs: fs.FileHandle,
+        private readonly fileConfig: FileConfig,
+        private readonly options: MainNodeSpawnOptions,
+        private readonly env?: ProcessEnvOptions['env']
+    ) {}
+
+    public async spawnMainNode(enableExecute: boolean): Promise<Node<NodeType.MAIN>> {
+        const env = this.env ?? process.env;
+        env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = enableExecute ? '1' : '10000';
+        // Set full mode for the Merkle tree as it is required to get blocks committed.
+        env.DATABASE_MERKLE_TREE_MODE = 'full';
+
+        const { fileConfig, pathToHome, options, logs } = this;
+
+        if (fileConfig.loadFromFile) {
+            replaceAggregatedBlockExecuteDeadline(pathToHome, fileConfig, enableExecute ? 1 : 10000);
+        }
+
+        let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads';
+        if (options.enableConsensus) {
+            components += ',consensus';
+        }
+        if (options.baseTokenAddress != zksync.utils.LEGACY_ETH_ADDRESS) {
+            components += ',base_token_ratio_persister';
+        }
+        let proc = runServerInBackground({
+            components: [components],
+            stdio: ['ignore', logs, logs],
+            cwd: pathToHome,
+            env: env,
+            useZkInception: fileConfig.loadFromFile,
+            chain: fileConfig.chain
+        });
+
+        // Wait until the main node starts responding.
+        const tester = await Tester.init(
+            options.ethClientWeb3Url,
+            options.apiWeb3JsonRpcHttpUrl,
+            options.baseTokenAddress
+        );
+        await waitForNodeToStart(tester, proc, options.apiWeb3JsonRpcHttpUrl);
+        return new Node(tester, proc, NodeType.MAIN);
+    }
+
+    public async spawnExtNode(): Promise<Node<NodeType.EXT>> {
+        const env = this.env ?? process.env;
+        const { pathToHome, fileConfig, logs, options } = this;
+
+        let args = []; // FIXME: unused
+        if (options.enableConsensus) {
+            args.push('--enable-consensus');
+        }
+
+        // Run the external node in background.
+ let proc = runExternalNodeInBackground({ + stdio: ['ignore', logs, logs], + cwd: pathToHome, + env, + useZkInception: fileConfig.loadFromFile, + chain: fileConfig.chain + }); + + const tester = await Tester.init( + options.ethClientWeb3Url, + options.apiWeb3JsonRpcHttpUrl, + options.baseTokenAddress + ); + await waitForNodeToStart(tester, proc, options.apiWeb3JsonRpcHttpUrl); + return new Node(tester, proc, NodeType.EXT); + } +} + +async function waitForNodeToStart(tester: Tester, proc: ChildProcessWithoutNullStreams, l2Url: string) { + while (true) { + try { + const blockNumber = await tester.syncWallet.provider.getBlockNumber(); + console.log(`Initialized node API on ${l2Url}; latest block: ${blockNumber}`); + break; + } catch (err) { + if (proc.exitCode != null) { + assert.fail(`server failed to start, exitCode = ${proc.exitCode}`); + } + console.log(`Node waiting for API on ${l2Url}`); + await utils.sleep(1); + } + } +} + +export async function waitToExecuteBatch(mainContract: IZkSyncHyperchain, latestBatch: number) { + let tryCount = 0; + const initialExecutedBatch = await mainContract.getTotalBatchesExecuted(); + console.log(`Initial executed L1 batch: ${initialExecutedBatch}`); + + if (initialExecutedBatch >= latestBatch) { + console.log('Latest batch is executed; no need to wait'); + return; + } + + let lastExecutedBatch; + while ( + (lastExecutedBatch = await mainContract.getTotalBatchesExecuted()) === initialExecutedBatch && + tryCount < 100 + ) { + console.log(`Last executed batch: ${lastExecutedBatch}`); + tryCount++; + await utils.sleep(1); + } + assert(lastExecutedBatch > initialExecutedBatch); +} + +export async function waitToCommitBatchesWithoutExecution(mainContract: IZkSyncHyperchain): Promise<bigint> { + let batchesCommitted = await mainContract.getTotalBatchesCommitted(); + let batchesExecuted = await mainContract.getTotalBatchesExecuted(); + console.log(`Batches committed: ${batchesCommitted}, executed: ${batchesExecuted}`); + + let tryCount = 0; + while ((batchesExecuted === 0n || batchesCommitted === batchesExecuted) && tryCount < 100) { + await utils.sleep(1); + batchesCommitted = await mainContract.getTotalBatchesCommitted(); + batchesExecuted = await mainContract.getTotalBatchesExecuted(); + console.log(`Batches committed: ${batchesCommitted}, executed: ${batchesExecuted}`); + tryCount += 1; + } + expect(batchesCommitted > batchesExecuted, 'There is no committed but not executed batch').to.be.true; + return batchesCommitted; +} + +export async function executeDepositAfterRevert(tester: Tester, wallet: zksync.Wallet, amount: bigint) { + const depositHandle = await tester.syncWallet.deposit({ + token: tester.isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, + amount, + to: wallet.address, + approveBaseERC20: true, + approveERC20: true + }); + + let l1TxResponse = await wallet._providerL1().getTransaction(depositHandle.hash); + while (!l1TxResponse) { + console.log(`Deposit ${depositHandle.hash} is not visible to the L1 network; sleeping`); + await utils.sleep(1); + l1TxResponse = await wallet._providerL1().getTransaction(depositHandle.hash); + } + console.log(`Got L1 deposit tx`, l1TxResponse); + + // ethers doesn't work well with block reversions, so wait for the receipt before calling `.waitFinalize()`.
+ const l2Tx = await wallet._providerL2().getL2TransactionFromPriorityOp(l1TxResponse); + let receipt = null; + while (receipt === null) { + console.log(`L2 deposit transaction ${l2Tx.hash} is not confirmed; sleeping`); + await utils.sleep(1); + receipt = await tester.syncWallet.provider.getTransactionReceipt(l2Tx.hash); + } + expect(receipt.status).to.be.eql(1); + console.log(`L2 deposit transaction ${l2Tx.hash} is confirmed`); + + await depositHandle.waitFinalize(); + console.log('New deposit is finalized'); +} + +export async function checkRandomTransfer(sender: zksync.Wallet, amount: bigint) { + const senderBalanceBefore = await sender.getBalance(); + console.log(`Sender's balance before transfer: ${senderBalanceBefore}`); + + const receiverHD = zksync.Wallet.createRandom(); + const receiver = new zksync.Wallet(receiverHD.privateKey, sender.provider); + const transferHandle = await sender.sendTransaction({ + to: receiver.address, + value: amount, + type: 0 + }); + + // ethers doesn't work well with block reversions, so we poll for the receipt manually. + let txReceipt = null; + while (txReceipt === null) { + console.log(`Transfer ${transferHandle.hash} is not confirmed, sleeping`); + await utils.sleep(1); + txReceipt = await sender.provider.getTransactionReceipt(transferHandle.hash); + } + + const senderBalance = await sender.getBalance(); + console.log(`Sender's balance after transfer: ${senderBalance}`); + const receiverBalance = await receiver.getBalance(); + console.log(`Receiver's balance after transfer: ${receiverBalance}`); + + assert(receiverBalance === amount, 'Failed to update the balance of the receiver'); + + const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! + amount; + console.log(`Expected spent amount: ${spentAmount}`); + assert(senderBalance + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender'); +} diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index b20e9d1e37d..79789e74447 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -202,7 +202,7 @@ describe('web3 API compatibility tests', () => { test('Should test web3 response extensions', async () => { if (testMaster.isFastMode()) { - // This test requires a new L1 batch to be created, which may be very time consuming on stage. + // This test requires a new L1 batch to be created, which may be very time-consuming on stage. return; } @@ -333,7 +333,7 @@ describe('web3 API compatibility tests', () => { // Pubsub notifier is not reactive + tests are being run in parallel, so we can't expect that the next block // would be expected one. Instead, we just want to receive an event with the particular block number. - wsProvider.on('block', (block) => { + await wsProvider.on('block', (block) => { if (block >= currentBlock) { newBlock = block; } @@ -355,7 +355,6 @@ describe('web3 API compatibility tests', () => { // ...though the gap should not be *too* big. expect(newBlock).toBeLessThan(currentBlock + 100); await tx.wait(); // To not leave a hanging promise. - wsProvider.removeAllListeners(); await wsProvider.destroy(); }); @@ -368,7 +367,7 @@ describe('web3 API compatibility tests', () => { let newTxHash: string | null = null; // We can't use `once` as there may be other pending txs sent together with our one.
- wsProvider.on('pending', async (txHash) => { + await wsProvider.on('pending', async (txHash) => { const tx = await alice.provider.getTransaction(txHash); // We're waiting for the exact transaction to appear. if (!tx || tx.to != uniqueRecipient) { @@ -392,7 +391,6 @@ describe('web3 API compatibility tests', () => { expect(newTxHash as string).toEqual(tx.hash); await tx.wait(); // To not leave a hanging promise. - wsProvider.removeAllListeners(); await wsProvider.destroy(); }); @@ -404,7 +402,7 @@ describe('web3 API compatibility tests', () => { // We're sending a few transfers from the wallet, so we'll use a new account to make event unique. let uniqueRecipient = testMaster.newEmptyAccount().address; - // Setup a filter for an ERC20 transfer. + // Set up a filter for an ERC20 transfer. const erc20TransferTopic = ethers.id('Transfer(address,address,uint256)'); let filter = { address: l2Token, @@ -414,15 +412,15 @@ describe('web3 API compatibility tests', () => { ethers.zeroPadValue(uniqueRecipient, 32) // Recipient ] }; - wsProvider.once(filter, (event) => { + await wsProvider.once(filter, (event) => { newEvent = event; }); - // Setup a filter that should not match anything. + // Set up a filter that should not match anything. let incorrectFilter = { address: alice.address }; - wsProvider.once(incorrectFilter, (_) => { + await wsProvider.once(incorrectFilter, (_) => { expect(null).fail('Found log for incorrect filter'); }); @@ -439,7 +437,6 @@ describe('web3 API compatibility tests', () => { expect((newEvent as any).transactionHash).toEqual(tx.hash); await tx.wait(); // To not leave a hanging promise. - wsProvider.removeAllListeners(); await wsProvider.destroy(); }); @@ -608,7 +605,7 @@ describe('web3 API compatibility tests', () => { // Pubsub notify is not reactive and may be laggy, so we want to increase the chances // for test to pass. So we try to sleep a few iterations until we receive expected amount - // of events. If we won't receive them, we continue and the test will fail anyway. + // of events. If we don't receive them, we continue and the test will fail anyway. const expectedTrivialEventsCount = 2; const expectedSimpleEventsCount = 2; const expectedIndexedEventsCount = 1; @@ -681,42 +678,9 @@ describe('web3 API compatibility tests', () => { ).resolves.toHaveProperty('result', expect.stringMatching(HEX_VALUE_REGEX)); }); - test('Should check API returns error when there are too many logs in eth_getLogs', async () => { - const contract = await deployContract(alice, contracts.events, []); - const maxLogsLimit = testMaster.environment().maxLogsLimit; - - // Send 3 transactions that emit `maxLogsLimit / 2` events. - const tx1 = await contract.emitManyEvents(maxLogsLimit / 2); - const tx1Receipt = await tx1.wait(); - - const tx2 = await contract.emitManyEvents(maxLogsLimit / 2); - await tx2.wait(); - - const tx3 = await contract.emitManyEvents(maxLogsLimit / 2); - const tx3Receipt = await tx3.wait(); - - // There are around `0.5 * maxLogsLimit` logs in [tx1Receipt.blockNumber, tx1Receipt.blockNumber] range, - // so query with such filter should succeed. - await expect( - alice.provider.getLogs({ - fromBlock: tx1Receipt.blockNumber, - toBlock: tx1Receipt.blockNumber - }) - ).resolves; - - // There are at least `1.5 * maxLogsLimit` logs in [tx1Receipt.blockNumber, tx3Receipt.blockNumber] range, - // so query with such filter should fail. 
- await expect( - alice.provider.getLogs({ - fromBlock: tx1Receipt.blockNumber, - toBlock: tx3Receipt.blockNumber - }) - ).rejects.toThrow(`Query returned more than ${maxLogsLimit} results.`); - }); - test('Should throw error for estimate gas for account with balance < tx.value', async () => { let poorBob = testMaster.newEmptyAccount(); - expect( + await expect( poorBob.estimateGas({ value: 1, to: alice.address }) ).toBeRejected(/*'insufficient balance for transfer'*/); }); @@ -860,7 +824,7 @@ describe('web3 API compatibility tests', () => { const getLogsByHash = (await alice.provider.getLogs({ blockHash: latestBlock.hash || undefined })).map((x) => { return new zksync.types.Log({ ...x, l1BatchNumber: 0 }, alice.provider); // Set bogus value. }); - await expect(getLogsByNumber).toEqual(getLogsByHash); + expect(getLogsByNumber).toEqual(getLogsByHash); // Check that incorrect queries are rejected. await expect( @@ -1030,7 +994,7 @@ describe('web3 API compatibility tests', () => { const incrementFunctionData = contract2.interface.encodeFunctionData('increment', [1]); // Assert that the estimation fails because the increment function is not present in contract1 - expect( + await expect( alice.provider.estimateGas({ to: contract1Address.toString(), data: incrementFunctionData diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index ffa28e4f109..0f70e751b84 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -280,9 +280,11 @@ describe('Upgrade test', function () { ); executeOperation = chainUpgradeCalldata; + console.log('Sending scheduleTransparentOperation'); await sendGovernanceOperation(stmUpgradeData.scheduleTransparentOperation); + console.log('Sending executeOperation'); await sendGovernanceOperation(stmUpgradeData.executeOperation); - + console.log('Sending chain admin operation'); await sendChainAdminOperation(setTimestampCalldata); // Wait for server to process L1 event. 
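The hunk that follows replaces the fire-and-forget sends in `sendGovernanceOperation` and `sendChainAdminOperation` with logged sends, so a stuck upgrade can be traced by transaction hash and nonce. Since the two helpers differ only in the wallet and target contract, they could in principle share one helper; here is a minimal TypeScript sketch under that assumption (the `sendLoggedOperation` name and the `label` parameter are hypothetical, not part of this change):

import { ethers } from 'ethers';

// Hypothetical consolidation of the two senders refactored in the next hunk.
async function sendLoggedOperation(wallet: ethers.Wallet, to: string, data: string, label: string) {
    // type: 0 keeps the legacy transaction type used by the upgrade test.
    const transaction = await wallet.sendTransaction({ to, data, type: 0 });
    console.log(`Sent ${label}, tx_hash=${transaction.hash}, nonce=${transaction.nonce}`);
    await transaction.wait();
    console.log(`${label} succeeded, tx_hash=${transaction.hash}`);
}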
@@ -371,23 +373,25 @@ describe('Upgrade test', function () { }); async function sendGovernanceOperation(data: string) { - await ( - await ecosystemGovWallet.sendTransaction({ - to: await governanceContract.getAddress(), - data: data, - type: 0 - }) - ).wait(); + const transaction = await ecosystemGovWallet.sendTransaction({ + to: await governanceContract.getAddress(), + data: data, + type: 0 + }); + console.log(`Sent governance operation, tx_hash=${transaction.hash}, nonce=${transaction.nonce}`); + await transaction.wait(); + console.log(`Governance operation succeeded, tx_hash=${transaction.hash}`); } async function sendChainAdminOperation(data: string) { - await ( - await adminGovWallet.sendTransaction({ - to: await chainAdminContract.getAddress(), - data: data, - type: 0 - }) - ).wait(); + const transaction = await adminGovWallet.sendTransaction({ + to: await chainAdminContract.getAddress(), + data: data, + type: 0 + }); + console.log(`Sent chain admin operation, tx_hash=${transaction.hash}, nonce=${transaction.nonce}`); + await transaction.wait(); + console.log(`Chain admin operation succeeded, tx_hash=${transaction.hash}`); } }); diff --git a/docker-compose-cpu-runner.yml b/docker-compose-cpu-runner.yml index 08d01390d77..beb54f3ade9 100644 --- a/docker-compose-cpu-runner.yml +++ b/docker-compose-cpu-runner.yml @@ -2,7 +2,7 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" volumes: - type: bind source: ./volumes/reth/data diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index 92a7b0b0088..35a0faeb962 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -2,7 +2,7 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" volumes: - type: bind source: ./volumes/reth/data diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index bbd61715842..f95ae0d5f54 100644 --- a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml @@ -2,7 +2,7 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" volumes: - type: bind source: ./volumes/reth/data diff --git a/docker-compose.yml b/docker-compose.yml index 7751c99d68a..1e3a273ec9a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" ports: - 127.0.0.1:8545:8545 volumes: diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile index a3d92d113cd..45f2ffa51b0 100644 --- a/docker/proof-fri-gpu-compressor/Dockerfile +++ b/docker/proof-fri-gpu-compressor/Dockerfile @@ -4,8 +4,7 @@ FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder ARG DEBIAN_FRONTEND=noninteractive ARG CUDA_ARCH=89 -ARG A100_CUDA_ARCH=80 -ENV CUDAARCHS=${CUDA_ARCH};${A100_CUDA_ARCH} +ENV CUDAARCHS=${CUDA_ARCH} RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ git \ pkg-config build-essential libclang-dev && \ diff --git a/etc/utils/src/file-configs.ts b/etc/utils/src/file-configs.ts index fad72901d15..374bf53f6be 100644 --- a/etc/utils/src/file-configs.ts +++ b/etc/utils/src/file-configs.ts @@ -2,18 +2,11 @@ import * as path from 'path'; import * as fs from 'fs'; import * 
as yaml from 'yaml'; -export function shouldLoadConfigFromFile() { +export type FileConfig = { loadFromFile: false; chain?: undefined } | { loadFromFile: true; chain: string }; + +export function shouldLoadConfigFromFile(): FileConfig { const chain = process.env.CHAIN_NAME; - if (chain) { - return { - loadFromFile: true, - chain - } as const; - } else { - return { - loadFromFile: false - } as const; - } + return chain ? { loadFromFile: true, chain } : { loadFromFile: false }; } export const configNames = [ diff --git a/etc/utils/src/logs.ts b/etc/utils/src/logs.ts index cdb26f5ad1b..7db54ef8600 100644 --- a/etc/utils/src/logs.ts +++ b/etc/utils/src/logs.ts @@ -1,7 +1,7 @@ import path from 'path'; import fs from 'node:fs/promises'; -const pathToHome = path.join(__dirname, '../../../..'); +const pathToHome = path.join(__dirname, '../../..'); export async function logsTestPath(chain: string | undefined, relativePath: string, name: string): Promise<string> { chain = chain ? chain! : 'default'; diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 24e8638876b..e77bb4f488b 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -500,27 +500,6 @@ dependencies = [ "which", ] -[[package]] -name = "bindgen" -version = "0.65.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "peeking_take_while", - "prettyplease", - "proc-macro2 1.0.85", - "quote 1.0.36", - "regex", - "rustc-hash", - "shlex", - "syn 2.0.66", -] - [[package]] name = "bindgen" version = "0.69.4" @@ -826,17 +805,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" -[[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - [[package]] name = "cc" version = "1.1.14" @@ -3238,12 +3206,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" -[[package]] -name = "leb128" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" - [[package]] name = "libc" version = "0.2.155" @@ -3266,22 +3228,6 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" -[[package]] -name = "librocksdb-sys" -version = "0.11.0+8.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" -dependencies = [ - "bindgen 0.65.1", - "bzip2-sys", - "cc", - "glob", - "libc", - "libz-sys", - "lz4-sys", - "zstd-sys", -] - [[package]] name = "libsqlite3-sys" version = "0.30.1" @@ -3293,17 +3239,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "libz-sys" -version = "1.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -3370,16 +3305,6 @@ dependencies = [
"logos-codegen", ] -[[package]] -name = "lz4-sys" -version = "1.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "match_cfg" version = "0.1.0" @@ -4957,16 +4882,6 @@ dependencies = [ "rustc-hex", ] -[[package]] -name = "rocksdb" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" -dependencies = [ - "libc", - "librocksdb-sys", -] - [[package]] name = "rsa" version = "0.9.6" @@ -7620,9 +7535,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" dependencies = [ "anyhow", "once_cell", @@ -7656,9 +7571,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" +checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af" dependencies = [ "anyhow", "blst", @@ -7680,9 +7595,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" +checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa" dependencies = [ "anyhow", "bit-vec", @@ -7702,9 +7617,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" +checksum = "9099b2295f550176d824b5287f2f31b7739c4d10247faec1132f1c6e9d18059c" dependencies = [ "anyhow", "async-trait", @@ -7722,9 +7637,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", "rand 0.8.5", @@ -7754,9 +7669,7 @@ dependencies = [ "serde_yaml", "tokio", "zksync_config", - "zksync_dal", "zksync_env_config", - "zksync_node_genesis", "zksync_protobuf", "zksync_protobuf_config", ] @@ -7824,7 +7737,6 @@ dependencies = [ "tracing", "vise", "zksync_basic_types", - "zksync_health_check", ] [[package]] @@ -7865,20 +7777,6 @@ dependencies = [ "zksync_types", ] -[[package]] -name = "zksync_health_check" -version = "0.1.0" -dependencies = [ - "async-trait", - "futures 0.3.30", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "vise", -] - [[package]] name = "zksync_kzg" version = "0.150.4" @@ -7896,25 +7794,6 @@ dependencies = [ "zkevm_circuits 0.150.4", ] -[[package]] -name = "zksync_merkle_tree" -version = "0.1.0" -dependencies = [ - "anyhow", - "leb128", - "once_cell", - "rayon", - "thiserror", - "thread_local", - "tracing", - "vise", - "zksync_crypto_primitives", - "zksync_prover_interface", - 
"zksync_storage", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_mini_merkle_tree" version = "0.1.0" @@ -7954,27 +7833,6 @@ dependencies = [ "zksync_vm_interface", ] -[[package]] -name = "zksync_node_genesis" -version = "0.1.0" -dependencies = [ - "anyhow", - "itertools 0.10.5", - "thiserror", - "tokio", - "tracing", - "vise", - "zksync_config", - "zksync_contracts", - "zksync_dal", - "zksync_eth_client", - "zksync_merkle_tree", - "zksync_multivm", - "zksync_system_constants", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_object_store" version = "0.1.0" @@ -8034,9 +7892,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" dependencies = [ "anyhow", "bit-vec", @@ -8055,9 +7913,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" dependencies = [ "anyhow", "heck 0.5.0", @@ -8252,18 +8110,6 @@ dependencies = [ "zksync_utils", ] -[[package]] -name = "zksync_storage" -version = "0.1.0" -dependencies = [ - "num_cpus", - "once_cell", - "rocksdb", - "thread_local", - "tracing", - "vise", -] - [[package]] name = "zksync_system_constants" version = "0.1.0" @@ -8314,7 +8160,6 @@ dependencies = [ "bigdecimal", "futures 0.3.30", "hex", - "itertools 0.10.5", "num", "once_cell", "reqwest 0.12.5", @@ -8471,13 +8316,3 @@ dependencies = [ "zksync_utils", "zksync_vlog", ] - -[[package]] -name = "zstd-sys" -version = "2.0.10+zstd.1.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" -dependencies = [ - "cc", - "pkg-config", -] diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index cd5d6a0b280..75859021979 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -6349,9 +6349,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" dependencies = [ "anyhow", "once_cell", @@ -6383,9 +6383,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", "rand", @@ -6434,9 +6434,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" dependencies = [ "anyhow", "bit-vec", @@ -6455,9 +6455,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = 
"0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" dependencies = [ "anyhow", "heck", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 4a08776558e..e1ad63136af 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -30,7 +30,7 @@ types = { path = "crates/types" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } -zksync_protobuf = "=0.1.0-rc.11" +zksync_protobuf = "=0.1.0-rc.12" # External dependencies anyhow = "1.0.82" diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 25eeff40247..904b1421e3a 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -504,12 +504,19 @@ Run prover Possible values: `gateway`, `witness-generator`, `witness-vector-generator`, `prover`, `compressor`, `prover-job-monitor` +- `--docker` - Whether to run image of the component instead of binary. + + Possible values: `true`, `false` + - `--round ` Possible values: `all-rounds`, `basic-circuits`, `leaf-aggregation`, `node-aggregation`, `recursion-tip`, `scheduler` - `--threads ` +- `--max-allocation ` - in case you are running prover component, the value limits maximum + memory allocation of it in bytes. + ## `zk_inception prover init-bellman-cuda` Initialize bellman-cuda diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs index 6bdd62c1d48..751cc48074f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs @@ -1,8 +1,22 @@ +use anyhow::anyhow; use clap::{Parser, ValueEnum}; use common::{Prompt, PromptSelect}; +use config::ChainConfig; use strum::{EnumIter, IntoEnumIterator}; -use crate::messages::{MSG_ROUND_SELECT_PROMPT, MSG_RUN_COMPONENT_PROMPT, MSG_THREADS_PROMPT}; +use crate::{ + consts::{ + COMPRESSOR_BINARY_NAME, COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE, + PROVER_GATEWAY_BINARY_NAME, PROVER_GATEWAY_DOCKER_IMAGE, PROVER_JOB_MONITOR_BINARY_NAME, + PROVER_JOB_MONITOR_DOCKER_IMAGE, WITNESS_GENERATOR_BINARY_NAME, + WITNESS_GENERATOR_DOCKER_IMAGE, WITNESS_VECTOR_GENERATOR_BINARY_NAME, + WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE, + }, + messages::{ + MSG_ROUND_SELECT_PROMPT, MSG_RUN_COMPONENT_PROMPT, MSG_THREADS_PROMPT, + MSG_WITNESS_GENERATOR_ROUND_ERR, + }, +}; #[derive(Debug, Clone, Parser, Default)] pub struct ProverRunArgs { @@ -12,6 +26,10 @@ pub struct ProverRunArgs { pub witness_generator_args: WitnessGeneratorArgs, #[clap(flatten)] pub witness_vector_generator_args: WitnessVectorGeneratorArgs, + #[clap(flatten)] + pub fri_prover_args: FriProverRunArgs, + #[clap(long)] + pub docker: Option, } #[derive( @@ -32,6 +50,108 @@ pub enum ProverComponent { ProverJobMonitor, } +impl ProverComponent { + pub fn image_name(&self) -> &'static str { + match self { + Self::Gateway => PROVER_GATEWAY_DOCKER_IMAGE, + Self::WitnessGenerator => WITNESS_GENERATOR_DOCKER_IMAGE, + Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE, + Self::Prover => PROVER_DOCKER_IMAGE, + Self::Compressor => COMPRESSOR_DOCKER_IMAGE, + Self::ProverJobMonitor => 
PROVER_JOB_MONITOR_DOCKER_IMAGE, + } + } + + pub fn binary_name(&self) -> &'static str { + match self { + Self::Gateway => PROVER_GATEWAY_BINARY_NAME, + Self::WitnessGenerator => WITNESS_GENERATOR_BINARY_NAME, + Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_BINARY_NAME, + Self::Prover => PROVER_BINARY_NAME, + Self::Compressor => COMPRESSOR_BINARY_NAME, + Self::ProverJobMonitor => PROVER_JOB_MONITOR_BINARY_NAME, + } + } + + pub fn get_application_args(&self, in_docker: bool) -> anyhow::Result<Vec<String>> { + let mut application_args = vec![]; + + if self == &Self::Prover || self == &Self::Compressor { + if in_docker { + application_args.push("--gpus=all".to_string()); + } else { + application_args.push("--features=gpu".to_string()); + } + } + + Ok(application_args) + } + + pub fn get_additional_args( + &self, + in_docker: bool, + args: ProverRunArgs, + chain: &ChainConfig, + ) -> anyhow::Result<Vec<String>> { + let mut additional_args = vec![]; + if in_docker { + additional_args.push("--config-path=/configs/general.yaml".to_string()); + additional_args.push("--secrets-path=/configs/secrets.yaml".to_string()); + } else { + let general_config = chain + .path_to_general_config() + .into_os_string() + .into_string() + .map_err(|_| anyhow!("Failed to convert path to string"))?; + let secrets_config = chain + .path_to_secrets_config() + .into_os_string() + .into_string() + .map_err(|_| anyhow!("Failed to convert path to string"))?; + + additional_args.push(format!("--config-path={}", general_config)); + additional_args.push(format!("--secrets-path={}", secrets_config)); + } + + match self { + Self::WitnessGenerator => { + additional_args.push( + match args + .witness_generator_args + .round + .expect(MSG_WITNESS_GENERATOR_ROUND_ERR) + { + WitnessGeneratorRound::AllRounds => "--all_rounds", + WitnessGeneratorRound::BasicCircuits => "--round=basic_circuits", + WitnessGeneratorRound::LeafAggregation => "--round=leaf_aggregation", + WitnessGeneratorRound::NodeAggregation => "--round=node_aggregation", + WitnessGeneratorRound::RecursionTip => "--round=recursion_tip", + WitnessGeneratorRound::Scheduler => "--round=scheduler", + } + .to_string(), + ); + } + Self::WitnessVectorGenerator => { + additional_args.push(format!( + "--threads={}", + args.witness_vector_generator_args.threads.unwrap_or(1) + )); + } + Self::Prover => { + if args.fri_prover_args.max_allocation.is_some() { + additional_args.push(format!( + "--max-allocation={}", + args.fri_prover_args.max_allocation.unwrap() + )); + }; + } + _ => {} + }; + + Ok(additional_args) + } +} + #[derive(Debug, Clone, Parser, Default)] pub struct WitnessGeneratorArgs { #[clap(long)] @@ -76,8 +196,15 @@ impl WitnessVectorGeneratorArgs { } } +#[derive(Debug, Clone, Parser, Default)] +pub struct FriProverRunArgs { + /// Memory allocation limit in bytes (for prover component) + #[clap(long)] + pub max_allocation: Option<usize>, +} + impl ProverRunArgs { - pub fn fill_values_with_prompt(&self) -> anyhow::Result<ProverRunArgs> { + pub fn fill_values_with_prompt(self) -> anyhow::Result<ProverRunArgs> { let component = self.component.unwrap_or_else(|| { PromptSelect::new(MSG_RUN_COMPONENT_PROMPT, ProverComponent::iter()).ask() }); @@ -90,10 +217,18 @@ impl ProverRunArgs { .witness_vector_generator_args .fill_values_with_prompt(component)?; + let docker = self.docker.unwrap_or_else(|| { + Prompt::new("Do you want to run Docker image for the component?") + .default("false") + .ask() + }); + Ok(ProverRunArgs { component: Some(component), witness_generator_args, witness_vector_generator_args, + fri_prover_args:
self.fri_prover_args, + docker: Some(docker), }) } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs index a819c3322a8..78116e40d6c 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs @@ -1,22 +1,21 @@ -use anyhow::Context; +use std::path::PathBuf; + +use anyhow::{anyhow, Context}; use common::{check_prerequisites, cmd::Cmd, config::global_config, logger, GPU_PREREQUISITES}; -use config::{ChainConfig, EcosystemConfig}; +use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::{ - args::run::{ - ProverComponent, ProverRunArgs, WitnessGeneratorArgs, WitnessGeneratorRound, - WitnessVectorGeneratorArgs, - }, + args::run::{ProverComponent, ProverRunArgs}, utils::get_link_to_prover, }; use crate::messages::{ MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR, MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_PROVER_JOB_MONITOR, - MSG_RUNNING_WITNESS_GENERATOR, MSG_RUNNING_WITNESS_GENERATOR_ERR, - MSG_RUNNING_WITNESS_VECTOR_GENERATOR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, - MSG_WITNESS_GENERATOR_ROUND_ERR, + MSG_RUNNING_PROVER_JOB_MONITOR_ERR, MSG_RUNNING_WITNESS_GENERATOR, + MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR, + MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, }; pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<()> { @@ -29,114 +28,110 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() let link_to_prover = get_link_to_prover(&ecosystem_config); shell.change_dir(link_to_prover.clone()); - match args.component { - Some(ProverComponent::Gateway) => run_gateway(shell, &chain)?, - Some(ProverComponent::WitnessGenerator) => { - run_witness_generator(shell, &chain, args.witness_generator_args)? + let component = args.component.context(anyhow!(MSG_MISSING_COMPONENT_ERR))?; + let in_docker = args.docker.unwrap_or(false); + + let application_args = component.get_application_args(in_docker)?; + let additional_args = component.get_additional_args(in_docker, args, &chain)?; + + let (message, error) = match component { + ProverComponent::WitnessGenerator => ( + MSG_RUNNING_WITNESS_GENERATOR, + MSG_RUNNING_WITNESS_GENERATOR_ERR, + ), + ProverComponent::WitnessVectorGenerator => ( + MSG_RUNNING_WITNESS_VECTOR_GENERATOR, + MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, + ), + ProverComponent::Prover => { + if !in_docker { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + } + (MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR) } - Some(ProverComponent::WitnessVectorGenerator) => { - run_witness_vector_generator(shell, &chain, args.witness_vector_generator_args)? 
+ ProverComponent::Compressor => { + if !in_docker { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + shell.set_var( + "BELLMAN_CUDA_DIR", + ecosystem_config + .bellman_cuda_dir + .clone() + .expect(MSG_BELLMAN_CUDA_DIR_ERR), + ); + } + (MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR) } - Some(ProverComponent::Prover) => run_prover(shell, &chain)?, - Some(ProverComponent::Compressor) => run_compressor(shell, &chain, &ecosystem_config)?, - Some(ProverComponent::ProverJobMonitor) => run_prover_job_monitor(shell, &chain)?, - None => anyhow::bail!(MSG_MISSING_COMPONENT_ERR), + ProverComponent::ProverJobMonitor => ( + MSG_RUNNING_PROVER_JOB_MONITOR, + MSG_RUNNING_PROVER_JOB_MONITOR_ERR, + ), + ProverComponent::Gateway => (MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR), + }; + + if in_docker { + let path_to_configs = chain.configs.clone(); + let path_to_prover = get_link_to_prover(&ecosystem_config); + run_dockerized_component( + shell, + component.image_name(), + &application_args, + &additional_args, + message, + error, + &path_to_configs, + &path_to_prover, + )? + } else { + run_binary_component( + shell, + component.binary_name(), + &application_args, + &additional_args, + message, + error, + )? } Ok(()) } -fn run_gateway(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_PROVER_GATEWAY); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_prover_fri_gateway -- --config-path={config_path} --secrets-path={secrets_path}")); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_PROVER_GATEWAY_ERR) -} - -fn run_witness_generator( +#[allow(clippy::too_many_arguments)] +fn run_dockerized_component( shell: &Shell, - chain: &ChainConfig, - args: WitnessGeneratorArgs, + image_name: &str, + application_args: &[String], + args: &[String], + message: &'static str, + error: &'static str, + path_to_configs: &PathBuf, + path_to_prover: &PathBuf, ) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_WITNESS_GENERATOR); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - let round = args.round.expect(MSG_WITNESS_GENERATOR_ROUND_ERR); + logger::info(message); - let round_str = match round { - WitnessGeneratorRound::AllRounds => "--all_rounds", - WitnessGeneratorRound::BasicCircuits => "--round=basic_circuits", - WitnessGeneratorRound::LeafAggregation => "--round=leaf_aggregation", - WitnessGeneratorRound::NodeAggregation => "--round=node_aggregation", - WitnessGeneratorRound::RecursionTip => "--round=recursion_tip", - WitnessGeneratorRound::Scheduler => "--round=scheduler", - }; + let mut cmd = Cmd::new(cmd!( + shell, + "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name} {args...}" + )); - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_witness_generator -- {round_str} --config-path={config_path} --secrets-path={secrets_path}")); cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_WITNESS_GENERATOR_ERR) + cmd.run().context(error) } -fn run_witness_vector_generator( +fn run_binary_component( shell: &Shell, - chain: &ChainConfig, - args: WitnessVectorGeneratorArgs, + binary_name: &str, + application_args: &[String], + args: &[String], + message: &'static str, + error: &'static str, ) -> anyhow::Result<()> { - 
logger::info(MSG_RUNNING_WITNESS_VECTOR_GENERATOR); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - let threads = args.threads.unwrap_or(1).to_string(); - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_witness_vector_generator -- --config-path={config_path} --secrets-path={secrets_path} --threads={threads}")); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR) -} - -fn run_prover(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { - check_prerequisites(shell, &GPU_PREREQUISITES, false); - logger::info(MSG_RUNNING_PROVER); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - let mut cmd = Cmd::new( - cmd!(shell, "cargo run --features gpu --release --bin zksync_prover_fri -- --config-path={config_path} --secrets-path={secrets_path}"), - ); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_PROVER_ERR) -} - -fn run_compressor( - shell: &Shell, - chain: &ChainConfig, - ecosystem: &EcosystemConfig, -) -> anyhow::Result<()> { - check_prerequisites(shell, &GPU_PREREQUISITES, false); - logger::info(MSG_RUNNING_COMPRESSOR); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - shell.set_var( - "BELLMAN_CUDA_DIR", - ecosystem - .bellman_cuda_dir - .clone() - .expect(MSG_BELLMAN_CUDA_DIR_ERR), - ); - - let mut cmd = Cmd::new(cmd!(shell, "cargo run --features gpu --release --bin zksync_proof_fri_compressor -- --config-path={config_path} --secrets-path={secrets_path}")); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_COMPRESSOR_ERR) -} - -fn run_prover_job_monitor(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_PROVER_JOB_MONITOR); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); + logger::info(message); - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_prover_job_monitor -- --config-path={config_path} --secrets-path={secrets_path}")); + let mut cmd = Cmd::new(cmd!( + shell, + "cargo run {application_args...} --release --bin {binary_name} -- {args...}" + )); cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_PROVER_JOB_MONITOR) + cmd.run().context(error) } diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs index 7db976c6103..72c8948a65d 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zk_toolbox/crates/zk_inception/src/consts.rs @@ -15,3 +15,18 @@ pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; /// Path to the JS runtime config for the dapp-portal docker container to be mounted to pub const PORTAL_DOCKER_CONFIG_PATH: &str = "/usr/src/app/dist/config.js"; pub const PORTAL_DOCKER_IMAGE: &str = "matterlabs/dapp-portal"; + +pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway:latest2.0"; +pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:latest2.0"; +pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = + "matterlabs/witness-vector-generator:latest2.0"; +pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; +pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; +pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; + +pub const PROVER_GATEWAY_BINARY_NAME: &str = 
"zksync_prover_fri_gateway"; +pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator"; +pub const WITNESS_VECTOR_GENERATOR_BINARY_NAME: &str = "zksync_witness_vector_generator"; +pub const PROVER_BINARY_NAME: &str = "zksync_prover_fri"; +pub const COMPRESSOR_BINARY_NAME: &str = "zksync_proof_fri_compressor"; +pub const PROVER_JOB_MONITOR_BINARY_NAME: &str = "zksync_prover_job_monitor"; diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 99af684010a..6f94a7b102a 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -311,6 +311,7 @@ pub(super) const MSG_GENERATING_SK_SPINNER: &str = "Generating setup keys..."; pub(super) const MSG_SK_GENERATED: &str = "Setup keys generated successfully"; pub(super) const MSG_MISSING_COMPONENT_ERR: &str = "Missing component"; pub(super) const MSG_RUNNING_PROVER_GATEWAY: &str = "Running gateway"; +pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR_ERR: &str = "Failed to run prover job monitor"; pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR: &str = "Running prover job monitor"; pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness generator"; pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator"; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs index 292c7d7d715..435dddfc360 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs @@ -1,7 +1,7 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use crate::messages::{MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP}; +use crate::messages::{MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, MSG_TEST_PATTERN_HELP}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct IntegrationArgs { @@ -9,4 +9,6 @@ pub struct IntegrationArgs { pub external_node: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, + #[clap(short, long, help = MSG_TEST_PATTERN_HELP)] + pub test_pattern: Option, } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs index e1ec932ca7f..fb3e1436acc 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs @@ -39,9 +39,13 @@ pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> { .init_test_wallet(&ecosystem_config, &chain_config) .await?; - let mut command = cmd!(shell, "yarn jest --forceExit --testTimeout 120000") - .env("CHAIN_NAME", ecosystem_config.current_chain()) - .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?); + let test_pattern = args.test_pattern; + let mut command = cmd!( + shell, + "yarn jest --forceExit --testTimeout 120000 -t {test_pattern...}" + ) + .env("CHAIN_NAME", ecosystem_config.current_chain()) + .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?); if args.external_node { command = command.env("EXTERNAL_NODE", format!("{:?}", args.external_node)) diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index ff9cc104a50..d64e87cd0eb 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -92,6 +92,8 @@ 
pub(super) const MSG_TEST_RUST_OPTIONS_HELP: &str = "Cargo test flags"; pub(super) const MSG_BUILD_ABOUT: &str = "Build all test dependencies"; pub(super) const MSG_TESTS_EXTERNAL_NODE_HELP: &str = "Run tests for external node"; pub(super) const MSG_NO_DEPS_HELP: &str = "Do not install or build dependencies"; +pub(super) const MSG_TEST_PATTERN_HELP: &str = + "Run just the tests matching a pattern. Same as the -t flag on jest."; pub(super) const MSG_NO_KILL_HELP: &str = "The test will not kill all the nodes during execution"; pub(super) const MSG_TESTS_RECOVERY_SNAPSHOT_HELP: &str = "Run recovery from a snapshot instead of genesis";
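Taken together, the revert-test utilities introduced earlier in this diff compose into a short scenario. The following is a minimal TypeScript sketch, not part of the diff: it assumes the helpers are exported from the revert-test `utils` module, that `tester.syncWallet` is a funded `zksync.Wallet`, and that the spawner, paths, operator address, and `IZkSyncHyperchain` handle are provided by the test harness; the import paths and the transfer amount are illustrative.

import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain';
import {
    NodeSpawner,
    executeRevert,
    waitToCommitBatchesWithoutExecution,
    checkRandomTransfer
} from './utils';

// Illustrative revert scenario built from the helpers defined in this diff.
async function revertScenario(
    spawner: NodeSpawner,
    pathToHome: string,
    chain: string | undefined,
    operatorAddress: string,
    mainContract: IZkSyncHyperchain
) {
    // Execution is disabled (deadline set to 10000), so committed batches accumulate unexecuted.
    const mainNode = await spawner.spawnMainNode(false);

    // ... the test generates L2 activity here (deposits, transfers) ...

    const committed = await waitToCommitBatchesWithoutExecution(mainContract);
    await mainNode.killAndWaitForShutdown();

    // Roll the contract state and the node databases back to the last executed batch.
    await executeRevert(pathToHome, chain, operatorAddress, committed, mainContract);

    // Restart with execution enabled and verify the chain still processes transfers.
    const restarted = await spawner.spawnMainNode(true);
    await checkRandomTransfer(restarted.tester.syncWallet, 1_000_000_000n);
}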