chore(ci): Re-enable certain bb solidity ACIR tests (AztecProtocol/aztec-packages#5065)

After some of the recursion cleanup in
AztecProtocol/aztec-packages#4221, it isn't clear why
`double_verify_proof` is failing the Solidity verifier.

`double_verify_proof` was being used as a recursive proof itself,
verified inside `double_verify_nested_proof`. I have renamed this
test to `double_verify_proof_recursive` to note that its proof is meant
to be used as input to another circuit.

I have also included a new `double_verify_proof` test, which accepts
two non-nested proofs and uses the Keccak prover. This is what we were
previously expecting for `double_verify_proof`. I also brought back
`barretenberg-acir-tests-bb-sol` for a few tests.

---------

Co-authored-by: Maddiaa <[email protected]>
AztecBot and Maddiaa0 committed Mar 8, 2024
1 parent fe8f277 commit 0d6e7fd
Showing 21 changed files with 440 additions and 28 deletions.
2 changes: 1 addition & 1 deletion .aztec-sync-commit
@@ -1 +1 @@
-7ff9b71d8d87fc93ae7dbd8ba63f5176b0cd17be
+58e1ff4ecf8dbc5e4504994a9e22b04d09d0535d
20 changes: 20 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

4 changes: 3 additions & 1 deletion Cargo.toml
@@ -26,6 +26,7 @@ members = [
"tooling/nargo_toml",
"tooling/noirc_abi",
"tooling/noirc_abi_wasm",
+"tooling/acvm_cli",
# ACVM
"acvm-repo/acir_field",
"acvm-repo/acir",
@@ -36,7 +37,7 @@ members = [
"acvm-repo/blackbox_solver",
"acvm-repo/bn254_blackbox_solver",
]
-default-members = ["tooling/nargo_cli"]
+default-members = ["tooling/nargo_cli", "tooling/acvm_cli"]
resolver = "2"

[workspace.package]
@@ -78,6 +79,7 @@ noir_lsp = { path = "tooling/lsp" }
noir_debugger = { path = "tooling/debugger" }
noirc_abi = { path = "tooling/noirc_abi" }
bb_abstraction_leaks = { path = "tooling/bb_abstraction_leaks" }
+acvm_cli = { path = "tooling/acvm_cli" }

# LSP
async-lsp = { version = "0.1.0", default-features = false }
8 changes: 7 additions & 1 deletion aztec_macros/src/lib.rs
@@ -727,8 +727,14 @@ fn transform_function(
/// Transform a function to work with AVM bytecode
fn transform_vm_function(
func: &mut NoirFunction,
-_storage_defined: bool,
+storage_defined: bool,
) -> Result<(), AztecMacroError> {
+// Create access to storage
+if storage_defined {
+    let storage = abstract_storage("public_vm", true);
+    func.def.body.0.insert(0, storage);
+}
+
// Push Avm context creation to the beginning of the function
let create_context = create_avm_context()?;
func.def.body.0.insert(0, create_context);
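Worth noting about the hunk above: both new statements are inserted at index 0, and `create_context` is inserted after `storage`, so the context creation ends up first in the transformed body. A minimal Rust sketch of that ordering, using a plain `Vec<&str>` in place of the real AST types (the statement strings are illustrative, not the macro's actual output):

    fn main() {
        let mut body: Vec<&str> = vec!["original_statement()"];

        // Mirrors `func.def.body.0.insert(0, storage)` ...
        body.insert(0, "let storage = abstract_storage(\"public_vm\");");
        // ... followed by `func.def.body.0.insert(0, create_context)`.
        body.insert(0, "let context = create_avm_context();");

        // The statement inserted last comes first: context creation,
        // then storage access, then the original body.
        assert_eq!(
            body,
            vec![
                "let context = create_avm_context();",
                "let storage = abstract_storage(\"public_vm\");",
                "original_statement()",
            ]
        );
    }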
3 changes: 2 additions & 1 deletion compiler/noirc_evaluator/src/brillig/brillig_ir.rs
@@ -32,7 +32,7 @@ use num_bigint::BigUint;
/// The Brillig VM does not apply a limit to the memory address space.
/// As a convention, we use 64 bits. This means that we assume that
/// memory has 2^64 memory slots.
-pub(crate) const BRILLIG_MEMORY_ADDRESSING_BIT_SIZE: u32 = 32;
+pub(crate) const BRILLIG_MEMORY_ADDRESSING_BIT_SIZE: u32 = 64;

// Registers reserved in runtime for special purposes.
pub(crate) enum ReservedRegisters {
@@ -562,6 +562,7 @@ impl BrilligContext {
bit_size: u32,
) {
self.debug_show.const_instruction(result, constant);

self.push_opcode(BrilligOpcode::Const { destination: result, value: constant, bit_size });
}

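The first hunk above widens Brillig's memory addressing from 32 to 64 bits, in line with the 2^64-slot convention in the doc comment. A small self-contained Rust sketch (illustrative only, not compiler code) of what the bit width means for address validity:

    const BRILLIG_MEMORY_ADDRESSING_BIT_SIZE: u32 = 64;

    /// True when `addr` is representable in `bits` bits.
    fn fits_in_bits(addr: u128, bits: u32) -> bool {
        bits >= 128 || addr < (1u128 << bits)
    }

    fn main() {
        let addr: u128 = 1 << 40;
        // Out of range under the old 32-bit convention...
        assert!(!fits_in_bits(addr, 32));
        // ...but valid with 64-bit addressing (2^64 memory slots).
        assert!(fits_in_bits(addr, BRILLIG_MEMORY_ADDRESSING_BIT_SIZE));
    }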
2 changes: 1 addition & 1 deletion docs/scripts/codegen_nargo_reference.sh
@@ -30,4 +30,4 @@ sidebar_position: 0
---
" > $NARGO_REFERENCE

-cargo run -F codegen-docs -- info >> $NARGO_REFERENCE
+cargo run --bin nargo -F codegen-docs -- info >> $NARGO_REFERENCE
42 changes: 21 additions & 21 deletions test_programs/execution_success/brillig_cow_regression/src/main.nr
@@ -8,9 +8,9 @@ global MAX_NEW_CONTRACTS_PER_TX: u64 = 1;
global NUM_ENCRYPTED_LOGS_HASHES_PER_TX: u64 = 1;
global NUM_UNENCRYPTED_LOGS_HASHES_PER_TX: u64 = 1;
global NUM_FIELDS_PER_SHA256 = 2;
-global CALLDATA_HASH_INPUT_SIZE = 169;
-global CALL_DATA_HASH_LOG_FIELDS = 4;
-global CALL_DATA_HASH_FULL_FIELDS = 165;
+global TX_EFFECT_HASH_INPUT_SIZE = 169;
+global TX_EFFECT_HASH_LOG_FIELDS = 4;
+global TX_EFFECT_HASH_FULL_FIELDS = 165;

struct PublicDataUpdateRequest {
leaf_slot : Field,
@@ -99,7 +99,7 @@ impl U256 {
}

unconstrained fn main(kernel_data: DataToHash) -> pub [Field; NUM_FIELDS_PER_SHA256] {
-let mut calldata_hash_inputs = [0; CALLDATA_HASH_INPUT_SIZE];
+let mut tx_effects_hash_inputs = [0; TX_EFFECT_HASH_INPUT_SIZE];

let new_note_hashes = kernel_data.new_note_hashes;
let new_nullifiers = kernel_data.new_nullifiers;
@@ -111,65 +111,65 @@ unconstrained fn main(kernel_data: DataToHash) -> pub [Field; NUM_FIELDS_PER_SHA
let mut offset = 0;

for j in 0..MAX_NEW_NOTE_HASHES_PER_TX {
-calldata_hash_inputs[offset + j] = new_note_hashes[j];
+tx_effects_hash_inputs[offset + j] = new_note_hashes[j];
}
offset += MAX_NEW_NOTE_HASHES_PER_TX ;

for j in 0..MAX_NEW_NULLIFIERS_PER_TX {
-calldata_hash_inputs[offset + j] = new_nullifiers[j];
+tx_effects_hash_inputs[offset + j] = new_nullifiers[j];
}
offset += MAX_NEW_NULLIFIERS_PER_TX ;

for j in 0..MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX {
-calldata_hash_inputs[offset + j * 2] =
+tx_effects_hash_inputs[offset + j * 2] =
public_data_update_requests[j].leaf_slot;
-calldata_hash_inputs[offset + j * 2 + 1] =
+tx_effects_hash_inputs[offset + j * 2 + 1] =
public_data_update_requests[j].new_value;
}
offset += MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX * 2;

for j in 0..MAX_NEW_L2_TO_L1_MSGS_PER_TX {
-calldata_hash_inputs[offset + j] = newL2ToL1msgs[j];
+tx_effects_hash_inputs[offset + j] = newL2ToL1msgs[j];
}
offset += MAX_NEW_L2_TO_L1_MSGS_PER_TX;

let contract_leaf = kernel_data.new_contracts[0];
-calldata_hash_inputs[offset] = contract_leaf.hash();
+tx_effects_hash_inputs[offset] = contract_leaf.hash();

offset += MAX_NEW_CONTRACTS_PER_TX;

let new_contracts = kernel_data.new_contracts;
-calldata_hash_inputs[offset] = new_contracts[0].contract_address;
+tx_effects_hash_inputs[offset] = new_contracts[0].contract_address;

-calldata_hash_inputs[offset + 1] = new_contracts[0].portal_contract_address;
+tx_effects_hash_inputs[offset + 1] = new_contracts[0].portal_contract_address;

offset += MAX_NEW_CONTRACTS_PER_TX * 2;

for j in 0..NUM_FIELDS_PER_SHA256 {
-calldata_hash_inputs[offset + j] = encryptedLogsHash[j];
+tx_effects_hash_inputs[offset + j] = encryptedLogsHash[j];
}

offset += NUM_ENCRYPTED_LOGS_HASHES_PER_TX * NUM_FIELDS_PER_SHA256;

for j in 0..NUM_FIELDS_PER_SHA256 {
-calldata_hash_inputs[offset + j] = unencryptedLogsHash[j];
+tx_effects_hash_inputs[offset + j] = unencryptedLogsHash[j];
}

offset += NUM_UNENCRYPTED_LOGS_HASHES_PER_TX * NUM_FIELDS_PER_SHA256;
-assert_eq(offset, CALLDATA_HASH_INPUT_SIZE); // Sanity check
+assert_eq(offset, TX_EFFECT_HASH_INPUT_SIZE); // Sanity check

-let mut hash_input_flattened = [0; CALL_DATA_HASH_FULL_FIELDS * 32 + CALL_DATA_HASH_LOG_FIELDS * 16];
-for offset in 0..CALL_DATA_HASH_FULL_FIELDS {
-let input_as_bytes = calldata_hash_inputs[offset].to_be_bytes(32);
+let mut hash_input_flattened = [0; TX_EFFECT_HASH_FULL_FIELDS * 32 + TX_EFFECT_HASH_LOG_FIELDS * 16];
+for offset in 0..TX_EFFECT_HASH_FULL_FIELDS {
+let input_as_bytes = tx_effects_hash_inputs[offset].to_be_bytes(32);
for byte_index in 0..32 {
hash_input_flattened[offset * 32 + byte_index] = input_as_bytes[byte_index];
}
}

-for log_field_index in 0..CALL_DATA_HASH_LOG_FIELDS {
-let input_as_bytes = calldata_hash_inputs[CALL_DATA_HASH_FULL_FIELDS + log_field_index].to_be_bytes(16);
+for log_field_index in 0..TX_EFFECT_HASH_LOG_FIELDS {
+let input_as_bytes = tx_effects_hash_inputs[TX_EFFECT_HASH_FULL_FIELDS + log_field_index].to_be_bytes(16);
for byte_index in 0..16 {
-hash_input_flattened[CALL_DATA_HASH_FULL_FIELDS * 32 + log_field_index * 16 + byte_index] = input_as_bytes[byte_index];
+hash_input_flattened[TX_EFFECT_HASH_FULL_FIELDS * 32 + log_field_index * 16 + byte_index] = input_as_bytes[byte_index];
}
}

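For reference, the flattening at the end of main.nr above packs each of the 165 full fields as 32 big-endian bytes and each of the 4 log fields as 16 bytes, for 165 * 32 + 4 * 16 = 5344 bytes in total. A Rust sketch of the same layout (illustrative only; field elements are modeled as 32-byte big-endian arrays, and the log-field case assumes values below 2^128 so the low 16 bytes suffice, as in to_be_bytes(16)):

    const TX_EFFECT_HASH_FULL_FIELDS: usize = 165;
    const TX_EFFECT_HASH_LOG_FIELDS: usize = 4;
    const TX_EFFECT_HASH_INPUT_SIZE: usize = 169;

    /// Flattens hash inputs: full fields contribute 32 bytes, log fields 16.
    fn flatten(inputs: &[[u8; 32]; TX_EFFECT_HASH_INPUT_SIZE]) -> Vec<u8> {
        let mut out = vec![0u8; TX_EFFECT_HASH_FULL_FIELDS * 32 + TX_EFFECT_HASH_LOG_FIELDS * 16];
        for i in 0..TX_EFFECT_HASH_FULL_FIELDS {
            // Full fields: the whole 32-byte big-endian representation.
            out[i * 32..(i + 1) * 32].copy_from_slice(&inputs[i]);
        }
        for j in 0..TX_EFFECT_HASH_LOG_FIELDS {
            // Log fields: only the low 16 bytes of the big-endian form.
            let base = TX_EFFECT_HASH_FULL_FIELDS * 32 + j * 16;
            out[base..base + 16].copy_from_slice(&inputs[TX_EFFECT_HASH_FULL_FIELDS + j][16..]);
        }
        out
    }

    fn main() {
        let inputs = [[0u8; 32]; TX_EFFECT_HASH_INPUT_SIZE];
        assert_eq!(flatten(&inputs).len(), 5344);
    }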
4 changes: 3 additions & 1 deletion test_programs/execution_success/double_verify_proof/Nargo.toml
@@ -2,4 +2,6 @@
name = "double_verify_proof"
type = "bin"
authors = [""]
-[dependencies]
+compiler_version = ">=0.24.0"
+
+[dependencies]
1 change: 0 additions & 1 deletion test_programs/execution_success/double_verify_proof/src/main.nr
@@ -1,6 +1,5 @@
use dep::std;

-#[recursive]
fn main(
verification_key: [Field; 114],
// This is the proof without public inputs attached.
5 changes: 5 additions & 0 deletions test_programs/execution_success/double_verify_proof_recursive/Nargo.toml
@@ -0,0 +1,5 @@
+[package]
+name = "double_verify_proof_recursive"
+type = "bin"
+authors = [""]
+[dependencies]
