diff --git a/Cargo.toml b/Cargo.toml index 050eaa95..89fe435f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,8 @@ thiserror = "1.0" group = "0.13.0" once_cell = "1.18.0" itertools = "0.12.0" +rand = "0.8.5" +ref-cast = "1.0.20" [target.'cfg(any(target_arch = "x86_64", target_arch = "aarch64"))'.dependencies] pasta-msm = { version = "0.1.4" } @@ -74,8 +76,15 @@ harness = false name = "ppsnark" harness = false + +[[bench]] +name = "pcs" +harness = false +required-features = ["bench"] + [features] default = ["halo2curves/asm"] +bench = [] # Compiles in portable mode, w/o ISA extensions => binary can be executed on all systems. portable = ["pasta-msm/portable"] cuda = ["neptune/cuda", "neptune/pasta", "neptune/arity24"] diff --git a/benches/compressed-snark.rs b/benches/compressed-snark.rs index 7da57365..b60e40c5 100644 --- a/benches/compressed-snark.rs +++ b/benches/compressed-snark.rs @@ -4,6 +4,7 @@ use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; use core::marker::PhantomData; use criterion::{measurement::WallTime, *}; use ff::PrimeField; +use halo2curves::bn256::Bn256; use nova_snark::{ provider::{Bn256EngineKZG, GrumpkinEngine}, traits::{ @@ -17,7 +18,7 @@ use std::time::Duration; type E1 = Bn256EngineKZG; type E2 = GrumpkinEngine; -type EE1 = nova_snark::provider::hyperkzg::EvaluationEngine; +type EE1 = nova_snark::provider::hyperkzg::EvaluationEngine; type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine; // SNARKs without computational commitments type S1 = nova_snark::spartan::snark::RelaxedR1CSSNARK; @@ -28,7 +29,7 @@ type SS2 = nova_snark::spartan::snark::RelaxedR1CSSNARK; type C1 = NonTrivialCircuit<::Scalar>; type C2 = TrivialCircuit<::Scalar>; -// To run these benchmarks, first download `criterion` with `cargo install cargo install cargo-criterion`. +// To run these benchmarks, first download `criterion` with `cargo install cargo-criterion`. // Then `cargo criterion --bench compressed-snark`. The results are located in `target/criterion/data/`. // For flamegraphs, run `cargo criterion --bench compressed-snark --features flamegraph -- --profile-time `. // The results are located in `target/criterion/profile/`. 
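Aside on the `x.clone_from(&y)` hunks below (and the similar ones in the other benches): for a type with a specialized `Clone::clone_from`, this form can reuse the destination's existing allocation instead of dropping it and building a fresh clone, which is what clippy's `assigning_clones` lint (allowed explicitly later in this patch) nudges toward; for types without a specialized impl, which is likely the case for `AllocatedNum` here, the two forms behave identically. A minimal, self-contained sketch with hypothetical values, not taken from this patch:

```rust
fn main() {
    let src = vec![1u8; 1 << 20];
    let mut dst = vec![0u8; 1 << 20];

    // dst = src.clone();  // drops dst's old buffer, then allocates a fresh clone
    dst.clone_from(&src);  // clones in place, reusing dst's capacity when possible

    assert_eq!(dst, src);
}
```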
@@ -216,7 +217,7 @@ impl StepCircuit for NonTrivialCircuit { let mut y = x.clone(); for i in 0..self.num_cons { y = x.square(cs.namespace(|| format!("x_sq_{i}")))?; - x = y.clone(); + x.clone_from(&y); } Ok(vec![y]) } diff --git a/benches/compute-digest.rs b/benches/compute-digest.rs index 64cc4851..f8bdacdc 100644 --- a/benches/compute-digest.rs +++ b/benches/compute-digest.rs @@ -68,7 +68,7 @@ impl StepCircuit for NonTrivialCircuit { let mut y = x.clone(); for i in 0..self.num_cons { y = x.square(cs.namespace(|| format!("x_sq_{i}")))?; - x = y.clone(); + x.clone_from(&y); } Ok(vec![y]) } diff --git a/benches/pcs.rs b/benches/pcs.rs new file mode 100644 index 00000000..84f4313a --- /dev/null +++ b/benches/pcs.rs @@ -0,0 +1,199 @@ +use criterion::{criterion_group, criterion_main, Bencher, BenchmarkId, Criterion, SamplingMode}; +use ff::Field; +use halo2curves::bn256::Bn256; +use nova_snark::provider::{ + hyperkzg::EvaluationEngine as MLEvaluationEngine, + ipa_pc::EvaluationEngine as IPAEvaluationEngine, non_hiding_zeromorph::ZMPCS, Bn256Engine, + Bn256EngineKZG, Bn256EngineZM, +}; +use nova_snark::spartan::polys::multilinear::MultilinearPolynomial; +use nova_snark::traits::{ + commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine, + TranscriptEngineTrait, +}; +use rand::rngs::StdRng; +use rand_core::{CryptoRng, RngCore, SeedableRng}; +use std::any::type_name; +use std::time::Duration; + +// To run these benchmarks, first download `criterion` with `cargo install cargo-criterion`. +// Then `cargo criterion --bench pcs`. +// For flamegraphs, run `cargo criterion --bench pcs --features flamegraph -- --profile-time `. +// The results are located in `target/criterion/profile/`. +cfg_if::cfg_if! { + if #[cfg(feature = "flamegraph")] { + criterion_group! { + name = pcs; + config = Criterion::default().warm_up_time(Duration::from_millis(3000)).with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None))); + targets = bench_pcs + } + } else { + criterion_group! { + name = pcs; + config = Criterion::default().warm_up_time(Duration::from_millis(3000)); + targets = bench_pcs + } + } +} + +criterion_main!(pcs); + +const NUM_VARS_TEST_VECTOR: [usize; 6] = [10, 12, 14, 16, 18, 20]; + +struct BenchAssests> { + poly: MultilinearPolynomial<::Scalar>, + point: Vec<::Scalar>, + eval: ::Scalar, + ck: <::CE as CommitmentEngineTrait>::CommitmentKey, + commitment: <::CE as CommitmentEngineTrait>::Commitment, + prover_key: >::ProverKey, + verifier_key: >::VerifierKey, + proof: Option<>::EvaluationArgument>, +} + +/// Returns a random polynomial, a point and calculate its evaluation. +pub fn random_poly_with_eval( + num_vars: usize, + mut rng: &mut R, +) -> ( + MultilinearPolynomial<::Scalar>, + Vec<::Scalar>, + ::Scalar, +) { + // Generate random polynomial and point. + let poly = MultilinearPolynomial::random(num_vars, &mut rng); + let point = (0..num_vars) + .map(|_| ::Scalar::random(&mut rng)) + .collect::>(); + + // Calculation evaluation of point over polynomial. + let eval = MultilinearPolynomial::evaluate_with(poly.evaluations(), &point); + + (poly, point, eval) +} + +impl> BenchAssests { + pub(crate) fn from_num_vars(num_vars: usize, rng: &mut R) -> Self { + let (poly, point, eval) = random_poly_with_eval::(num_vars, rng); + + // Mock commitment key. + let ck = E::CE::setup(b"test", 1 << num_vars); + // Commits to the provided vector using the provided generators. 
+ let commitment = E::CE::commit(&ck, poly.evaluations()); + + let (prover_key, verifier_key) = EE::setup(&ck); + + // Generate proof so that we can bench verification. + let proof = EE::prove( + &ck, + &prover_key, + &mut E::TE::new(b"TestEval"), + &commitment, + poly.evaluations(), + &point, + &eval, + ) + .unwrap(); + + Self { + poly, + point, + eval, + ck, + commitment, + prover_key, + verifier_key, + proof: Some(proof), + } + } +} + +// Macro to generate benchmark code for multiple evaluation engine types +macro_rules! benchmark_all_engines { + ($criterion:expr, $test_vector:expr, $proving_fn:expr, $verifying_fn:expr, $( ($assets:ident, $eval_engine:ty) ),*) => { + for num_vars in $test_vector.iter() { + let mut rng = rand::rngs::StdRng::seed_from_u64(*num_vars as u64); + + $( + let $assets: BenchAssests<_, $eval_engine> = BenchAssests::from_num_vars::(*num_vars, &mut rng); + )* + + // Proving group + let mut proving_group = $criterion.benchmark_group(format!("PCS-Proving {}", num_vars)); + proving_group + .sampling_mode(SamplingMode::Auto) + .sample_size(10); + + $( + proving_group.bench_with_input(BenchmarkId::new(type_name::<$eval_engine>(), num_vars), &num_vars, |b, _| { + $proving_fn(b, &$assets); + }); + )* + + proving_group.finish(); + + // Verifying group + let mut verifying_group = $criterion.benchmark_group(format!("PCS-Verifying {}", num_vars)); + verifying_group + .sampling_mode(SamplingMode::Auto) + .sample_size(10); + + $( + verifying_group.bench_with_input(BenchmarkId::new(type_name::<$eval_engine>(), num_vars), &num_vars, |b, _| { + $verifying_fn(b, &$assets); + }); + )* + + verifying_group.finish(); + } + }; +} + +fn bench_pcs(c: &mut Criterion) { + benchmark_all_engines!( + c, + NUM_VARS_TEST_VECTOR, + bench_pcs_proving_internal, + bench_pcs_verifying_internal, + (ipa_assets, IPAEvaluationEngine), + (hyperkzg_assets, MLEvaluationEngine), + (zm_assets, ZMPCS) + ); +} + +fn bench_pcs_proving_internal>( + b: &mut Bencher<'_>, + bench_assets: &BenchAssests, +) { + // Bench generate proof. + b.iter(|| { + EE::prove( + &bench_assets.ck, + &bench_assets.prover_key, + &mut E::TE::new(b"TestEval"), + &bench_assets.commitment, + bench_assets.poly.evaluations(), + &bench_assets.point, + &bench_assets.eval, + ) + .unwrap(); + }); +} + +fn bench_pcs_verifying_internal>( + b: &mut Bencher<'_>, + bench_assets: &BenchAssests, +) { + // Bench verify proof. + b.iter(|| { + EE::verify( + &bench_assets.verifier_key, + &mut E::TE::new(b"TestEval"), + &bench_assets.commitment, + &bench_assets.point, + &bench_assets.eval, + bench_assets.proof.as_ref().unwrap(), + ) + .unwrap(); + }); +} diff --git a/benches/ppsnark.rs b/benches/ppsnark.rs index 56a24ac0..4fce0896 100644 --- a/benches/ppsnark.rs +++ b/benches/ppsnark.rs @@ -4,6 +4,7 @@ use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; use core::marker::PhantomData; use criterion::*; use ff::PrimeField; +use halo2curves::bn256::Bn256; use nova_snark::{ provider::Bn256EngineKZG, spartan::direct::DirectSNARK, @@ -12,7 +13,7 @@ use nova_snark::{ use std::time::Duration; type E = Bn256EngineKZG; -type EE = nova_snark::provider::hyperkzg::EvaluationEngine; +type EE = nova_snark::provider::hyperkzg::EvaluationEngine; type S = nova_snark::spartan::ppsnark::RelaxedR1CSSNARK; // To run these benchmarks, first download `criterion` with `cargo install cargo install cargo-criterion`. 
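Aside on running the new `pcs` bench added above: the Cargo.toml hunk registers it with `required-features = ["bench"]`, so Cargo will skip the target unless that feature is enabled; something like `cargo criterion --bench pcs --features bench` is likely needed rather than the bare command given in the file comment (the exact invocation depends on how cargo-criterion forwards feature flags).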
@@ -124,7 +125,7 @@ impl StepCircuit for NonTrivialCircuit { let mut y = z[0].clone(); for i in 0..self.num_cons { y = x.square(cs.namespace(|| format!("x_sq_{i}")))?; - x = y.clone(); + x.clone_from(&y); } Ok(vec![y]) } diff --git a/benches/recursive-snark.rs b/benches/recursive-snark.rs index 72beaa16..3b39753d 100644 --- a/benches/recursive-snark.rs +++ b/benches/recursive-snark.rs @@ -20,7 +20,7 @@ type E2 = GrumpkinEngine; type C1 = NonTrivialCircuit<::Scalar>; type C2 = TrivialCircuit<::Scalar>; -// To run these benchmarks, first download `criterion` with `cargo install cargo install cargo-criterion`. +// To run these benchmarks, first download `criterion` with `cargo install cargo-criterion`. // Then `cargo criterion --bench recursive-snark`. The results are located in `target/criterion/data/`. // For flamegraphs, run `cargo criterion --bench recursive-snark --features flamegraph -- --profile-time `. // The results are located in `target/criterion/profile/`. @@ -165,7 +165,7 @@ impl StepCircuit for NonTrivialCircuit { let mut y = x.clone(); for i in 0..self.num_cons { y = x.square(cs.namespace(|| format!("x_sq_{i}")))?; - x = y.clone(); + x.clone_from(&y); } Ok(vec![y]) } diff --git a/examples/and.rs b/examples/and.rs index 33b50bb7..4b3e5b62 100644 --- a/examples/and.rs +++ b/examples/and.rs @@ -8,6 +8,7 @@ use core::marker::PhantomData; use ff::Field; use ff::{PrimeField, PrimeFieldBits}; use flate2::{write::ZlibEncoder, Compression}; +use halo2curves::bn256::Bn256; use nova_snark::{ provider::{Bn256EngineKZG, GrumpkinEngine}, traits::{ @@ -22,7 +23,7 @@ use std::time::Instant; type E1 = Bn256EngineKZG; type E2 = GrumpkinEngine; -type EE1 = nova_snark::provider::hyperkzg::EvaluationEngine; +type EE1 = nova_snark::provider::hyperkzg::EvaluationEngine; type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine; type S1 = nova_snark::spartan::snark::RelaxedR1CSSNARK; // non-preprocessing SNARK type S2 = nova_snark::spartan::snark::RelaxedR1CSSNARK; // non-preprocessing SNARK diff --git a/examples/hashchain.rs b/examples/hashchain.rs index d0554568..16609ee5 100644 --- a/examples/hashchain.rs +++ b/examples/hashchain.rs @@ -4,6 +4,7 @@ use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; use ff::Field; use flate2::{write::ZlibEncoder, Compression}; use generic_array::typenum::U24; +use halo2curves::bn256::Bn256; use neptune::{ circuit2::Elt, sponge::{ @@ -26,7 +27,7 @@ use std::time::Instant; type E1 = Bn256EngineKZG; type E2 = GrumpkinEngine; -type EE1 = nova_snark::provider::hyperkzg::EvaluationEngine; +type EE1 = nova_snark::provider::hyperkzg::EvaluationEngine; type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine; type S1 = nova_snark::spartan::snark::RelaxedR1CSSNARK; // non-preprocessing SNARK type S2 = nova_snark::spartan::snark::RelaxedR1CSSNARK; // non-preprocessing SNARK diff --git a/examples/minroot.rs b/examples/minroot.rs index 7a589b95..a0845809 100644 --- a/examples/minroot.rs +++ b/examples/minroot.rs @@ -4,6 +4,7 @@ use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; use ff::Field; use flate2::{write::ZlibEncoder, Compression}; +use halo2curves::bn256::Bn256; use nova_snark::{ provider::{Bn256EngineKZG, GrumpkinEngine}, traits::{ @@ -18,7 +19,7 @@ use std::time::Instant; type E1 = Bn256EngineKZG; type E2 = GrumpkinEngine; -type EE1 = nova_snark::provider::hyperkzg::EvaluationEngine; +type EE1 = nova_snark::provider::hyperkzg::EvaluationEngine; type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine; type S1 = 
nova_snark::spartan::snark::RelaxedR1CSSNARK; // non-preprocessing SNARK type S2 = nova_snark::spartan::snark::RelaxedR1CSSNARK; // non-preprocessing SNARK diff --git a/src/errors.rs b/src/errors.rs index 8a0912bc..d573bc3f 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -34,9 +34,9 @@ pub enum NovaError { /// returned if the provided number of steps is zero #[error("InvalidNumSteps")] InvalidNumSteps, - /// returned when an invalid PCS evaluation argument is provided - #[error("InvalidPCS")] - InvalidPCS, + /// returned if there is an error in the proof/verification of a PCS + #[error("PCSError")] + PCSError(#[from] PCSError), /// returned when an invalid sum-check proof is provided #[error("InvalidSumcheckProof")] InvalidSumcheckProof, @@ -79,3 +79,17 @@ impl From for NovaError { } } } + +/// Errors specific to the Polynomial commitment scheme +#[derive(Clone, Debug, Eq, PartialEq, Error)] +pub enum PCSError { + /// returned when an invalid inner product argument is provided + #[error("InvalidIPA")] + InvalidIPA, + /// returned when there is a Zeromorph error + #[error("ZMError")] + ZMError, + /// returned when a length check fails in a PCS + #[error("LengthError")] + LengthError, +} diff --git a/src/gadgets/nonnative/mod.rs b/src/gadgets/nonnative/mod.rs index 8167e5a7..4d611cbb 100644 --- a/src/gadgets/nonnative/mod.rs +++ b/src/gadgets/nonnative/mod.rs @@ -6,16 +6,12 @@ use ff::PrimeField; trait OptionExt { fn grab(&self) -> Result<&T, SynthesisError>; - fn grab_mut(&mut self) -> Result<&mut T, SynthesisError>; } impl OptionExt for Option { fn grab(&self) -> Result<&T, SynthesisError> { self.as_ref().ok_or(SynthesisError::AssignmentMissing) } - fn grab_mut(&mut self) -> Result<&mut T, SynthesisError> { - self.as_mut().ok_or(SynthesisError::AssignmentMissing) - } } trait BitAccess { diff --git a/src/gadgets/nonnative/util.rs b/src/gadgets/nonnative/util.rs index f26c09f6..b4997368 100644 --- a/src/gadgets/nonnative/util.rs +++ b/src/gadgets/nonnative/util.rs @@ -14,8 +14,6 @@ use std::io::{self, Write}; pub struct Bit { /// The linear combination which constrain the value of the bit pub bit: LinearCombination, - /// The value of the bit (filled at witness-time) - pub value: Option, } #[derive(Clone)] @@ -58,7 +56,6 @@ impl Bit { Ok(Self { bit: LinearCombination::zero() + var, - value, }) } } diff --git a/src/lib.rs b/src/lib.rs index a999f127..c9fd5b4f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -850,8 +850,8 @@ mod tests { use super::*; use crate::{ provider::{ - pedersen::CommitmentKeyExtTrait, traits::DlogGroup, Bn256EngineIPA, Bn256EngineKZG, - GrumpkinEngine, PallasEngine, Secp256k1Engine, Secq256k1Engine, VestaEngine, + non_hiding_zeromorph::ZMPCS, traits::DlogGroup, Bn256EngineIPA, Bn256EngineKZG, + Bn256EngineZM, GrumpkinEngine, PallasEngine, Secp256k1Engine, Secq256k1Engine, VestaEngine, }, traits::{circuit::TrivialCircuit, evaluation::EvaluationEngineTrait, snark::default_ck_hint}, }; @@ -859,9 +859,10 @@ mod tests { use core::{fmt::Write, marker::PhantomData}; use expect_test::{expect, Expect}; use ff::PrimeField; + use halo2curves::bn256::Bn256; type EE = provider::ipa_pc::EvaluationEngine; - type EEPrime = provider::hyperkzg::EvaluationEngine; + type EEPrime = provider::hyperkzg::EvaluationEngine; type S = spartan::snark::RelaxedR1CSSNARK; type SPrime = spartan::ppsnark::RelaxedR1CSSNARK; @@ -913,7 +914,7 @@ mod tests { } } - fn test_pp_digest_with(circuit1: &T1, circuit2: &T2, expected: &Expect) + fn test_pp_digest_with(circuit1: &T1, circuit2: &T2, expected: &Expect) 
where E1: Engine::Scalar>, E2: Engine::Scalar>, @@ -921,13 +922,12 @@ mod tests { E2::GE: DlogGroup, T1: StepCircuit, T2: StepCircuit, - // required to use the IPA in the initialization of the commitment key hints below - >::CommitmentKey: CommitmentKeyExtTrait, - >::CommitmentKey: CommitmentKeyExtTrait, + EE1: EvaluationEngineTrait, + EE2: EvaluationEngineTrait, { // this tests public parameters with a size specifically intended for a spark-compressed SNARK - let ck_hint1 = &*SPrime::>::ck_floor(); - let ck_hint2 = &*SPrime::>::ck_floor(); + let ck_hint1 = &*SPrime::::ck_floor(); + let ck_hint2 = &*SPrime::::ck_floor(); let pp = PublicParams::::setup(circuit1, circuit2, ck_hint1, ck_hint2).unwrap(); let digest_str = pp @@ -939,24 +939,25 @@ mod tests { let _ = write!(output, "{b:02x}"); output }); + expected.assert_eq(&digest_str); } #[test] fn test_pp_digest() { - test_pp_digest_with::( + test_pp_digest_with::, EE<_>>( &TrivialCircuit::<_>::default(), &TrivialCircuit::<_>::default(), &expect!["a69d6cf6d014c3a5cc99b77afc86691f7460faa737207dd21b30e8241fae8002"], ); - test_pp_digest_with::( + test_pp_digest_with::, EE<_>>( &TrivialCircuit::<_>::default(), &TrivialCircuit::<_>::default(), &expect!["b22ab3456df4bd391804a39fae582b37ed4a8d90ace377337940ac956d87f701"], ); - test_pp_digest_with::( + test_pp_digest_with::, EE<_>>( &TrivialCircuit::<_>::default(), &TrivialCircuit::<_>::default(), &expect!["c8aec89a3ea90317a0ecdc9150f4fc3648ca33f6660924a192cafd82e2939b02"], @@ -1191,10 +1192,17 @@ mod tests { ); test_ivc_nontrivial_with_compression_with::, EE<_>>(); + test_ivc_nontrivial_with_compression_with::< + Bn256EngineZM, + GrumpkinEngine, + ZMPCS, + EE<_>, + >(); + test_ivc_nontrivial_with_spark_compression_with::< Bn256EngineKZG, GrumpkinEngine, - provider::hyperkzg::EvaluationEngine<_>, + provider::hyperkzg::EvaluationEngine, EE<_>, >(); } @@ -1300,6 +1308,12 @@ mod tests { >(); test_ivc_nontrivial_with_spark_compression_with::, EE<_>>( ); + test_ivc_nontrivial_with_spark_compression_with::< + Bn256EngineZM, + GrumpkinEngine, + ZMPCS, + EE<_>, + >(); } fn test_ivc_nondet_with_compression_with() @@ -1440,6 +1454,8 @@ mod tests { test_ivc_nondet_with_compression_with::, EE<_>>(); test_ivc_nondet_with_compression_with::, EE<_>>(); test_ivc_nondet_with_compression_with::, EE<_>>(); + test_ivc_nondet_with_compression_with::, EE<_>>( + ); } fn test_ivc_base_with() diff --git a/src/provider/bn256_grumpkin.rs b/src/provider/bn256_grumpkin.rs index bb753155..35ac41c4 100644 --- a/src/provider/bn256_grumpkin.rs +++ b/src/provider/bn256_grumpkin.rs @@ -2,7 +2,7 @@ use crate::{ errors::NovaError, impl_traits, - provider::traits::{CompressedGroup, DlogGroup, PairingGroup}, + provider::traits::{CompressedGroup, DlogGroup}, traits::{Group, PrimeFieldExt, TranscriptReprTrait}, }; use digest::{ExtendableOutput, Update}; @@ -13,7 +13,7 @@ use num_traits::Num; // Remove this when https://github.com/zcash/pasta_curves/issues/41 resolves use halo2curves::{ bn256::{ - pairing, G1Affine as Bn256Affine, G1Compressed as Bn256Compressed, G2Affine, G2Compressed, Gt, + G1Affine as Bn256Affine, G1Compressed as Bn256Compressed, G2Affine, G2Compressed, G1 as Bn256Point, G2, }, grumpkin::{G1Affine as GrumpkinAffine, G1Compressed as GrumpkinCompressed, G1 as GrumpkinPoint}, @@ -52,15 +52,6 @@ impl_traits!( "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001" ); -impl PairingGroup for Bn256Point { - type G2 = G2; - type GT = Gt; - - fn pairing(p: &Self, q: &Self::G2) -> Self::GT { - pairing(&p.affine(), 
&q.affine()) - } -} - impl Group for G2 { type Base = bn256::Base; type Scalar = bn256::Scalar; diff --git a/src/provider/hyperkzg.rs b/src/provider/hyperkzg.rs index 6afc9a53..91d051e3 100644 --- a/src/provider/hyperkzg.rs +++ b/src/provider/hyperkzg.rs @@ -8,365 +8,133 @@ #![allow(non_snake_case)] use crate::{ errors::NovaError, - provider::traits::{CompressedGroup, DlogGroup, PairingGroup}, + provider::{ + kzg_commitment::{KZGCommitmentEngine, KZGProverKey, KZGVerifierKey, UniversalKZGParam}, + pedersen::Commitment, + traits::DlogGroup, + }, + spartan::polys::univariate::UniPoly, traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, + commitment::{CommitmentEngineTrait, Len}, evaluation::EvaluationEngineTrait, - AbsorbInROTrait, Engine, ROTrait, TranscriptEngineTrait, TranscriptReprTrait, + Engine as NovaEngine, Group, TranscriptEngineTrait, TranscriptReprTrait, }, zip_with, }; -use core::{ - marker::PhantomData, - ops::{Add, Mul, MulAssign}, -}; +use core::marker::PhantomData; use ff::Field; +use group::{Curve, Group as _}; +use halo2curves::pairing::{Engine, MillerLoopResult, MultiMillerLoop}; use itertools::Itertools; -use rand_core::OsRng; use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -/// Alias to points on G1 that are in preprocessed form -type G1Affine = <::GE as DlogGroup>::AffineGroupElement; - -/// Alias to points on G1 that are in preprocessed form -type G2Affine = <<::GE as PairingGroup>::G2 as DlogGroup>::AffineGroupElement; - -/// KZG commitment key -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CommitmentKey -where - E::GE: PairingGroup, -{ - ck: Vec<::AffineGroupElement>, - tau_H: <::G2 as DlogGroup>::AffineGroupElement, // needed only for the verifier key -} - -impl Len for CommitmentKey -where - E::GE: PairingGroup, -{ - fn length(&self) -> usize { - self.ck.len() - } -} - -/// A KZG commitment -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - comm: ::GE, -} - -/// A compressed commitment (suitable for serialization) -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct CompressedCommitment -where - E: Engine, - E::GE: PairingGroup, -{ - comm: ::CompressedGroupElement, -} - -impl CommitmentTrait for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - type CompressedCommitment = CompressedCommitment; - - fn compress(&self) -> Self::CompressedCommitment { - CompressedCommitment { - comm: self.comm.compress(), - } - } - - fn to_coordinates(&self) -> (E::Base, E::Base, bool) { - self.comm.to_coordinates() - } - - fn decompress(c: &Self::CompressedCommitment) -> Result { - let comm = <::GE as DlogGroup>::CompressedGroupElement::decompress(&c.comm)?; - Ok(Commitment { comm }) - } -} - -impl Default for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - fn default() -> Self { - Commitment { - comm: E::GE::zero(), - } - } -} - -impl TranscriptReprTrait for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - fn to_transcript_bytes(&self) -> Vec { - let (x, y, is_infinity) = self.comm.to_coordinates(); - let is_infinity_byte = (!is_infinity).into(); - [ - x.to_transcript_bytes(), - y.to_transcript_bytes(), - [is_infinity_byte].to_vec(), - ] - .concat() - } -} - -impl AbsorbInROTrait for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - fn absorb_in_ro(&self, ro: &mut E::RO) { - let (x, y, is_infinity) = self.comm.to_coordinates(); - ro.absorb(x); - ro.absorb(y); - 
ro.absorb(if is_infinity { - E::Base::ONE - } else { - E::Base::ZERO - }); - } -} - -impl TranscriptReprTrait for CompressedCommitment -where - E::GE: PairingGroup, -{ - fn to_transcript_bytes(&self) -> Vec { - self.comm.to_transcript_bytes() - } -} - -impl MulAssign for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - fn mul_assign(&mut self, scalar: E::Scalar) { - let result = (self as &Commitment).comm * scalar; - *self = Commitment { comm: result }; - } -} - -impl<'a, 'b, E> Mul<&'b E::Scalar> for &'a Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - type Output = Commitment; - - fn mul(self, scalar: &'b E::Scalar) -> Commitment { - Commitment { - comm: self.comm * scalar, - } - } -} - -impl Mul for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - type Output = Commitment; - - fn mul(self, scalar: E::Scalar) -> Commitment { - Commitment { - comm: self.comm * scalar, - } - } -} - -impl Add for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - type Output = Commitment; - - fn add(self, other: Commitment) -> Commitment { - Commitment { - comm: self.comm + other.comm, - } - } -} - -/// Provides a commitment engine -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct CommitmentEngine { - _p: PhantomData, -} - -impl CommitmentEngineTrait for CommitmentEngine -where - E: Engine, - E::GE: PairingGroup, -{ - type Commitment = Commitment; - type CommitmentKey = CommitmentKey; - - fn setup(_label: &'static [u8], n: usize) -> Self::CommitmentKey { - // NOTE: this is for testing purposes and should not be used in production - // TODO: we need to decide how to generate load/store parameters - let tau = E::Scalar::random(OsRng); - let num_gens = n.next_power_of_two(); - - // Compute powers of tau in E::Scalar, then scalar muls in parallel - let mut powers_of_tau: Vec = Vec::with_capacity(num_gens); - powers_of_tau.insert(0, E::Scalar::ONE); - for i in 1..num_gens { - powers_of_tau.insert(i, powers_of_tau[i - 1] * tau); - } - - let ck: Vec> = (0..num_gens) - .into_par_iter() - .map(|i| (::gen() * powers_of_tau[i]).affine()) - .collect(); - - let tau_H = (<::G2 as DlogGroup>::gen() * tau).affine(); - - Self::CommitmentKey { ck, tau_H } - } - - fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment { - assert!(ck.ck.len() >= v.len()); - Commitment { - comm: E::GE::vartime_multiscalar_mul(v, &ck.ck[..v.len()]), - } - } -} - -/// Provides an implementation of generators for proving evaluations -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct ProverKey { - _p: PhantomData, -} - -/// A verifier key -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct VerifierKey -where - E::GE: PairingGroup, -{ - G: G1Affine, - H: G2Affine, - tau_H: G2Affine, -} +use ref_cast::RefCast; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// Provides an implementation of a polynomial evaluation argument #[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct EvaluationArgument -where - E::GE: PairingGroup, -{ - com: Vec>, - w: Vec>, - v: Vec>, +#[serde(bound( + serialize = "E::G1Affine: Serialize, E::Fr: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>, E::Fr: Deserialize<'de>" +))] +pub struct EvaluationArgument { + comms: Vec, + w: Vec, + evals: Vec>, } /// Provides an implementation of a polynomial evaluation engine using KZG #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct EvaluationEngine { - _p: PhantomData, +pub struct 
EvaluationEngine { + _p: PhantomData<(E, NE)>, } -impl EvaluationEngine +// This impl block defines helper functions that are not a part of +// EvaluationEngineTrait, but that we will use to implement the trait methods. +impl EvaluationEngine where E: Engine, - E::GE: PairingGroup, + NE: NovaEngine, + E::G1: DlogGroup, + E::Fr: TranscriptReprTrait, + E::G1Affine: TranscriptReprTrait, { - // This impl block defines helper functions that are not a part of - // EvaluationEngineTrait, but that we will use to implement the trait methods. - fn compute_challenge(com: &[G1Affine], transcript: &mut ::TE) -> E::Scalar { + fn compute_challenge( + com: &[E::G1Affine], + transcript: &mut impl TranscriptEngineTrait, + ) -> E::Fr { transcript.absorb(b"c", &com.to_vec().as_slice()); - transcript.squeeze(b"c").unwrap() } // Compute challenge q = Hash(vk, C0, ..., C_{k-1}, u0, ...., u_{t-1}, // (f_i(u_j))_{i=0..k-1,j=0..t-1}) - fn get_batch_challenge(v: &[Vec], transcript: &mut ::TE) -> E::Scalar { + // It is assumed that both 'C' and 'u' are already absorbed by the transcript + fn get_batch_challenge( + v: &[Vec], + transcript: &mut impl TranscriptEngineTrait, + ) -> E::Fr { transcript.absorb( b"v", &v.iter() .flatten() .cloned() - .collect::>() + .collect::>() .as_slice(), ); transcript.squeeze(b"r").unwrap() } - fn batch_challenge_powers(q: E::Scalar, k: usize) -> Vec { + fn batch_challenge_powers(q: E::Fr, k: usize) -> Vec { // Compute powers of q : (1, q, q^2, ..., q^(k-1)) - let mut q_powers = vec![E::Scalar::ONE; k]; - for i in 1..k { - q_powers[i] = q_powers[i - 1] * q; - } - q_powers + std::iter::successors(Some(E::Fr::ONE), |&x| Some(x * q)) + .take(k) + .collect() } - fn verifier_second_challenge(W: &[G1Affine], transcript: &mut ::TE) -> E::Scalar { + fn verifier_second_challenge( + W: &[E::G1Affine], + transcript: &mut impl TranscriptEngineTrait, + ) -> E::Fr { transcript.absorb(b"W", &W.to_vec().as_slice()); transcript.squeeze(b"d").unwrap() } } -impl EvaluationEngineTrait for EvaluationEngine +impl EvaluationEngineTrait for EvaluationEngine where - E: Engine>, - E::GE: PairingGroup, + E: MultiMillerLoop, + NE: NovaEngine>, + E::Fr: Serialize + DeserializeOwned, + E::G1Affine: Serialize + DeserializeOwned, + E::G2Affine: Serialize + DeserializeOwned, + E::G1: DlogGroup, + ::Base: TranscriptReprTrait, // Note: due to the move of the bound TranscriptReprTrait on G::Base from Group to Engine + E::Fr: TranscriptReprTrait, + E::G1Affine: TranscriptReprTrait, { type EvaluationArgument = EvaluationArgument; - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - - fn setup( - ck: &>::CommitmentKey, - ) -> (Self::ProverKey, Self::VerifierKey) { - let pk = ProverKey { - _p: Default::default(), - }; - - let vk = VerifierKey { - G: E::GE::gen().affine(), - H: <::G2 as DlogGroup>::gen().affine(), - tau_H: ck.tau_H.clone(), - }; + type ProverKey = KZGProverKey; + type VerifierKey = KZGVerifierKey; - (pk, vk) + fn setup(ck: &UniversalKZGParam) -> (Self::ProverKey, Self::VerifierKey) { + ck.trim(ck.length() - 1) } fn prove( - ck: &CommitmentKey, + ck: &UniversalKZGParam, _pk: &Self::ProverKey, - transcript: &mut ::TE, - _C: &Commitment, - hat_P: &[E::Scalar], - point: &[E::Scalar], - _eval: &E::Scalar, + transcript: &mut ::TE, + _C: &Commitment, + hat_P: &[E::Fr], + point: &[E::Fr], + _eval: &E::Fr, ) -> Result { - let x: Vec = point.to_vec(); + let x: Vec = point.to_vec(); //////////////// begin helper closures ////////// - let kzg_open = |f: &[E::Scalar], u: E::Scalar| -> G1Affine { + let kzg_open 
= |f: &[E::Fr], u: E::Fr| -> E::G1Affine { // On input f(x) and u compute the witness polynomial used to prove // that f(u) = v. The main part of this is to compute the // division (f(x) - f(u)) / (x - u), but we don't use a general @@ -381,11 +149,11 @@ where // same. One advantage is that computing f(u) could be decoupled // from kzg_open, it could be done later or separate from computing W. - let compute_witness_polynomial = |f: &[E::Scalar], u: E::Scalar| -> Vec { + let compute_witness_polynomial = |f: &[E::Fr], u: E::Fr| -> Vec { let d = f.len(); // Compute h(x) = f(x)/(x - u) - let mut h = vec![E::Scalar::ZERO; d]; + let mut h = vec![E::Fr::ZERO; d]; for i in (1..d).rev() { h[i - 1] = f[i] + h[i] * u; } @@ -395,33 +163,23 @@ where let h = compute_witness_polynomial(f, u); - E::CE::commit(ck, &h).comm.affine() + >::commit(ck, &h) + .comm + .affine() }; - let kzg_open_batch = |f: &[Vec], - u: &[E::Scalar], - transcript: &mut ::TE| - -> (Vec>, Vec>) { - let poly_eval = |f: &[E::Scalar], u: E::Scalar| -> E::Scalar { - let mut v = f[0]; - let mut u_power = E::Scalar::ONE; - - for fi in f.iter().skip(1) { - u_power *= u; - v += u_power * fi; - } - - v - }; - - let scalar_vector_muladd = |a: &mut Vec, v: &Vec, s: E::Scalar| { + let kzg_open_batch = |f: &[Vec], + u: &[E::Fr], + transcript: &mut ::TE| + -> (Vec, Vec>) { + let scalar_vector_muladd = |a: &mut Vec, v: &Vec, s: E::Fr| { assert!(a.len() >= v.len()); - for i in 0..v.len() { - a[i] += s * v[i]; - } + a.par_iter_mut() + .zip(v.par_iter()) + .for_each(|(c, v)| *c += s * v); }; - let kzg_compute_batch_polynomial = |f: &[Vec], q: E::Scalar| -> Vec { + let kzg_compute_batch_polynomial = |f: &[Vec], q: E::Fr| -> Vec { let k = f.len(); // Number of polynomials we're batching let q_powers = Self::batch_challenge_powers(q, k); @@ -441,13 +199,12 @@ where // The verifier needs f_i(u_j), so we compute them here // (V will compute B(u_j) itself) - let mut v = vec![vec!(E::Scalar::ZERO; k); t]; + let mut v = vec![vec!(E::Fr::ZERO; k); t]; v.par_iter_mut().enumerate().for_each(|(i, v_i)| { // for each point u v_i.par_iter_mut().zip_eq(f).for_each(|(v_ij, f)| { - // for each poly f // for each poly f (except the last one - since it is constant) - *v_ij = poly_eval(f, u[i]); + *v_ij = UniPoly::ref_cast(f).evaluate(&u[i]); }); }); @@ -455,15 +212,11 @@ where let B = kzg_compute_batch_polynomial(f, q); // Now open B at u0, ..., u_{t-1} - let w = u - .into_par_iter() - .map(|ui| kzg_open(&B, *ui)) - .collect::>>(); + let w = u.par_iter().map(|ui| kzg_open(&B, *ui)).collect::>(); // The prover computes the challenge to keep the transcript in the same // state as that of the verifier let _d_0 = Self::verifier_second_challenge(&w, transcript); - (w, v) }; @@ -474,13 +227,14 @@ where assert_eq!(n, 1 << ell); // Below we assume that n is a power of two // Phase 1 -- create commitments com_1, ..., com_\ell - // We do not compute final Pi (and its commitment) as it is constant and equals to 'eval' - // also known to verifier, so can be derived on its side as well - let mut polys: Vec> = Vec::new(); + let mut polys: Vec> = Vec::new(); polys.push(hat_P.to_vec()); - for i in 0..ell - 1 { + + // We don't compute final Pi (and its commitment) as it is constant and equals to 'eval' + // also known to verifier, so can be derived on its side as well + for i in 0..x.len() - 1 { let Pi_len = polys[i].len() / 2; - let mut Pi = vec![E::Scalar::ZERO; Pi_len]; + let mut Pi = vec![E::Fr::ZERO; Pi_len]; #[allow(clippy::needless_range_loop)] 
Pi.par_iter_mut().enumerate().for_each(|(j, Pi_j)| { @@ -492,42 +246,47 @@ where // We do not need to commit to the first polynomial as it is already committed. // Compute commitments in parallel - let com: Vec> = (1..polys.len()) + let comms: Vec = (1..polys.len()) .into_par_iter() - .map(|i| E::CE::commit(ck, &polys[i]).comm.affine()) + .map(|i| { + >::commit(ck, &polys[i]) + .comm + .affine() + }) .collect(); // Phase 2 // We do not need to add x to the transcript, because in our context x was obtained from the transcript. // We also do not need to absorb `C` and `eval` as they are already absorbed by the transcript by the caller - let r = Self::compute_challenge(&com, transcript); + let r = Self::compute_challenge(&comms, transcript); let u = vec![r, -r, r * r]; // Phase 3 -- create response - let (w, v) = kzg_open_batch(&polys, &u, transcript); + let (w, evals) = kzg_open_batch(&polys, &u, transcript); - Ok(EvaluationArgument { com, w, v }) + Ok(EvaluationArgument { comms, w, evals }) } /// A method to verify purported evaluations of a batch of polynomials fn verify( vk: &Self::VerifierKey, - transcript: &mut ::TE, - C: &Commitment, - point: &[E::Scalar], - P_of_x: &E::Scalar, + transcript: &mut ::TE, + C: &Commitment, + point: &[E::Fr], + P_of_x: &E::Fr, pi: &Self::EvaluationArgument, ) -> Result<(), NovaError> { let x = point.to_vec(); let y = P_of_x; // vk is hashed in transcript already, so we do not add it here - let kzg_verify_batch = |vk: &VerifierKey, - C: &Vec>, - W: &Vec>, - u: &Vec, - v: &Vec>, - transcript: &mut ::TE| + + let kzg_verify_batch = |vk: &KZGVerifierKey, + C: &Vec, + W: &Vec, + u: &Vec, + v: &Vec>, + transcript: &mut ::TE| -> bool { let k = C.len(); let t = u.len(); @@ -538,13 +297,8 @@ where let d_0 = Self::verifier_second_challenge(W, transcript); let d_1 = d_0 * d_0; - // Shorthand to convert from preprocessed G1 elements to non-preprocessed - let from_ppG1 = |P: &G1Affine| ::group(P); - // Shorthand to convert from preprocessed G2 elements to non-preprocessed - let from_ppG2 = |P: &G2Affine| <::G2 as DlogGroup>::group(P); - - assert_eq!(t, 3); - assert_eq!(W.len(), 3); + assert!(t == 3); + assert!(W.len() == 3); // We write a special case for t=3, since this what is required for // hyperkzg. Following the paper directly, we must compute: // let L0 = C_B - vk.G * B_u[0] + W[0] * u[0]; @@ -563,9 +317,9 @@ where // L0, L1, L2 can be replaced by single MSM of C with the powers of q multiplied by (1 + d_0 + d_1) // with additionally concatenated inputs for scalars/bases. 
- let q_power_multiplier = E::Scalar::ONE + d_0 + d_1; + let q_power_multiplier = E::Fr::ONE + d_0 + d_1; - let q_powers_multiplied: Vec = q_powers + let q_powers_multiplied: Vec = q_powers .par_iter() .map(|q_power| *q_power * q_power_multiplier) .collect(); @@ -575,46 +329,55 @@ where let B_u = v .into_par_iter() .map(|v_i| zip_with!(iter, (q_powers, v_i), |a, b| *a * *b).sum()) - .collect::>(); + .collect::>(); - let L = E::GE::vartime_multiscalar_mul( + let L = NE::GE::vartime_multiscalar_mul( &[ &q_powers_multiplied[..k], &[ u[0], (u[1] * d_0), (u[2] * d_1), - -(B_u[0] + d_0 * B_u[1] + d_1 * B_u[2]), + (B_u[0] + d_0 * B_u[1] + d_1 * B_u[2]), ], ] .concat(), &[ &C[..k], - &[W[0].clone(), W[1].clone(), W[2].clone(), vk.G.clone()], + &[ + E::G1::from(W[0]).into(), + E::G1::from(W[1]).into(), + E::G1::from(W[2]).into(), + (-E::G1::from(vk.g)).into(), + ], ] .concat(), ); - let R0 = from_ppG1(&W[0]); - let R1 = from_ppG1(&W[1]); - let R2 = from_ppG1(&W[2]); + let R0 = E::G1::from(W[0]); + let R1 = E::G1::from(W[1]); + let R2 = E::G1::from(W[2]); let R = R0 + R1 * d_0 + R2 * d_1; // Check that e(L, vk.H) == e(R, vk.tau_H) - (::pairing(&L, &from_ppG2(&vk.H))) - == (::pairing(&R, &from_ppG2(&vk.tau_H))) + let pairing_inputs = [ + (&(-L).to_affine(), &E::G2Prepared::from(vk.h)), + (&R.to_affine(), &E::G2Prepared::from(vk.beta_h)), + ]; + + let pairing_result = E::multi_miller_loop(&pairing_inputs).final_exponentiation(); + pairing_result.is_identity().into() }; ////// END verify() closure helpers let ell = x.len(); - let mut com = pi.com.clone(); + let mut com = pi.comms.clone(); // we do not need to add x to the transcript, because in our context x was // obtained from the transcript let r = Self::compute_challenge(&com, transcript); - - if r == E::Scalar::ZERO || C.comm == E::GE::zero() { + if r == E::Fr::ZERO || C.comm == E::G1::identity() { return Err(NovaError::ProofVerifyError); } com.insert(0, C.comm.affine()); // set com_0 = C, shifts other commitments to the right @@ -622,7 +385,7 @@ where let u = vec![r, -r, r * r]; // Setup vectors (Y, ypos, yneg) from pi.v - let v = &pi.v; + let v = &pi.evals; if v.len() != 3 { return Err(NovaError::ProofVerifyError); } @@ -635,10 +398,10 @@ where Y.push(*y); // Check consistency of (Y, ypos, yneg) - let two = E::Scalar::from(2u64); + let two = E::Fr::from(2u64); for i in 0..ell { if two * r * Y[i + 1] - != r * (E::Scalar::ONE - x[ell - i - 1]) * (ypos[i] + yneg[i]) + != r * (E::Fr::ONE - x[ell - i - 1]) * (ypos[i] + yneg[i]) + x[ell - i - 1] * (ypos[i] - yneg[i]) { return Err(NovaError::ProofVerifyError); @@ -648,7 +411,7 @@ where } // Check commitments to (Y, ypos, yneg) are valid - if !kzg_verify_batch(vk, &com, &pi.w, &u, &pi.v, transcript) { + if !kzg_verify_batch(vk, &com, &pi.w, &u, &pi.evals, transcript) { return Err(NovaError::ProofVerifyError); } @@ -659,33 +422,35 @@ where #[cfg(test)] mod tests { use super::*; + use crate::provider::test_utils::prove_verify_from_num_vars; use crate::{ - provider::{keccak::Keccak256Transcript, Bn256EngineKZG}, - spartan::polys::multilinear::MultilinearPolynomial, + provider::keccak::Keccak256Transcript, provider::kzg_commitment::KZGCommitmentEngine, }; use bincode::Options; - use rand::SeedableRng; + use halo2curves::bn256::Bn256; - type E = Bn256EngineKZG; - type Fr = ::Scalar; + type E = halo2curves::bn256::Bn256; + type NE = crate::provider::Bn256EngineKZG; + type Fr = ::Scalar; #[test] fn test_hyperkzg_eval() { // Test with poly(X1, X2) = 1 + X1 + X2 + X1*X2 let n = 4; - let ck: CommitmentKey = 
CommitmentEngine::setup(b"test", n); - let (pk, vk): (ProverKey, VerifierKey) = EvaluationEngine::setup(&ck); + let ck = as CommitmentEngineTrait>::setup(b"test", n); + let (pk, vk) = EvaluationEngine::::setup(&ck); // poly is in eval. representation; evaluated at [(0,0), (0,1), (1,0), (1,1)] let poly = vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)]; - let C = CommitmentEngine::commit(&ck, &poly); + let C = as CommitmentEngineTrait>::commit(&ck, &poly); let test_inner = |point: Vec, eval: Fr| -> Result<(), NovaError> { - let mut tr = Keccak256Transcript::new(b"TestEval"); - let proof = EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).unwrap(); - let mut tr = Keccak256Transcript::new(b"TestEval"); - EvaluationEngine::verify(&vk, &mut tr, &C, &point, &eval, &proof) + let mut tr = Keccak256Transcript::::new(b"TestEval"); + let proof = + EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).unwrap(); + let mut tr = Keccak256Transcript::::new(b"TestEval"); + EvaluationEngine::::verify(&vk, &mut tr, &C, &point, &eval, &proof) }; // Call the prover with a (point, eval) pair. @@ -721,6 +486,7 @@ mod tests { } #[test] + #[allow(clippy::assigning_clones)] fn test_hyperkzg_small() { let n = 4; @@ -733,24 +499,37 @@ mod tests { // eval = 28 let eval = Fr::from(28); - let ck: CommitmentKey = CommitmentEngine::setup(b"test", n); - let (pk, vk) = EvaluationEngine::setup(&ck); + let ck = as CommitmentEngineTrait>::setup(b"test", n); + let (pk, vk) = EvaluationEngine::::setup(&ck); // make a commitment - let C = CommitmentEngine::commit(&ck, &poly); + let C = as CommitmentEngineTrait>::commit(&ck, &poly); // prove an evaluation let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); - let proof = - EvaluationEngine::::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval) - .unwrap(); + let proof = EvaluationEngine::::prove( + &ck, + &pk, + &mut prover_transcript, + &C, + &poly, + &point, + &eval, + ) + .unwrap(); let post_c_p = prover_transcript.squeeze(b"c").unwrap(); // verify the evaluation - let mut verifier_transcript = Keccak256Transcript::new(b"TestEval"); - assert!( - EvaluationEngine::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof).is_ok() - ); + let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); + assert!(EvaluationEngine::::verify( + &vk, + &mut verifier_transcript, + &C, + &point, + &eval, + &proof + ) + .is_ok()); let post_c_v = verifier_transcript.squeeze(b"c").unwrap(); // check if the prover transcript and verifier transcript are kept in the same state @@ -765,9 +544,10 @@ mod tests { // Change the proof and expect verification to fail let mut bad_proof = proof.clone(); - bad_proof.v[0] = bad_proof.v[1].clone(); - let mut verifier_transcript2 = Keccak256Transcript::new(b"TestEval"); - assert!(EvaluationEngine::verify( + + bad_proof.evals[0] = bad_proof.evals[1].clone(); + let mut verifier_transcript2 = Keccak256Transcript::::new(b"TestEval"); + assert!(EvaluationEngine::::verify( &vk, &mut verifier_transcript2, &C, @@ -779,40 +559,11 @@ mod tests { } #[test] + #[allow(clippy::assigning_clones)] fn test_hyperkzg_large() { - // test the hyperkzg prover and verifier with random instances (derived from a seed) - for ell in [4, 5, 6] { - let mut rng = rand::rngs::StdRng::seed_from_u64(ell as u64); - - let n = 1 << ell; // n = 2^ell - - let poly = (0..n).map(|_| Fr::random(&mut rng)).collect::>(); - let point = (0..ell).map(|_| Fr::random(&mut rng)).collect::>(); - let eval = 
MultilinearPolynomial::evaluate_with(&poly, &point); - - let ck: CommitmentKey = CommitmentEngine::setup(b"test", n); - let (pk, vk) = EvaluationEngine::setup(&ck); - - // make a commitment - let C = CommitmentEngine::commit(&ck, &poly); - - // prove an evaluation - let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); - let proof: EvaluationArgument = - EvaluationEngine::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval) - .unwrap(); - - // verify the evaluation - let mut verifier_tr = Keccak256Transcript::new(b"TestEval"); - assert!(EvaluationEngine::verify(&vk, &mut verifier_tr, &C, &point, &eval, &proof).is_ok()); - - // Change the proof and expect verification to fail - let mut bad_proof = proof.clone(); - bad_proof.v[0] = bad_proof.v[1].clone(); - let mut verifier_tr2 = Keccak256Transcript::new(b"TestEval"); - assert!( - EvaluationEngine::verify(&vk, &mut verifier_tr2, &C, &point, &eval, &bad_proof).is_err() - ); + // test the mlkzg prover and verifier with random instances (derived from a seed) + for num_vars in [4, 5, 6] { + prove_verify_from_num_vars::<_, EvaluationEngine>(num_vars); } } } diff --git a/src/provider/ipa_pc.rs b/src/provider/ipa_pc.rs index 37ab4b24..34f57b03 100644 --- a/src/provider/ipa_pc.rs +++ b/src/provider/ipa_pc.rs @@ -1,6 +1,6 @@ //! This module implements `EvaluationEngine` using an IPA-based polynomial commitment scheme use crate::{ - errors::NovaError, + errors::{NovaError, PCSError}, provider::{pedersen::CommitmentKeyExtTrait, traits::DlogGroup}, spartan::polys::eq::EqPolynomial, traits::{ @@ -403,7 +403,21 @@ where if P_hat == CE::::commit(&ck_hat.combine(&ck_c), &[self.a_hat, self.a_hat * b_hat]) { Ok(()) } else { - Err(NovaError::InvalidPCS) + Err(NovaError::PCSError(PCSError::InvalidIPA)) + } + } +} + +#[cfg(test)] +mod test { + use crate::provider::ipa_pc::EvaluationEngine; + use crate::provider::test_utils::prove_verify_from_num_vars; + use crate::provider::GrumpkinEngine; + + #[test] + fn test_multiple_polynomial_size() { + for num_vars in [4, 5, 6] { + prove_verify_from_num_vars::<_, EvaluationEngine>(num_vars); } } } diff --git a/src/provider/kzg_commitment.rs b/src/provider/kzg_commitment.rs new file mode 100644 index 00000000..025d0910 --- /dev/null +++ b/src/provider/kzg_commitment.rs @@ -0,0 +1,223 @@ +//! Commitment engine for KZG commitments +//! + +use std::marker::PhantomData; + +use ff::Field; +use group::{prime::PrimeCurveAffine, Curve, Group as _}; +use halo2curves::pairing::Engine; +use rand::rngs::StdRng; +use rand_core::{CryptoRng, RngCore, SeedableRng}; +use serde::{Deserialize, Serialize}; + +use crate::traits::{ + commitment::{CommitmentEngineTrait, Len}, + Engine as NovaEngine, Group, TranscriptReprTrait, +}; + +use crate::provider::{pedersen::Commitment, traits::DlogGroup}; + +/// `UniversalParams` are the universal parameters for the KZG10 scheme. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" +))] +pub struct UniversalKZGParam { + /// Group elements of the form `{ β^i G }`, where `i` ranges from 0 to + /// `degree`. + pub powers_of_g: Vec, + /// Group elements of the form `{ β^i H }`, where `i` ranges from 0 to + /// `degree`. + pub powers_of_h: Vec, +} + +// for the purpose of the Len trait, we count commitment bases, i.e. 
G1 elements +impl Len for UniversalKZGParam { + fn length(&self) -> usize { + self.powers_of_g.len() + } +} + +/// `UnivariateProverKey` is used to generate a proof +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>" +))] +pub struct KZGProverKey { + /// generators + pub powers_of_g: Vec, +} + +/// `UVKZGVerifierKey` is used to check evaluation proofs for a given +/// commitment. +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" +))] +pub struct KZGVerifierKey { + /// The generator of G1. + pub g: E::G1Affine, + /// The generator of G2. + pub h: E::G2Affine, + /// β times the above generator of G2. + pub beta_h: E::G2Affine, +} + +impl UniversalKZGParam { + /// Returns the maximum supported degree + pub fn max_degree(&self) -> usize { + self.powers_of_g.len() + } + + /// Trim the universal parameters to specialize the public parameters + /// for univariate polynomials to the given `supported_size`, and + /// returns prover key and verifier key. `supported_size` should + /// be in range `1..params.len()` + /// + /// # Panics + /// If `supported_size` is greater than `self.max_degree()`, or `self.max_degree()` is zero. + pub fn trim(&self, supported_size: usize) -> (KZGProverKey, KZGVerifierKey) { + let powers_of_g = self.powers_of_g[..=supported_size].to_vec(); + + let pk = KZGProverKey { powers_of_g }; + let vk = KZGVerifierKey { + g: self.powers_of_g[0], + h: self.powers_of_h[0], + beta_h: self.powers_of_h[1], + }; + (pk, vk) + } +} + +impl UniversalKZGParam { + /// Build SRS for testing. + /// WARNING: THIS FUNCTION IS FOR TESTING PURPOSE ONLY. + /// THE OUTPUT SRS SHOULD NOT BE USED IN PRODUCTION. + pub fn gen_srs_for_testing(mut rng: &mut R, max_degree: usize) -> Self { + let beta = E::Fr::random(&mut rng); + let g = E::G1::random(&mut rng); + let h = E::G2::random(rng); + + let (powers_of_g_projective, powers_of_h_projective) = rayon::join( + || { + (0..=max_degree) + .scan(g, |acc, _| { + let val = *acc; + *acc *= beta; + Some(val) + }) + .collect::>() + }, + || { + (0..=max_degree) + .scan(h, |acc, _| { + let val = *acc; + *acc *= beta; + Some(val) + }) + .collect::>() + }, + ); + + let mut powers_of_g = vec![E::G1Affine::identity(); powers_of_g_projective.len()]; + let mut powers_of_h = vec![E::G2Affine::identity(); powers_of_h_projective.len()]; + + rayon::join( + || E::G1::batch_normalize(&powers_of_g_projective, &mut powers_of_g), + || E::G2::batch_normalize(&powers_of_h_projective, &mut powers_of_h), + ); + + Self { + powers_of_g, + powers_of_h, + } + } +} + +/// Commitments +#[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>" +))] +pub struct UVKZGCommitment( + /// the actual commitment is an affine point. + pub E::G1Affine, +); + +impl TranscriptReprTrait for UVKZGCommitment +where + E::G1: DlogGroup, + // Note: due to the move of the bound TranscriptReprTrait on G::Base from Group to Engine + ::Base: TranscriptReprTrait, +{ + fn to_transcript_bytes(&self) -> Vec { + // TODO: avoid the round-trip through the group (to_curve .. 
to_coordinates) + let (x, y, is_infinity) = self.0.to_curve().to_coordinates(); + let is_infinity_byte = (!is_infinity).into(); + [ + x.to_transcript_bytes(), + y.to_transcript_bytes(), + [is_infinity_byte].to_vec(), + ] + .concat() + } +} + +/// Provides a commitment engine +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct KZGCommitmentEngine { + _p: PhantomData, +} + +impl> CommitmentEngineTrait + for KZGCommitmentEngine +where + E::G1: DlogGroup, + E::G1Affine: Serialize + for<'de> Deserialize<'de>, + E::G2Affine: Serialize + for<'de> Deserialize<'de>, +{ + type CommitmentKey = UniversalKZGParam; + type Commitment = Commitment; + + fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey { + // TODO: this is just for testing, replace by grabbing from a real setup for production + let mut bytes = [0u8; 32]; + let len = label.len().min(32); + bytes[..len].copy_from_slice(&label[..len]); + let rng = &mut StdRng::from_seed(bytes); + UniversalKZGParam::gen_srs_for_testing(rng, n.next_power_of_two()) + } + + fn commit(ck: &Self::CommitmentKey, v: &[::Scalar]) -> Self::Commitment { + assert!(ck.length() >= v.len()); + Commitment { + comm: E::G1::vartime_multiscalar_mul(v, &ck.powers_of_g[..v.len()]), + } + } +} + +impl> From> + for UVKZGCommitment +where + E::G1: Group, +{ + fn from(c: Commitment) -> Self { + UVKZGCommitment(c.comm.to_affine()) + } +} + +impl> From> + for Commitment +where + E::G1: Group, +{ + fn from(c: UVKZGCommitment) -> Self { + Commitment { + comm: c.0.to_curve(), + } + } +} diff --git a/src/provider/mod.rs b/src/provider/mod.rs index dd94c657..4df6153c 100644 --- a/src/provider/mod.rs +++ b/src/provider/mod.rs @@ -3,6 +3,7 @@ // public modules to be used as an evaluation engine with Spartan pub mod hyperkzg; pub mod ipa_pc; +pub mod non_hiding_zeromorph; // crate-public modules, made crate-public mostly for tests pub(crate) mod bn256_grumpkin; @@ -11,6 +12,11 @@ pub(crate) mod pedersen; pub(crate) mod poseidon; pub(crate) mod secp_secq; pub(crate) mod traits; +// a non-hiding variant of {kzg, zeromorph} +pub(crate) mod kzg_commitment; + +#[cfg(test)] +pub(crate) mod test_utils; // crate-private modules mod keccak; @@ -18,7 +24,6 @@ mod keccak; use crate::{ provider::{ bn256_grumpkin::{bn256, grumpkin}, - hyperkzg::CommitmentEngine as HyperKZGCommitmentEngine, keccak::Keccak256Transcript, pedersen::CommitmentEngine as PedersenCommitmentEngine, poseidon::{PoseidonRO, PoseidonROCircuit}, @@ -26,9 +31,12 @@ use crate::{ }, traits::Engine, }; +use halo2curves::bn256::Bn256; use pasta_curves::{pallas, vesta}; use serde::{Deserialize, Serialize}; +use self::kzg_commitment::KZGCommitmentEngine; + /// An implementation of Nova traits with HyperKZG over the BN256 curve #[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct Bn256EngineKZG; @@ -48,7 +56,7 @@ impl Engine for Bn256EngineKZG { type RO = PoseidonRO; type ROCircuit = PoseidonROCircuit; type TE = Keccak256Transcript; - type CE = HyperKZGCommitmentEngine; + type CE = KZGCommitmentEngine; } impl Engine for Bn256EngineIPA { @@ -71,6 +79,20 @@ impl Engine for GrumpkinEngine { type CE = PedersenCommitmentEngine; } +/// An implementation of the Nova `Engine` trait with BN254 curve and Zeromorph commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Bn256EngineZM; + +impl Engine for Bn256EngineZM { + type Base = bn256::Base; + type Scalar = bn256::Scalar; + type GE = bn256::Point; + type RO = PoseidonRO; + type ROCircuit = PoseidonROCircuit; + type TE 
= Keccak256Transcript; + type CE = KZGCommitmentEngine; +} + /// An implementation of the Nova `Engine` trait with Secp256k1 curve and Pedersen commitment scheme #[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct Secp256k1Engine; @@ -130,6 +152,7 @@ impl Engine for VestaEngine { #[cfg(test)] mod tests { use crate::provider::{bn256_grumpkin::bn256, secp_secq::secp256k1, traits::DlogGroup}; + use digest::{ExtendableOutput, Update}; use group::Curve; use halo2curves::CurveExt; diff --git a/src/provider/non_hiding_zeromorph.rs b/src/provider/non_hiding_zeromorph.rs new file mode 100644 index 00000000..bfc2a602 --- /dev/null +++ b/src/provider/non_hiding_zeromorph.rs @@ -0,0 +1,816 @@ +//! Non-hiding Zeromorph scheme for Multilinear Polynomials. +//! +//! + +use crate::{ + errors::{NovaError, PCSError}, + provider::{ + kzg_commitment::{ + KZGCommitmentEngine, KZGProverKey, KZGVerifierKey, UVKZGCommitment, UniversalKZGParam, + }, + traits::DlogGroup, + }, + spartan::polys::multilinear::MultilinearPolynomial, + traits::{ + commitment::Len, evaluation::EvaluationEngineTrait, Engine as NovaEngine, Group, + TranscriptEngineTrait, TranscriptReprTrait, + }, + Commitment, +}; +use ff::{BatchInvert, Field, PrimeField}; +use group::{Curve, Group as _}; +use halo2curves::pairing::{Engine, MillerLoopResult, MultiMillerLoop}; +use itertools::Itertools as _; +use rayon::{ + iter::IntoParallelRefIterator, + prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator}, +}; +use ref_cast::RefCast; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use std::{borrow::Borrow, iter, marker::PhantomData}; + +/// Polynomial Evaluation +#[derive(Debug, Clone, Eq, PartialEq, Default)] +pub struct UVKZGEvaluation(pub E::Fr); + +#[derive(Debug, Clone, Eq, PartialEq, Default)] + +/// Proofs +pub struct UVKZGProof { + /// proof + pub proof: E::G1Affine, +} + +/// Polynomial and its associated types +pub type UVKZGPoly = crate::spartan::polys::univariate::UniPoly; + +#[derive(Debug, Clone, Eq, PartialEq, Default)] +/// KZG Polynomial Commitment Scheme on univariate polynomial. +/// Note: this is non-hiding, which is why we will implement traits on this token struct, +/// as we expect to have several impls for the trait pegged on the same instance of a pairing::Engine. +#[allow(clippy::upper_case_acronyms)] +pub struct UVKZGPCS { + #[doc(hidden)] + phantom: PhantomData, +} + +impl UVKZGPCS +where + E::G1: DlogGroup, +{ + /// Generate a commitment for a polynomial + /// Note that the scheme is not hidding + pub fn commit( + prover_param: impl Borrow>, + poly: &UVKZGPoly, + ) -> Result, NovaError> { + let prover_param = prover_param.borrow(); + + if poly.degree() > prover_param.powers_of_g.len() { + return Err(NovaError::PCSError(PCSError::LengthError)); + } + let C = ::vartime_multiscalar_mul( + poly.coeffs.as_slice(), + &prover_param.powers_of_g.as_slice()[..poly.coeffs.len()], + ); + Ok(UVKZGCommitment(C.to_affine())) + } + + /// On input a polynomial `p` and a point `point`, outputs a proof for the + /// same. 
+ pub fn open( + prover_param: impl Borrow>, + polynomial: &UVKZGPoly, + point: &E::Fr, + ) -> Result<(UVKZGProof, UVKZGEvaluation), NovaError> { + let prover_param = prover_param.borrow(); + let divisor = UVKZGPoly { + coeffs: vec![-*point, E::Fr::ONE], + }; + let witness_polynomial = polynomial + .divide_with_q_and_r(&divisor) + .map(|(q, _r)| q) + .ok_or(NovaError::PCSError(PCSError::ZMError))?; + let proof = ::vartime_multiscalar_mul( + witness_polynomial.coeffs.as_slice(), + &prover_param.powers_of_g.as_slice()[..witness_polynomial.coeffs.len()], + ); + let evaluation = UVKZGEvaluation(polynomial.evaluate(point)); + + Ok(( + UVKZGProof { + proof: proof.to_affine(), + }, + evaluation, + )) + } +} + +/// `ZMProverKey` is used to generate a proof +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>" +))] +pub struct ZMProverKey { + commit_pp: KZGProverKey, + open_pp: KZGProverKey, +} + +/// `ZMVerifierKey` is used to check evaluation proofs for a given +/// commitment. +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" +))] +pub struct ZMVerifierKey { + vp: KZGVerifierKey, + s_offset_h: E::G2Affine, +} + +/// Trim the universal parameters to specialize the public parameters +/// for multilinear polynomials to the given `max_degree`, and +/// returns prover key and verifier key. `supported_size` should +/// be in range `1..params.len()` +/// +/// # Panics +/// If `supported_size` is greater than `self.max_degree()`, or `self.max_degree()` is zero. +// +// TODO: important, we need a better way to handle that the commitment key should be 2^max_degree sized, +// see the runtime error in commit() below +pub fn trim( + params: &UniversalKZGParam, + max_degree: usize, +) -> (ZMProverKey, ZMVerifierKey) { + let (commit_pp, vp) = params.trim(max_degree); + let offset = params.powers_of_g.len() - max_degree; + let open_pp = { + let offset_powers_of_g1 = params.powers_of_g[offset..].to_vec(); + KZGProverKey { + powers_of_g: offset_powers_of_g1, + } + }; + let s_offset_h = params.powers_of_h[offset]; + + ( + ZMProverKey { commit_pp, open_pp }, + ZMVerifierKey { vp, s_offset_h }, + ) +} + +/// Commitments +#[derive(Debug, Clone, Eq, PartialEq, Default, Serialize, Deserialize)] +pub struct ZMCommitment( + /// the actual commitment is an affine point. + pub E::G1Affine, +); + +impl From> for ZMCommitment { + fn from(value: UVKZGCommitment) -> Self { + ZMCommitment(value.0) + } +} + +impl From> for UVKZGCommitment { + fn from(value: ZMCommitment) -> Self { + UVKZGCommitment(value.0) + } +} + +/// Polynomial Evaluation +#[derive(Debug, Clone, Eq, PartialEq, Default)] +pub struct ZMEvaluation(E::Fr); + +impl From> for ZMEvaluation { + fn from(value: UVKZGEvaluation) -> Self { + ZMEvaluation(value.0) + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Default, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>" +))] +/// Proofs +pub struct ZMProof { + /// proof + pub pi: E::G1Affine, + /// Polynomial commitment to qhat + pub cqhat: UVKZGCommitment, + /// Polynomial commitment to qk + pub ck: Vec>, +} + +#[derive(Debug, Clone, Eq, PartialEq, Default)] +/// Zeromorph Polynomial Commitment Scheme on multilinear polynomials. 
+/// Note: this is non-hiding, which is why we will implement the EvaluationEngineTrait on this token struct, +/// as we will have several impls for the trait pegged on the same instance of a pairing::Engine. +#[allow(clippy::upper_case_acronyms)] +pub struct ZMPCS { + #[doc(hidden)] + phantom: PhantomData<(E, NE)>, +} + +impl> ZMPCS +where + E::G1: DlogGroup, + // Note: due to the move of the bound TranscriptReprTrait on G::Base from Group to Engine + ::Base: TranscriptReprTrait, +{ + const fn protocol_name() -> &'static [u8] { + b"Zeromorph" + } + + /// Generate a commitment for a polynomial + /// Note that the scheme is not hidding + pub fn commit( + pp: impl Borrow>, + poly: &MultilinearPolynomial, + ) -> Result, NovaError> { + let pp = pp.borrow(); + if pp.commit_pp.powers_of_g.len() < poly.Z.len() { + return Err(PCSError::LengthError.into()); + } + UVKZGPCS::commit(&pp.commit_pp, UVKZGPoly::ref_cast(&poly.Z)).map(|c| c.into()) + } + + /// On input a polynomial `poly` and a point `point`, outputs a proof for the + /// same. + pub fn open( + pp: &impl Borrow>, + comm: &ZMCommitment, + poly: &MultilinearPolynomial, + point: &[E::Fr], + eval: &ZMEvaluation, + transcript: &mut impl TranscriptEngineTrait, + ) -> Result, NovaError> { + transcript.dom_sep(Self::protocol_name()); + + let pp = pp.borrow(); + if pp.commit_pp.powers_of_g.len() < poly.Z.len() { + return Err(NovaError::PCSError(PCSError::LengthError)); + } + + debug_assert_eq!(Self::commit(pp, poly).unwrap().0, comm.0); + debug_assert_eq!(poly.evaluate(point), eval.0); + + let (quotients, remainder) = quotients(poly, point); + debug_assert_eq!(quotients.len(), poly.get_num_vars()); + debug_assert_eq!(remainder, eval.0); + + // Compute the multilinear quotients q_k = q_k(X_0, ..., X_{k-1}) + let quotients_polys = quotients + .into_iter() + .map(UVKZGPoly::new) + .collect::>(); + + // Compute and absorb commitments C_{q_k} = [q_k], k = 0,...,d-1 + let q_comms = quotients_polys + .par_iter() + .map(|q| UVKZGPCS::commit(&pp.commit_pp, q)) + .collect::, _>>()?; + q_comms.iter().for_each(|c| transcript.absorb(b"quo", c)); + + // Get challenge y + let y = transcript.squeeze(b"y")?; + + // Compute the batched, lifted-degree quotient `\hat{q}` + // qq_hat = ∑_{i=0}^{num_vars-1} y^i * X^(2^num_vars - d_k - 1) * q_i(x) + let q_hat = batched_lifted_degree_quotient(y, "ients_polys); + // Compute and absorb the commitment C_q = [\hat{q}] + let q_hat_comm = UVKZGPCS::commit(&pp.commit_pp, &q_hat)?; + transcript.absorb(b"q_hat", &q_hat_comm); + + // Get challenges x and z + let x = transcript.squeeze(b"x")?; + let z = transcript.squeeze(b"z")?; + + // Compute batched degree and ZM-identity quotient polynomial pi + let (eval_scalar, (degree_check_q_scalars, zmpoly_q_scalars)) = + eval_and_quotient_scalars(y, x, z, point); + // f = z * poly.Z + q_hat + (-z * Φ_n(x) * e) + ∑_k (q_scalars_k * q_k) + let mut f = UVKZGPoly::new(poly.Z.clone()); + f *= &z; + f += &q_hat; + f[0] += eval_scalar * eval.0; + quotients_polys + .into_iter() + .zip_eq(degree_check_q_scalars) + .zip_eq(zmpoly_q_scalars) + .for_each(|((mut q, degree_check_scalar), zm_poly_scalar)| { + q *= &(degree_check_scalar + zm_poly_scalar); + f += &q; + }); + debug_assert_eq!(f.evaluate(&x), E::Fr::ZERO); + // hence uveval == Fr::ZERO + + // Compute and send proof commitment pi + let (uvproof, _uveval): (UVKZGProof<_>, UVKZGEvaluation<_>) = + UVKZGPCS::::open(&pp.open_pp, &f, &x)?; + + let proof = ZMProof { + pi: uvproof.proof, + cqhat: q_hat_comm, + ck: q_comms, + }; + + Ok(proof) + } + + 
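Read together with `verify` below, `commit` and `open` compose into a short prove/verify round trip. A minimal, test-style sketch of that flow follows; it assumes it lives inside this module (e.g. in its test submodule, since the field of `ZMEvaluation` is private), and the function name, transcript label and RNG seed are purely illustrative:

use ff::Field;
use halo2curves::bn256::{Bn256, Fr};
use rand_chacha::ChaCha20Rng;
use rand_core::SeedableRng;

use crate::provider::Bn256EngineZM;
use crate::traits::{
  commitment::{CommitmentEngineTrait, Len},
  Engine as NovaEngine, TranscriptEngineTrait,
};

// Assumes this module's items (`trim`, `ZMPCS`, `ZMEvaluation`, `MultilinearPolynomial`) are in scope.
fn zm_round_trip_sketch() {
  let num_vars = 4;
  let mut rng = ChaCha20Rng::from_seed([0u8; 32]);

  // Test-only SRS (the same one `KZGCommitmentEngine::setup` produces), split into keys
  // exactly as the `EvaluationEngineTrait::setup` impl below does.
  let params = <Bn256EngineZM as NovaEngine>::CE::setup(b"zm_example", 1 << num_vars);
  let (pk, vk) = trim(&params, params.length() - 1);

  // Random multilinear polynomial, a random point, and its claimed evaluation.
  let poly: MultilinearPolynomial<Fr> = MultilinearPolynomial::random(num_vars, &mut rng);
  let point: Vec<Fr> = (0..num_vars).map(|_| Fr::random(&mut rng)).collect();
  let eval = ZMEvaluation(poly.evaluate(&point));

  let comm = ZMPCS::<Bn256, Bn256EngineZM>::commit(&pk, &poly).unwrap();

  // Prover and verifier must drive transcripts created with the same label.
  let mut p_transcript = <Bn256EngineZM as NovaEngine>::TE::new(b"zm_example");
  let proof =
    ZMPCS::<Bn256, Bn256EngineZM>::open(&pk, &comm, &poly, &point, &eval, &mut p_transcript).unwrap();

  let mut v_transcript = <Bn256EngineZM as NovaEngine>::TE::new(b"zm_example");
  assert!(
    ZMPCS::<Bn256, Bn256EngineZM>::verify(&vk, &mut v_transcript, &comm, &point, &eval, &proof).unwrap()
  );
}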
/// Verifies that `value` is the evaluation at `x` of the polynomial + /// committed inside `comm`. + pub fn verify( + vk: &impl Borrow>, + transcript: &mut impl TranscriptEngineTrait, + comm: &ZMCommitment, + point: &[E::Fr], + evaluation: &ZMEvaluation, + proof: &ZMProof, + ) -> Result { + transcript.dom_sep(Self::protocol_name()); + + let vk = vk.borrow(); + + // Receive commitments [q_k] + proof.ck.iter().for_each(|c| transcript.absorb(b"quo", c)); + + // Challenge y + let y = transcript.squeeze(b"y")?; + + // Receive commitment C_{q} + transcript.absorb(b"q_hat", &proof.cqhat); + + // Challenges x, z + let x = transcript.squeeze(b"x")?; + let z = transcript.squeeze(b"z")?; + + let (eval_scalar, (mut q_scalars, zmpoly_q_scalars)) = + eval_and_quotient_scalars(y, x, z, point); + q_scalars + .iter_mut() + .zip_eq(zmpoly_q_scalars) + .for_each(|(scalar, zm_poly_scalar)| { + *scalar += zm_poly_scalar; + }); + let scalars = [vec![E::Fr::ONE, z, eval_scalar * evaluation.0], q_scalars].concat(); + let bases = [ + vec![proof.cqhat.0, comm.0, vk.vp.g], + proof.ck.iter().map(|c| c.0).collect(), + ] + .concat(); + let c = ::vartime_multiscalar_mul(&scalars, &bases).to_affine(); + + let pi = proof.pi; + + let pairing_inputs = [ + (&c, &(-vk.s_offset_h).into()), + ( + &pi, + &(E::G2::from(vk.vp.beta_h) - (vk.vp.h * x)) + .to_affine() + .into(), + ), + ]; + + let pairing_result = E::multi_miller_loop(&pairing_inputs).final_exponentiation(); + Ok(pairing_result.is_identity().into()) + } +} + +/// Computes the quotient polynomials of a given multilinear polynomial with respect to a specific input point. +/// +/// Given a multilinear polynomial `poly` and a point `point`, this function calculates the quotient polynomials `q_k` +/// and the evaluation at `point`, such that: +/// +/// ```text +/// poly - poly(point) = Σ (X_k - point_k) * q_k(X_0, ..., X_{k-1}) +/// ``` +/// +/// where `poly(point)` is the evaluation of `poly` at `point`, and each `q_k` is a polynomial in `k` variables. 
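/// For intuition, the smallest case: with one variable, `f(X_0) = f(0) + X_0 * (f(1) - f(0))`, so
///
/// ```text
/// f - f(u_0) = (X_0 - u_0) * q_0,    where q_0 = f(1) - f(0) is a constant,
/// ```
///
/// which is exactly the `r_hi - r_lo` difference the loop below reads off the evaluation table.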
+/// +/// Since our evaluations are presented in order reverse from the coefficients, if we want to interpret index q_k +/// to be the k-th coefficient in the polynomials returned here, the equality that holds is: +/// +/// ```text +/// poly - poly(point) = Σ (X_{n-1-k} - point_{n-1-k}) * q_k(X_0, ..., X_{k-1}) +/// ``` +/// +fn quotients(poly: &MultilinearPolynomial, point: &[F]) -> (Vec>, F) { + let num_var = poly.get_num_vars(); + assert_eq!(num_var, point.len()); + + let mut remainder = poly.Z.to_vec(); + let mut quotients = point + .iter() + .enumerate() + .map(|(idx, x_i)| { + let (remainder_lo, remainder_hi) = remainder.split_at_mut(1 << (num_var - 1 - idx)); + let mut quotient = vec![F::ZERO; remainder_lo.len()]; + + quotient + .par_iter_mut() + .zip_eq(&*remainder_lo) + .zip_eq(&*remainder_hi) + .for_each(|((q, r_lo), r_hi)| { + *q = *r_hi - *r_lo; + }); + remainder_lo + .par_iter_mut() + .zip_eq(remainder_hi) + .for_each(|(r_lo, r_hi)| { + *r_lo += (*r_hi - r_lo as &_) * x_i; + }); + + remainder.truncate(1 << (num_var - 1 - idx)); + + quotient + }) + .collect::>>(); + quotients.reverse(); + + (quotients, remainder[0]) +} + +// Compute the batched, lifted-degree quotient `\hat{q}` +fn batched_lifted_degree_quotient( + y: F, + quotients_polys: &[UVKZGPoly], +) -> UVKZGPoly { + let num_vars = quotients_polys.len(); + + let powers_of_y = (0..num_vars) + .scan(F::ONE, |acc, _| { + let val = *acc; + *acc *= y; + Some(val) + }) + .collect::>(); + + #[allow(clippy::disallowed_methods)] + let q_hat = powers_of_y + .iter() + .zip_eq(quotients_polys.iter().map(|qp| qp.as_ref())) + .enumerate() + .fold( + vec![F::ZERO; 1 << num_vars], + |mut q_hat, (idx, (power_of_y, q))| { + let offset = q_hat.len() - (1 << idx); + q_hat[offset..] + .par_iter_mut() + .zip(q) + .for_each(|(q_hat, q)| { + *q_hat += *power_of_y * *q; + }); + q_hat + }, + ); + UVKZGPoly::new(q_hat) +} + +/// Computes some key terms necessary for computing the partially evaluated univariate ZM polynomial +fn eval_and_quotient_scalars(y: F, x: F, z: F, point: &[F]) -> (F, (Vec, Vec)) { + let num_vars = point.len(); + + // squares_of_x = [x, x^2, .. x^{2^k}, .. 
x^{2^num_vars}] + let squares_of_x = iter::successors(Some(x), |&x| Some(x.square())) + .take(num_vars + 1) + .collect::>(); + // offsets_of_x = [Π_{j=i}^{num_vars-1} x^(2^j), i ∈ [0, num_vars-1]] = [x^(2^num_vars - d_i - 1), i ∈ [0, num_vars-1]] + let offsets_of_x = { + let mut offsets_of_x = squares_of_x + .iter() + .rev() + .skip(1) + .scan(F::ONE, |state, power_of_x| { + *state *= power_of_x; + Some(*state) + }) + .collect::>(); + offsets_of_x.reverse(); + offsets_of_x + }; + + // vs = [ (x^(2^num_vars) - 1) / (x^(2^i) - 1), i ∈ [0, num_vars-1]] + // Note Φ_(n-i)(x^(2^i)) = (x^(2^i))^(2^(n-i) - 1) / (x^(2^i) - 1) = (x^(2^num_vars) - 1) / (x^(2^i) - 1) = vs[i] + // Φ_(n-i-1)(x^(2^(i+1))) = (x^(2^(i+1)))^(2^(n-i-1)) - 1 / (x^(2^(i+1)) - 1) = (x^(2^num_vars) - 1) / (x^(2^(i+1)) - 1) = vs[i+1] + let vs = { + let v_numer = squares_of_x[num_vars] - F::ONE; + let mut v_denoms = squares_of_x + .iter() + .map(|square_of_x| *square_of_x - F::ONE) + .collect::>(); + v_denoms.iter_mut().batch_invert(); + v_denoms + .iter() + .map(|v_denom| v_numer * v_denom) + .collect::>() + }; + + // q_scalars = [- (y^i * x^(2^num_vars - d_i - 1) + z * (x^(2^i) * vs[i+1] - u_i * vs[i])), i ∈ [0, num_vars-1]] + // = [- (y^i * x^(2^num_vars - d_i - 1) + z * (x^(2^i) * Φ_(n-i-1)(x^(2^(i+1))) - u_i * Φ_(n-i)(x^(2^i)))), i ∈ [0, num_vars-1]] + #[allow(clippy::disallowed_methods)] + let q_scalars = iter::successors(Some(F::ONE), |acc| Some(*acc * y)).take(num_vars) + .zip_eq(offsets_of_x) + // length: num_vars + 1 + .zip(squares_of_x) + // length: num_vars + 1 + .zip(&vs) + .zip_eq(&vs[1..]) + .zip_eq(point.iter().rev()) // assume variables come in BE form + .map( + |(((((power_of_y, offset_of_x), square_of_x), v_i), v_j), u_i)| { + (-(power_of_y * offset_of_x), -(z * (square_of_x * v_j - *u_i * v_i))) + }, + ) + .unzip(); + + // -vs[0] * z = -z * (x^(2^num_vars) - 1) / (x - 1) = -z Φ_n(x) + (-vs[0] * z, q_scalars) +} + +impl>> + EvaluationEngineTrait for ZMPCS +where + E::G1: DlogGroup, + E::G1Affine: Serialize + DeserializeOwned, + E::G2Affine: Serialize + DeserializeOwned, + ::Base: TranscriptReprTrait, // Note: due to the move of the bound TranscriptReprTrait on G::Base from Group to Engine +{ + type ProverKey = ZMProverKey; + type VerifierKey = ZMVerifierKey; + + type EvaluationArgument = ZMProof; + + fn setup(ck: &UniversalKZGParam) -> (Self::ProverKey, Self::VerifierKey) { + trim(ck, ck.length() - 1) + } + + fn prove( + _ck: &UniversalKZGParam, + pk: &Self::ProverKey, + transcript: &mut NE::TE, + comm: &Commitment, + poly: &[NE::Scalar], + point: &[NE::Scalar], + eval: &NE::Scalar, + ) -> Result { + let commitment = ZMCommitment::from(UVKZGCommitment::from(*comm)); + let polynomial = MultilinearPolynomial::new(poly.to_vec()); + let evaluation = ZMEvaluation(*eval); + + ZMPCS::open(pk, &commitment, &polynomial, point, &evaluation, transcript) + } + + fn verify( + vk: &Self::VerifierKey, + transcript: &mut NE::TE, + comm: &Commitment, + point: &[NE::Scalar], + eval: &NE::Scalar, + arg: &Self::EvaluationArgument, + ) -> Result<(), NovaError> { + let commitment = ZMCommitment::from(UVKZGCommitment::from(*comm)); + let evaluation = ZMEvaluation(*eval); + + if !ZMPCS::verify(vk, transcript, &commitment, point, &evaluation, arg)? 
{ + return Err(NovaError::UnSat); + } + Ok(()) + } +} + +#[cfg(test)] +mod test { + + use ff::{Field, PrimeField}; + use halo2curves::bn256::Bn256; + use halo2curves::bn256::Fr as Scalar; + use itertools::Itertools as _; + use rand_chacha::ChaCha20Rng; + use rand_core::SeedableRng; + + use super::quotients; + use crate::{ + provider::{ + non_hiding_zeromorph::UVKZGPoly, + non_hiding_zeromorph::{batched_lifted_degree_quotient, eval_and_quotient_scalars, ZMPCS}, + test_utils::prove_verify_from_num_vars, + Bn256EngineZM, + }, + spartan::polys::multilinear::MultilinearPolynomial, + }; + + #[test] + fn test_multiple_polynomial_size() { + for num_vars in [4, 5, 6] { + prove_verify_from_num_vars::<_, ZMPCS>(num_vars); + } + } + + #[test] + fn test_quotients() { + // Define size parameters + let num_vars = 4; // Example number of variables for the multilinear polynomial + + // Construct a random multilinear polynomial f, and u such that f(u) = v. + let mut rng = ChaCha20Rng::from_seed([0u8; 32]); + let poly = MultilinearPolynomial::random(num_vars, &mut rng); + let u_challenge: Vec<_> = (0..num_vars).map(|_| Scalar::random(&mut rng)).collect(); + let v_evaluation = poly.evaluate(&u_challenge); + + // Compute the multilinear quotients q_k = q_k(X_0, ..., X_{k-1}) + let (quotients, constant_term) = quotients(&poly, &u_challenge); + + // Assert that the constant term is equal to v_evaluation + assert_eq!(constant_term, v_evaluation, "The constant term should be equal to the evaluation of the polynomial at the challenge point."); + + // Check that the identity holds for a random evaluation point z + // poly - poly(z) = Σ (X_k - z_k) * q_k(X_0, ..., X_{k-1}) + // except for our inversion of coefficient order in polynomials and points (see below) + let z_challenge: Vec<_> = (0..num_vars).map(|_| Scalar::random(&mut rng)).collect(); + let mut result = poly.evaluate(&z_challenge); + result -= v_evaluation; + + for (k, q_k) in quotients.iter().enumerate() { + let q_k_poly = MultilinearPolynomial::new(q_k.clone()); + // quotient polynomials are coefficiented in reverse order from evaluation + // IOW in LE this should be let z_partial = &z_challenge[..k]; + let z_partial = &z_challenge[z_challenge.len() - k..]; + + let q_k_eval = q_k_poly.evaluate(z_partial); + // quotient polynomials are coefficiented in reverse order from evaluation + // IOW in LE this should be + // result -= (z_challenge[k] - u_challenge[k]) * q_k_eval; + result -= (z_challenge[z_challenge.len() - k - 1] - u_challenge[z_challenge.len() - k - 1]) + * q_k_eval; + } + + // Assert that the result is zero, which verifies the correctness of the quotients + assert!( + bool::from(result.is_zero()), + "The computed quotients should satisfy the polynomial identity." 
+ ); + } + + #[test] + fn test_batched_lifted_degree_quotient() { + let mut rng = ChaCha20Rng::from_seed([0u8; 32]); + + let num_vars = 3; + let n = 1 << num_vars; // Assuming N = 2^num_vars + + // Define mock q_k with deg(q_k) = 2^k - 1 + let q_0 = UVKZGPoly::new(vec![Scalar::one()]); + let q_1 = UVKZGPoly::new(vec![Scalar::from(2), Scalar::from(3)]); + let q_2 = UVKZGPoly::new(vec![ + Scalar::from(4), + Scalar::from(5), + Scalar::from(6), + Scalar::from(7), + ]); + let quotients = vec![q_0, q_1, q_2]; + + // Generate a random y challenge + let y_challenge = Scalar::random(&mut rng); + + // Compute batched quotient \hat{q} using the function + let batched_quotient = batched_lifted_degree_quotient(y_challenge, "ients); + + // Now explicitly define q_k_lifted = X^{N-2^k} * q_k and compute the expected batched result + let q_0_lifted = [vec![Scalar::zero(); n - 1], vec![Scalar::one()]].concat(); + let q_1_lifted = [ + vec![Scalar::zero(); n - 2], + vec![Scalar::from(2), Scalar::from(3)], + ] + .concat(); + let q_2_lifted = [ + vec![Scalar::zero(); n - 4], + vec![ + Scalar::from(4), + Scalar::from(5), + Scalar::from(6), + Scalar::from(7), + ], + ] + .concat(); + + // Explicitly compute \hat{q} + let mut batched_quotient_expected = vec![Scalar::zero(); n]; + batched_quotient_expected + .iter_mut() + .zip_eq(q_0_lifted) + .zip_eq(q_1_lifted) + .zip_eq(q_2_lifted) + .for_each(|(((res, q_0), q_1), q_2)| { + *res += q_0 + y_challenge * q_1 + y_challenge * y_challenge * q_2; + }); + + // Compare the computed and expected batched quotients + assert_eq!(batched_quotient, UVKZGPoly::new(batched_quotient_expected)); + } + + #[test] + fn test_partially_evaluated_quotient_zeta() { + let mut rng = ChaCha20Rng::from_seed([0u8; 32]); + + let num_vars = 3; + + // Define some mock q_k with deg(q_k) = 2^k - 1 + let _q_0 = UVKZGPoly::new(vec![Scalar::one()]); + let _q_1 = UVKZGPoly::new(vec![Scalar::from(2), Scalar::from(3)]); + let _q_2 = UVKZGPoly::new(vec![ + Scalar::from(4), + Scalar::from(5), + Scalar::from(6), + Scalar::from(7), + ]); + + let y_challenge = Scalar::random(&mut rng); + + let x_challenge = Scalar::random(&mut rng); + + // Unused in this test + let u_challenge: Vec<_> = (0..num_vars).map(|_| Scalar::random(&mut rng)).collect(); + let z_challenge = Scalar::random(&mut rng); + + // Construct ζ_x using the function + let (_eval_scalar, (zeta_x_scalars, _right_quo_scalars)) = + eval_and_quotient_scalars(y_challenge, x_challenge, z_challenge, &u_challenge); + + // Now construct ζ_x explicitly + let n: u64 = 1 << num_vars; + // q_batched - \sum_k q_k * y^k * x^{N - deg(q_k) - 1} + assert_eq!(zeta_x_scalars[0], -x_challenge.pow([n - 1])); + assert_eq!( + zeta_x_scalars[1], + -y_challenge * x_challenge.pow_vartime([n - 1 - 1]) + ); + assert_eq!( + zeta_x_scalars[2], + -y_challenge * y_challenge * x_challenge.pow_vartime([n - 3 - 1]) + ); + } + + // Evaluate phi using an inefficient formula + fn phi(challenge: F, n: usize) -> F { + let length = 1 << n; + let mut result = F::ZERO; + let mut current = F::ONE; // Start with x^0 + + for _ in 0..length { + result += current; + current *= challenge; // Increment the power of x for the next iteration + } + + result + } + + #[test] + fn test_partially_evaluated_quotient_z() { + let num_vars: usize = 3; + + let mut rng = ChaCha20Rng::from_seed([0u8; 32]); + + // Define some mock q_k with deg(q_k) = 2^k - 1 + let _q_0 = UVKZGPoly::new(vec![Scalar::one()]); + let _q_1 = UVKZGPoly::new(vec![Scalar::from(2), Scalar::from(3)]); + let _q_2 = UVKZGPoly::new(vec![ + 
Scalar::from(4), + Scalar::from(5), + Scalar::from(6), + Scalar::from(7), + ]); + + // Unused in this test + let y_challenge = Scalar::random(&mut rng); + + let x_challenge = Scalar::random(&mut rng); + let z_challenge = Scalar::random(&mut rng); + + let u_challenge: Vec<_> = (0..num_vars).map(|_| Scalar::random(&mut rng)).collect(); + + // Construct Z_x using the function + let (_eval_scalar, (_left_quo_scalars, zeta_x_scalars)) = + eval_and_quotient_scalars(y_challenge, x_challenge, z_challenge, &u_challenge); + + // beware the Nova coefficient evaluation order! + let u_rev = { + let mut res = u_challenge.clone(); + res.reverse(); + res + }; + + // Compute Z_x directly + for k in 0..num_vars { + let x_pow_2k = x_challenge.pow([1 << k]); + let x_pow_2kp1 = x_challenge.pow([1 << (k + 1)]); + let mut scalar = + x_pow_2k * phi(x_pow_2kp1, num_vars - k - 1) - u_rev[k] * phi(x_pow_2k, num_vars - k); + scalar *= z_challenge; + scalar *= -Scalar::ONE; + assert_eq!(zeta_x_scalars[k], scalar); + } + } +} diff --git a/src/provider/test_utils/mod.rs b/src/provider/test_utils/mod.rs new file mode 100644 index 00000000..7377c5ea --- /dev/null +++ b/src/provider/test_utils/mod.rs @@ -0,0 +1,115 @@ +//! Utilities for provider module. +//! Contains utilities for testing and benchmarking. +use crate::spartan::polys::multilinear::MultilinearPolynomial; +use crate::traits::{commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine}; +use ff::Field; +use rand::rngs::StdRng; +use rand_core::{CryptoRng, RngCore}; + +/// Returns a random polynomial, a point and calculate its evaluation. +fn random_poly_with_eval( + num_vars: usize, + mut rng: &mut R, +) -> ( + MultilinearPolynomial<::Scalar>, + Vec<::Scalar>, + ::Scalar, +) { + // Generate random polynomial and point. + let poly = MultilinearPolynomial::random(num_vars, &mut rng); + let point = (0..num_vars) + .map(|_| ::Scalar::random(&mut rng)) + .collect::>(); + + // Calculation evaluation of point over polynomial. + let eval = MultilinearPolynomial::evaluate_with(poly.evaluations(), &point); + + (poly, point, eval) +} + +/// Methods used to test the prove and verify flow of [`MultilinearPolynomial`] Commitment Schemes +/// (PCS). +/// +/// Generates a random polynomial and point from a seed to test a proving/verifying flow of one +/// of our [`EvaluationEngine`]. +pub(crate) fn prove_verify_from_num_vars>(num_vars: usize) { + use rand_core::SeedableRng; + + let mut rng = rand::rngs::StdRng::seed_from_u64(num_vars as u64); + + let (poly, point, eval) = random_poly_with_eval::(num_vars, &mut rng); + + // Mock commitment key. + let ck = E::CE::setup(b"test", 1 << num_vars); + // Commits to the provided vector using the provided generators. + let commitment = E::CE::commit(&ck, poly.evaluations()); + + prove_verify_with::(&ck, &commitment, &poly, &point, &eval, true) +} + +fn prove_verify_with>( + ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, + commitment: &<::CE as CommitmentEngineTrait>::Commitment, + poly: &MultilinearPolynomial<::Scalar>, + point: &[::Scalar], + eval: &::Scalar, + evaluate_bad_proof: bool, +) { + use crate::traits::TranscriptEngineTrait; + use std::ops::Add; + + // Generate Prover and verifier key for given commitment key. + let (prover_key, verifier_key) = EE::setup(ck); + + // Generate proof. 
+ let mut prover_transcript = E::TE::new(b"TestEval"); + let proof = EE::prove( + ck, + &prover_key, + &mut prover_transcript, + commitment, + poly.evaluations(), + point, + eval, + ) + .unwrap(); + let pcp = prover_transcript.squeeze(b"c").unwrap(); + + // Verify proof. + let mut verifier_transcript = E::TE::new(b"TestEval"); + EE::verify( + &verifier_key, + &mut verifier_transcript, + commitment, + point, + eval, + &proof, + ) + .unwrap(); + let pcv = verifier_transcript.squeeze(b"c").unwrap(); + + // Check if the prover transcript and verifier transcript are kept in the same state. + assert_eq!(pcp, pcv); + + if evaluate_bad_proof { + // Generate another point to verify proof. Also produce eval. + let altered_verifier_point = point + .iter() + .map(|s| s.add(::Scalar::ONE)) + .collect::>(); + let altered_verifier_eval = + MultilinearPolynomial::evaluate_with(poly.evaluations(), &altered_verifier_point); + + // Verify proof, should fail. + let mut verifier_transcript = E::TE::new(b"TestEval"); + assert!(EE::verify( + &verifier_key, + &mut verifier_transcript, + commitment, + &altered_verifier_point, + &altered_verifier_eval, + &proof, + ) + .is_err()); + } +} diff --git a/src/provider/traits.rs b/src/provider/traits.rs index 28367b01..c96a5ea2 100644 --- a/src/provider/traits.rs +++ b/src/provider/traits.rs @@ -96,19 +96,6 @@ pub trait DlogGroup: fn to_coordinates(&self) -> (::Base, ::Base, bool); } -/// A trait that defines extensions to the DlogGroup trait, to be implemented for -/// elliptic curve groups that are pairing friendly -pub trait PairingGroup: DlogGroup { - /// A type representing the second group - type G2: DlogGroup; - - /// A type representing the target group - type GT: PartialEq + Eq; - - /// A method to compute a pairing - fn pairing(p: &Self, q: &Self::G2) -> Self::GT; -} - /// This implementation behaves in ways specific to the halo2curves suite of curves in: // - to_coordinates, // - vartime_multiscalar_mul, where it does not call into accelerated implementations. 
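With `PairingGroup` removed, the only pairing the crate still needs is the product check that `ZMPCS::verify` issues directly through halo2curves' `MultiMillerLoop`. A hedged sketch of that pattern, written generically over the pairing engine (the helper name is illustrative and not part of this patch):

use group::Group;
use halo2curves::pairing::{MillerLoopResult, MultiMillerLoop};

/// Returns true iff the pairing product Π e(p_i, q_i) is the identity of the target group.
fn pairing_product_is_one<E: MultiMillerLoop>(terms: &[(E::G1Affine, E::G2Affine)]) -> bool {
  // `multi_miller_loop` takes prepared G2 points; the pairing traits provide
  // `G2Prepared: From<G2Affine>`, which is what the `.into()` calls in `ZMPCS::verify` use.
  let prepared: Vec<E::G2Prepared> = terms.iter().map(|(_, q)| E::G2Prepared::from(q.clone())).collect();
  let inputs: Vec<(&E::G1Affine, &E::G2Prepared)> =
    terms.iter().zip(&prepared).map(|((p, _), q)| (p, q)).collect();
  bool::from(E::multi_miller_loop(&inputs).final_exponentiation().is_identity())
}

`ZMPCS::verify` instantiates this shape with two terms assembled from the commitment, the proof and the verifier key, which is why no Nova-side pairing trait is required anymore.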
diff --git a/src/r1cs/mod.rs b/src/r1cs/mod.rs index 56ef1394..3b2b726a 100644 --- a/src/r1cs/mod.rs +++ b/src/r1cs/mod.rs @@ -480,7 +480,7 @@ impl RelaxedR1CSInstance { let mut r_instance = RelaxedR1CSInstance::default(ck, S); r_instance.comm_W = instance.comm_W; r_instance.u = E::Scalar::ONE; - r_instance.X = instance.X.clone(); + r_instance.X.clone_from(&instance.X); r_instance } diff --git a/src/spartan/direct.rs b/src/spartan/direct.rs index 80986a2a..98e60287 100644 --- a/src/spartan/direct.rs +++ b/src/spartan/direct.rs @@ -169,6 +169,7 @@ mod tests { use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; use core::marker::PhantomData; use ff::PrimeField; + use halo2curves::bn256::Bn256; #[derive(Clone, Debug, Default)] struct CubicCircuit { @@ -229,7 +230,7 @@ mod tests { test_direct_snark_with::(); type E2 = Bn256EngineKZG; - type EE2 = crate::provider::hyperkzg::EvaluationEngine; + type EE2 = crate::provider::hyperkzg::EvaluationEngine; type S2 = crate::spartan::snark::RelaxedR1CSSNARK; test_direct_snark_with::(); @@ -277,7 +278,7 @@ mod tests { assert!(res.is_ok()); // set input to the next step - z_i = z_i_plus_one.clone(); + z_i.clone_from(&z_i_plus_one); } // sanity: check the claimed output with a direct computation of the same diff --git a/src/spartan/math.rs b/src/spartan/math.rs index 691fec5d..22dbce17 100644 --- a/src/spartan/math.rs +++ b/src/spartan/math.rs @@ -1,23 +1,8 @@ pub trait Math { - fn pow2(self) -> usize; - fn get_bits(self, num_bits: usize) -> Vec; fn log_2(self) -> usize; } impl Math for usize { - #[inline] - fn pow2(self) -> usize { - let base: usize = 2; - base.pow(self as u32) - } - - /// Returns the `num_bits` from n in a canonical order - fn get_bits(self, num_bits: usize) -> Vec { - (0..num_bits) - .map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0)) - .collect::>() - } - fn log_2(self) -> usize { assert_ne!(self, 0); diff --git a/src/spartan/mod.rs b/src/spartan/mod.rs index d74133cf..55d640f2 100644 --- a/src/spartan/mod.rs +++ b/src/spartan/mod.rs @@ -26,12 +26,9 @@ use rayon::{iter::IntoParallelRefIterator, prelude::*}; // Creates a vector of the first `n` powers of `s`. fn powers(s: &E::Scalar, n: usize) -> Vec { assert!(n >= 1); - let mut powers = Vec::with_capacity(n); - powers.push(E::Scalar::ONE); - for i in 1..n { - powers.push(powers[i - 1] * s); - } - powers + std::iter::successors(Some(E::Scalar::ONE), |&x| Some(x * s)) + .take(n) + .collect() } /// A type that holds a witness to a polynomial evaluation instance diff --git a/src/spartan/polys/mod.rs b/src/spartan/polys/mod.rs index a1a192ef..e26fd782 100644 --- a/src/spartan/polys/mod.rs +++ b/src/spartan/polys/mod.rs @@ -2,6 +2,9 @@ pub(crate) mod eq; pub(crate) mod identity; pub(crate) mod masked_eq; +#[cfg(feature = "bench")] pub(crate) mod multilinear; +#[cfg(not(feature = "bench"))] +pub mod multilinear; pub(crate) mod power; pub(crate) mod univariate; diff --git a/src/spartan/polys/multilinear.rs b/src/spartan/polys/multilinear.rs index 45710f79..68695872 100644 --- a/src/spartan/polys/multilinear.rs +++ b/src/spartan/polys/multilinear.rs @@ -1,15 +1,12 @@ //! Main components: //! - `MultilinearPolynomial`: Dense representation of multilinear polynomials, represented by evaluations over all possible binary inputs. //! - `SparsePolynomial`: Efficient representation of sparse multilinear polynomials, storing only non-zero evaluations. 
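The additions below (`random`, `evaluations`, and the feature-gated visibility in `polys/mod.rs` above) are what `test_utils` and the new `pcs` bench build on. A small sketch showing that the instance method and the slice-based `evaluate_with` agree (the function name and seed are illustrative):

use ff::Field;
use halo2curves::bn256::Fr;
use rand_chacha::ChaCha20Rng;
use rand_core::SeedableRng;

use crate::spartan::polys::multilinear::MultilinearPolynomial;

fn multilinear_evaluation_sketch() {
  let num_vars = 3;
  let mut rng = ChaCha20Rng::from_seed([1u8; 32]);

  // `random` fills the 2^num_vars evaluation table over the Boolean hypercube.
  let poly: MultilinearPolynomial<Fr> = MultilinearPolynomial::random(num_vars, &mut rng);
  assert_eq!(poly.evaluations().len(), 1 << num_vars);

  // Evaluating at an arbitrary field point: the instance method and the slice-based
  // `evaluate_with` (used by `test_utils` and the `pcs` bench) give the same result.
  let point: Vec<Fr> = (0..num_vars).map(|_| Fr::random(&mut rng)).collect();
  assert_eq!(
    poly.evaluate(&point),
    MultilinearPolynomial::evaluate_with(poly.evaluations(), &point)
  );
}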
- use std::ops::{Add, Index}; use ff::PrimeField; use itertools::Itertools as _; -use rayon::prelude::{ - IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, - IntoParallelRefMutIterator, ParallelIterator, -}; +use rand_core::{CryptoRng, RngCore}; +use rayon::prelude::*; use serde::{Deserialize, Serialize}; use crate::spartan::{math::Math, polys::eq::EqPolynomial}; @@ -36,6 +33,7 @@ pub struct MultilinearPolynomial { pub(crate) Z: Vec, // evaluations of the polynomial in all the 2^num_vars Boolean inputs } +#[allow(clippy::len_without_is_empty)] impl MultilinearPolynomial { /// Creates a new `MultilinearPolynomial` from the given evaluations. /// @@ -47,6 +45,11 @@ impl MultilinearPolynomial { MultilinearPolynomial { num_vars, Z } } + /// evaluations of the polynomial in all the 2^num_vars Boolean inputs + pub fn evaluations(&self) -> &[Scalar] { + &self.Z[..] + } + /// Returns the number of variables in the multilinear polynomial pub const fn get_num_vars(&self) -> usize { self.num_vars @@ -57,6 +60,15 @@ impl MultilinearPolynomial { self.Z.len() } + /// Returns a random polynomial + pub fn random(num_vars: usize, mut rng: &mut R) -> Self { + MultilinearPolynomial::new( + std::iter::from_fn(|| Some(Scalar::random(&mut rng))) + .take(1 << num_vars) + .collect(), + ) + } + /// Binds the polynomial's top variable using the given scalar. /// /// This operation modifies the polynomial in-place. @@ -169,7 +181,7 @@ mod tests { use super::*; use rand_chacha::ChaCha20Rng; - use rand_core::{CryptoRng, RngCore, SeedableRng}; + use rand_core::SeedableRng; fn make_mlp(len: usize, value: F) -> MultilinearPolynomial { MultilinearPolynomial { diff --git a/src/spartan/polys/univariate.rs b/src/spartan/polys/univariate.rs index 4bb96c5a..ec4c0b19 100644 --- a/src/spartan/polys/univariate.rs +++ b/src/spartan/polys/univariate.rs @@ -1,17 +1,24 @@ //! Main components: //! - `UniPoly`: an univariate dense polynomial in coefficient form (big endian), //! - `CompressedUniPoly`: a univariate dense polynomial, compressed (omitted linear term), in coefficient form (little endian), +use std::{ + cmp::Ordering, + ops::{AddAssign, Index, IndexMut, MulAssign}, +}; + use ff::PrimeField; -use rayon::prelude::{IntoParallelIterator, ParallelIterator}; +use rayon::prelude::{IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; +use ref_cast::RefCast; use serde::{Deserialize, Serialize}; use crate::traits::{Group, TranscriptReprTrait}; // ax^2 + bx + c stored as vec![c, b, a] // ax^3 + bx^2 + cx + d stored as vec![d, c, b, a] -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, RefCast)] +#[repr(transparent)] pub struct UniPoly { - coeffs: Vec, + pub coeffs: Vec, } // ax^2 + bx + c stored as vec![c, a] @@ -22,6 +29,61 @@ pub struct CompressedUniPoly { } impl UniPoly { + pub fn new(coeffs: Vec) -> Self { + let mut res = UniPoly { coeffs }; + res.truncate_leading_zeros(); + res + } + + fn zero() -> Self { + UniPoly::new(Vec::new()) + } + + /// Divide self by another polynomial, and returns the + /// quotient and remainder. 
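/// Concretely, the returned pair `(q, r)` satisfies `self = q * divisor + r`, with `r` either
/// zero or of strictly smaller degree than `divisor`; for example, dividing `2x^2 + 3x + 1` by
/// `x + 1` yields `q = 2x + 1` and `r = 0`.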
+ pub fn divide_with_q_and_r(&self, divisor: &Self) -> Option<(UniPoly, UniPoly)> { + if self.is_zero() { + Some((UniPoly::zero(), UniPoly::zero())) + } else if divisor.is_zero() { + panic!("Dividing by zero polynomial") + } else if self.degree() < divisor.degree() { + Some((UniPoly::zero(), self.clone())) + } else { + // Now we know that self.degree() >= divisor.degree(); + let mut quotient = vec![Scalar::ZERO; self.degree() - divisor.degree() + 1]; + let mut remainder: UniPoly = self.clone(); + // Can unwrap here because we know self is not zero. + let divisor_leading_inv = divisor.leading_coefficient().unwrap().invert().unwrap(); + while !remainder.is_zero() && remainder.degree() >= divisor.degree() { + let cur_q_coeff = *remainder.leading_coefficient().unwrap() * divisor_leading_inv; + let cur_q_degree = remainder.degree() - divisor.degree(); + quotient[cur_q_degree] = cur_q_coeff; + + for (i, div_coeff) in divisor.coeffs.iter().enumerate() { + remainder.coeffs[cur_q_degree + i] -= &(cur_q_coeff * div_coeff); + } + while let Some(true) = remainder.coeffs.last().map(|c| c == &Scalar::ZERO) { + remainder.coeffs.pop(); + } + } + Some((UniPoly::new(quotient), remainder)) + } + } + + pub fn is_zero(&self) -> bool { + self.coeffs.is_empty() || self.coeffs.iter().all(|c| c == &Scalar::ZERO) + } + + fn truncate_leading_zeros(&mut self) { + while self.coeffs.last().map_or(false, |c| c == &Scalar::ZERO) { + self.coeffs.pop(); + } + } + + pub fn leading_coefficient(&self) -> Option<&Scalar> { + self.coeffs.last() + } + pub fn from_evals(evals: &[Scalar]) -> Self { // we only support degree-2 or degree-3 univariate polynomials assert!(evals.len() == 3 || evals.len() == 4); @@ -115,11 +177,61 @@ impl TranscriptReprTrait for UniPoly { .collect::>() } } + +impl Index for UniPoly { + type Output = Scalar; + + fn index(&self, index: usize) -> &Self::Output { + &self.coeffs[index] + } +} + +impl IndexMut for UniPoly { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + &mut self.coeffs[index] + } +} + +impl AddAssign<&Scalar> for UniPoly { + fn add_assign(&mut self, rhs: &Scalar) { + self.coeffs.par_iter_mut().for_each(|c| *c += rhs); + } +} + +impl MulAssign<&Scalar> for UniPoly { + fn mul_assign(&mut self, rhs: &Scalar) { + self.coeffs.par_iter_mut().for_each(|c| *c *= rhs); + } +} + +impl AddAssign<&Self> for UniPoly { + fn add_assign(&mut self, rhs: &Self) { + let ordering = self.coeffs.len().cmp(&rhs.coeffs.len()); + #[allow(clippy::disallowed_methods)] + for (lhs, rhs) in self.coeffs.iter_mut().zip(&rhs.coeffs) { + *lhs += rhs; + } + if matches!(ordering, Ordering::Less) { + self + .coeffs + .extend(rhs.coeffs[self.coeffs.len()..].iter().cloned()); + } + if matches!(ordering, Ordering::Equal) { + self.truncate_leading_zeros(); + } + } +} + +impl AsRef> for UniPoly { + fn as_ref(&self) -> &Vec { + &self.coeffs + } +} + #[cfg(test)] mod tests { - use crate::provider::{bn256_grumpkin, secp_secq::secp256k1}; - use super::*; + use crate::provider::{bn256_grumpkin, secp_secq::secp256k1}; fn test_from_evals_quad_with() { // polynomial is 2x^2 + 3x + 1