From 3e4c8ef6359da9ef4a2cc129dead8aa0709f52f3 Mon Sep 17 00:00:00 2001 From: Srinath Setty Date: Thu, 16 Nov 2023 18:11:20 -0800 Subject: [PATCH] Refactor traits that allows implementing different engines for the same curve cycle (#263) * rename Group to Engine * port tests * checkpoint * checkpoint * compiles * update benchmarks and tests to use Engine * rename GroupExt to DlogGroup * move get_curve_params to Group and rename * fix doc test; cargo fmt * inline single trait requirements; fix comments * cut comment --- benches/compressed-snark.rs | 65 ++- benches/compute-digest.rs | 23 +- benches/recursive-snark.rs | 37 +- benches/sha256.rs | 20 +- examples/minroot.rs | 51 +- examples/signature.rs | 312 ------------ src/bellpepper/mod.rs | 18 +- src/bellpepper/r1cs.rs | 40 +- src/bellpepper/shape_cs.rs | 28 +- src/bellpepper/solver.rs | 4 +- src/bellpepper/test_shape_cs.rs | 50 +- src/circuit.rs | 207 ++++---- src/digest.rs | 39 +- src/gadgets/ecc.rs | 375 +++++++-------- src/gadgets/nonnative/util.rs | 8 +- src/gadgets/r1cs.rs | 120 ++--- src/gadgets/utils.rs | 28 +- src/lib.rs | 827 +++++++++++++++----------------- src/nifs.rs | 143 +++--- src/provider/bn256_grumpkin.rs | 16 +- src/provider/ipa_pc.rs | 191 ++++---- src/provider/keccak.rs | 66 +-- src/provider/mod.rs | 23 +- src/provider/pasta.rs | 33 +- src/provider/pedersen.rs | 227 +++++---- src/provider/poseidon.rs | 51 +- src/provider/secp_secq.rs | 16 +- src/r1cs/mod.rs | 266 +++++----- src/r1cs/sparse.rs | 9 +- src/spartan/direct.rs | 113 +++-- src/spartan/mod.rs | 56 +-- src/spartan/polys/univariate.rs | 5 +- src/spartan/ppsnark.rs | 596 +++++++++++------------ src/spartan/snark.rs | 186 +++---- src/spartan/sumcheck.rs | 110 ++--- src/traits/circuit.rs | 5 +- src/traits/commitment.rs | 46 +- src/traits/evaluation.rs | 26 +- src/traits/mod.rs | 52 +- src/traits/snark.rs | 35 +- 40 files changed, 2097 insertions(+), 2426 deletions(-) delete mode 100644 examples/signature.rs diff --git a/benches/compressed-snark.rs b/benches/compressed-snark.rs index 32900e6f6..95fdccd01 100644 --- a/benches/compressed-snark.rs +++ b/benches/compressed-snark.rs @@ -5,27 +5,28 @@ use core::marker::PhantomData; use criterion::*; use ff::PrimeField; use nova_snark::{ + provider::pasta::{PallasEngine, VestaEngine}, traits::{ circuit::{StepCircuit, TrivialCircuit}, snark::RelaxedR1CSSNARKTrait, - Group, + Engine, }, CompressedSNARK, PublicParams, RecursiveSNARK, }; use std::time::Duration; -type G1 = pasta_curves::pallas::Point; -type G2 = pasta_curves::vesta::Point; -type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine; -type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine; +type E1 = PallasEngine; +type E2 = VestaEngine; +type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine; +type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine; // SNARKs without computational commitments -type S1 = nova_snark::spartan::snark::RelaxedR1CSSNARK; -type S2 = nova_snark::spartan::snark::RelaxedR1CSSNARK; +type S1 = nova_snark::spartan::snark::RelaxedR1CSSNARK; +type S2 = nova_snark::spartan::snark::RelaxedR1CSSNARK; // SNARKs with computational commitments -type SS1 = nova_snark::spartan::ppsnark::RelaxedR1CSSNARK; -type SS2 = nova_snark::spartan::ppsnark::RelaxedR1CSSNARK; -type C1 = NonTrivialCircuit<::Scalar>; -type C2 = TrivialCircuit<::Scalar>; +type SS1 = nova_snark::spartan::ppsnark::RelaxedR1CSSNARK; +type SS2 = nova_snark::spartan::ppsnark::RelaxedR1CSSNARK; +type C1 = NonTrivialCircuit<::Scalar>; +type C2 = TrivialCircuit<::Scalar>; // To run 
these benchmarks, first download `criterion` with `cargo install cargo install cargo-criterion`. // Then `cargo criterion --bench compressed-snark`. The results are located in `target/criterion/data/`. @@ -66,7 +67,7 @@ fn bench_compressed_snark(c: &mut Criterion) { let c_secondary = TrivialCircuit::default(); // Produce public parameters - let pp = PublicParams::::setup( + let pp = PublicParams::::setup( &c_primary, &c_secondary, &*S1::ck_floor(), @@ -78,12 +79,12 @@ fn bench_compressed_snark(c: &mut Criterion) { // produce a recursive SNARK let num_steps = 3; - let mut recursive_snark: RecursiveSNARK = RecursiveSNARK::new( + let mut recursive_snark: RecursiveSNARK = RecursiveSNARK::new( &pp, &c_primary, &c_secondary, - &[::Scalar::from(2u64)], - &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], ) .unwrap(); @@ -95,8 +96,8 @@ fn bench_compressed_snark(c: &mut Criterion) { let res = recursive_snark.verify( &pp, i + 1, - &[::Scalar::from(2u64)], - &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], ); assert!(res.is_ok()); } @@ -123,8 +124,8 @@ fn bench_compressed_snark(c: &mut Criterion) { .verify( black_box(&vk), black_box(num_steps), - black_box(&[::Scalar::from(2u64)]), - black_box(&[::Scalar::from(2u64)]), + black_box(&[::Scalar::from(2u64)]), + black_box(&[::Scalar::from(2u64)]), ) .is_ok()); }) @@ -153,7 +154,7 @@ fn bench_compressed_snark_with_computational_commitments(c: &mut Criterion) { let c_secondary = TrivialCircuit::default(); // Produce public parameters - let pp = PublicParams::::setup( + let pp = PublicParams::::setup( &c_primary, &c_secondary, &*SS1::ck_floor(), @@ -164,12 +165,12 @@ fn bench_compressed_snark_with_computational_commitments(c: &mut Criterion) { // produce a recursive SNARK let num_steps = 3; - let mut recursive_snark: RecursiveSNARK = RecursiveSNARK::new( + let mut recursive_snark: RecursiveSNARK = RecursiveSNARK::new( &pp, &c_primary, &c_secondary, - &[::Scalar::from(2u64)], - &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], ) .unwrap(); @@ -181,8 +182,8 @@ fn bench_compressed_snark_with_computational_commitments(c: &mut Criterion) { let res = recursive_snark.verify( &pp, i + 1, - &[::Scalar::from(2u64)], - &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], ); assert!(res.is_ok()); } @@ -209,8 +210,8 @@ fn bench_compressed_snark_with_computational_commitments(c: &mut Criterion) { .verify( black_box(&vk), black_box(num_steps), - black_box(&[::Scalar::from(2u64)]), - black_box(&[::Scalar::from(2u64)]), + black_box(&[::Scalar::from(2u64)]), + black_box(&[::Scalar::from(2u64)]), ) .is_ok()); }) @@ -226,10 +227,7 @@ struct NonTrivialCircuit { _p: PhantomData, } -impl NonTrivialCircuit -where - F: PrimeField, -{ +impl NonTrivialCircuit { pub fn new(num_cons: usize) -> Self { Self { num_cons, @@ -237,10 +235,7 @@ where } } } -impl StepCircuit for NonTrivialCircuit -where - F: PrimeField, -{ +impl StepCircuit for NonTrivialCircuit { fn arity(&self) -> usize { 1 } diff --git a/benches/compute-digest.rs b/benches/compute-digest.rs index 5d1360466..4f47e7b0b 100644 --- a/benches/compute-digest.rs +++ b/benches/compute-digest.rs @@ -4,18 +4,19 @@ use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use ff::PrimeField; use nova_snark::{ + provider::pasta::{PallasEngine, VestaEngine}, traits::{ circuit::{StepCircuit, TrivialCircuit}, snark::default_ck_hint, - 
Group, + Engine, }, PublicParams, }; -type G1 = pasta_curves::pallas::Point; -type G2 = pasta_curves::vesta::Point; -type C1 = NonTrivialCircuit<::Scalar>; -type C2 = TrivialCircuit<::Scalar>; +type E1 = PallasEngine; +type E2 = VestaEngine; +type C1 = NonTrivialCircuit<::Scalar>; +type C2 = TrivialCircuit<::Scalar>; criterion_group! { name = compute_digest; @@ -28,7 +29,7 @@ criterion_main!(compute_digest); fn bench_compute_digest(c: &mut Criterion) { c.bench_function("compute_digest", |b| { b.iter(|| { - PublicParams::::setup( + PublicParams::::setup( black_box(&C1::new(10)), black_box(&C2::default()), black_box(&*default_ck_hint()), @@ -44,10 +45,7 @@ struct NonTrivialCircuit { _p: PhantomData, } -impl NonTrivialCircuit -where - F: PrimeField, -{ +impl NonTrivialCircuit { pub fn new(num_cons: usize) -> Self { Self { num_cons, @@ -55,10 +53,7 @@ where } } } -impl StepCircuit for NonTrivialCircuit -where - F: PrimeField, -{ +impl StepCircuit for NonTrivialCircuit { fn arity(&self) -> usize { 1 } diff --git a/benches/recursive-snark.rs b/benches/recursive-snark.rs index 9f4dd8cc9..67f817b79 100644 --- a/benches/recursive-snark.rs +++ b/benches/recursive-snark.rs @@ -5,19 +5,20 @@ use core::marker::PhantomData; use criterion::*; use ff::PrimeField; use nova_snark::{ + provider::pasta::{PallasEngine, VestaEngine}, traits::{ circuit::{StepCircuit, TrivialCircuit}, snark::default_ck_hint, - Group, + Engine, }, PublicParams, RecursiveSNARK, }; use std::time::Duration; -type G1 = pasta_curves::pallas::Point; -type G2 = pasta_curves::vesta::Point; -type C1 = NonTrivialCircuit<::Scalar>; -type C2 = TrivialCircuit<::Scalar>; +type E1 = PallasEngine; +type E2 = VestaEngine; +type C1 = NonTrivialCircuit<::Scalar>; +type C2 = TrivialCircuit<::Scalar>; // To run these benchmarks, first download `criterion` with `cargo install cargo install cargo-criterion`. // Then `cargo criterion --bench recursive-snark`. The results are located in `target/criterion/data/`. 
@@ -57,7 +58,7 @@ fn bench_recursive_snark(c: &mut Criterion) { let c_secondary = TrivialCircuit::default(); // Produce public parameters - let pp = PublicParams::::setup( + let pp = PublicParams::::setup( &c_primary, &c_secondary, &*default_ck_hint(), @@ -69,12 +70,12 @@ fn bench_recursive_snark(c: &mut Criterion) { // the first step is cheaper than other steps owing to the presence of // a lot of zeros in the satisfying assignment let num_warmup_steps = 10; - let mut recursive_snark: RecursiveSNARK = RecursiveSNARK::new( + let mut recursive_snark: RecursiveSNARK = RecursiveSNARK::new( &pp, &c_primary, &c_secondary, - &[::Scalar::from(2u64)], - &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], ) .unwrap(); @@ -86,8 +87,8 @@ fn bench_recursive_snark(c: &mut Criterion) { let res = recursive_snark.verify( &pp, i + 1, - &[::Scalar::from(2u64)], - &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], ); assert!(res.is_ok()); } @@ -112,8 +113,8 @@ fn bench_recursive_snark(c: &mut Criterion) { .verify( black_box(&pp), black_box(num_warmup_steps), - black_box(&[::Scalar::from(2u64)]), - black_box(&[::Scalar::from(2u64)]), + black_box(&[::Scalar::from(2u64)]), + black_box(&[::Scalar::from(2u64)]), ) .is_ok()); }); @@ -128,10 +129,7 @@ struct NonTrivialCircuit { _p: PhantomData, } -impl NonTrivialCircuit -where - F: PrimeField, -{ +impl NonTrivialCircuit { pub fn new(num_cons: usize) -> Self { Self { num_cons, @@ -139,10 +137,7 @@ where } } } -impl StepCircuit for NonTrivialCircuit -where - F: PrimeField, -{ +impl StepCircuit for NonTrivialCircuit { fn arity(&self) -> usize { 1 } diff --git a/benches/sha256.rs b/benches/sha256.rs index 0da6b9161..43d3c3215 100644 --- a/benches/sha256.rs +++ b/benches/sha256.rs @@ -3,27 +3,29 @@ //! This code invokes a hand-written SHA-256 gadget from bellman/bellperson. //! It also uses code from bellman/bellperson to compare circuit-generated digest with sha2 crate's output #![allow(non_snake_case)] -type G1 = pasta_curves::pallas::Point; -type G2 = pasta_curves::vesta::Point; use bellpepper::gadgets::{sha256::sha256, Assignment}; use bellpepper_core::{ boolean::{AllocatedBit, Boolean}, num::{AllocatedNum, Num}, ConstraintSystem, SynthesisError, }; +use core::marker::PhantomData; use core::time::Duration; use criterion::*; use ff::{PrimeField, PrimeFieldBits}; use nova_snark::{ + provider::pasta::{PallasEngine, VestaEngine}, traits::{ circuit::{StepCircuit, TrivialCircuit}, snark::default_ck_hint, - Group, + Engine, }, PublicParams, RecursiveSNARK, }; use sha2::{Digest, Sha256}; -use std::marker::PhantomData; + +type E1 = PallasEngine; +type E2 = VestaEngine; #[derive(Clone, Debug)] struct Sha256Circuit { @@ -120,8 +122,8 @@ impl StepCircuit for Sha256Circuit< } } -type C1 = Sha256Circuit<::Scalar>; -type C2 = TrivialCircuit<::Scalar>; +type C1 = Sha256Circuit<::Scalar>; +type C2 = TrivialCircuit<::Scalar>; criterion_group! 
{ name = recursive_snark; @@ -156,7 +158,7 @@ fn bench_recursive_snark(c: &mut Criterion) { // Produce public parameters let ttc = TrivialCircuit::default(); - let pp = PublicParams::::setup( + let pp = PublicParams::::setup( &circuit_primary, &ttc, &*default_ck_hint(), @@ -164,8 +166,8 @@ fn bench_recursive_snark(c: &mut Criterion) { ); let circuit_secondary = TrivialCircuit::default(); - let z0_primary = vec![::Scalar::from(2u64)]; - let z0_secondary = vec![::Scalar::from(2u64)]; + let z0_primary = vec![::Scalar::from(2u64)]; + let z0_secondary = vec![::Scalar::from(2u64)]; group.bench_function("Prove", |b| { b.iter(|| { diff --git a/examples/minroot.rs b/examples/minroot.rs index 39771d110..dca39b34f 100644 --- a/examples/minroot.rs +++ b/examples/minroot.rs @@ -1,16 +1,15 @@ //! Demonstrates how to use Nova to produce a recursive proof of the correct execution of //! iterations of the `MinRoot` function, thereby realizing a Nova-based verifiable delay function (VDF). //! We execute a configurable number of iterations of the `MinRoot` function per step of Nova's recursion. -type G1 = pasta_curves::pallas::Point; -type G2 = pasta_curves::vesta::Point; use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; use ff::PrimeField; use flate2::{write::ZlibEncoder, Compression}; use nova_snark::{ + provider::pasta::{PallasEngine, VestaEngine}, traits::{ circuit::{StepCircuit, TrivialCircuit}, snark::default_ck_hint, - Group, + Engine, }, CompressedSNARK, PublicParams, RecursiveSNARK, }; @@ -19,6 +18,9 @@ use std::time::Instant; use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry}; use tracing_texray::TeXRayLayer; +type E1 = PallasEngine; +type E2 = VestaEngine; + #[derive(Clone, Debug)] struct MinRootIteration { x_i: F, @@ -74,10 +76,7 @@ struct MinRootCircuit { seq: Vec>, } -impl StepCircuit for MinRootCircuit -where - F: PrimeField, -{ +impl StepCircuit for MinRootCircuit { fn arity(&self) -> usize { 2 } @@ -149,10 +148,10 @@ fn main() { let circuit_primary = MinRootCircuit { seq: vec![ MinRootIteration { - x_i: ::Scalar::zero(), - y_i: ::Scalar::zero(), - x_i_plus_1: ::Scalar::zero(), - y_i_plus_1: ::Scalar::zero(), + x_i: ::Scalar::zero(), + y_i: ::Scalar::zero(), + x_i_plus_1: ::Scalar::zero(), + y_i_plus_1: ::Scalar::zero(), }; num_iters_per_step ], @@ -166,10 +165,10 @@ fn main() { let start = Instant::now(); println!("Producing public parameters..."); let pp = PublicParams::< - G1, - G2, - MinRootCircuit<::Scalar>, - TrivialCircuit<::Scalar>, + E1, + E2, + MinRootCircuit<::Scalar>, + TrivialCircuit<::Scalar>, >::setup( &circuit_primary, &circuit_secondary, @@ -199,8 +198,8 @@ fn main() { // produce non-deterministic advice let (z0_primary, minroot_iterations) = MinRootIteration::new( num_iters_per_step * num_steps, - &::Scalar::zero(), - &::Scalar::one(), + &::Scalar::zero(), + &::Scalar::one(), ); let minroot_circuits = (0..num_steps) .map(|i| MinRootCircuit { @@ -215,14 +214,14 @@ fn main() { }) .collect::>(); - let z0_secondary = vec![::Scalar::zero()]; + let z0_secondary = vec![::Scalar::zero()]; - type C1 = MinRootCircuit<::Scalar>; - type C2 = TrivialCircuit<::Scalar>; + type C1 = MinRootCircuit<::Scalar>; + type C2 = TrivialCircuit<::Scalar>; // produce a recursive SNARK println!("Generating a RecursiveSNARK..."); - let mut recursive_snark: RecursiveSNARK = - RecursiveSNARK::::new( + let mut recursive_snark: RecursiveSNARK = + RecursiveSNARK::::new( &pp, &minroot_circuits[0], &circuit_secondary, @@ -259,10 +258,10 @@ fn main() { let (pk, vk) = 
CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap(); let start = Instant::now(); - type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine; - type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine; - type S1 = nova_snark::spartan::snark::RelaxedR1CSSNARK; - type S2 = nova_snark::spartan::snark::RelaxedR1CSSNARK; + type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine; + type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine; + type S1 = nova_snark::spartan::snark::RelaxedR1CSSNARK; + type S2 = nova_snark::spartan::snark::RelaxedR1CSSNARK; let res = CompressedSNARK::<_, _, _, _, S1, S2>::prove(&pp, &pk, &recursive_snark); println!( diff --git a/examples/signature.rs b/examples/signature.rs deleted file mode 100644 index 2d06b73d8..000000000 --- a/examples/signature.rs +++ /dev/null @@ -1,312 +0,0 @@ -use bellpepper_core::{ - boolean::AllocatedBit, test_cs::TestConstraintSystem, ConstraintSystem, SynthesisError, -}; -use core::ops::{AddAssign, MulAssign}; -use ff::{ - derive::byteorder::{ByteOrder, LittleEndian}, - Field, PrimeField, PrimeFieldBits, -}; -use nova_snark::{gadgets::ecc::AllocatedPoint, traits::Group as NovaGroup}; -use num_bigint::BigUint; -use pasta_curves::{ - arithmetic::CurveAffine, - group::{Curve, Group}, -}; -use rand::{rngs::OsRng, RngCore}; -use sha3::{Digest, Sha3_512}; - -#[derive(Debug, Clone, Copy)] -pub struct SecretKey(G::Scalar); - -impl SecretKey -where - G: Group, -{ - pub fn random(mut rng: impl RngCore) -> Self { - let secret = G::Scalar::random(&mut rng); - Self(secret) - } -} - -#[derive(Debug, Clone, Copy)] -pub struct PublicKey(G); - -impl PublicKey -where - G: Group, -{ - pub fn from_secret_key(s: &SecretKey) -> Self { - let point = G::generator() * s.0; - Self(point) - } -} - -#[derive(Clone)] -pub struct Signature { - pub r: G, - pub s: G::Scalar, -} - -impl SecretKey -where - G: Group, -{ - pub fn sign(self, c: G::Scalar, mut rng: impl RngCore) -> Signature { - // T - let mut t = [0u8; 80]; - rng.fill_bytes(&mut t[..]); - - // h = H(T || M) - let h = Self::hash_to_scalar(b"Nova_Ecdsa_Hash", &t[..], c.to_repr().as_mut()); - - // R = [h]G - let r = G::generator().mul(h); - - // s = h + c * sk - let mut s = c; - - s.mul_assign(&self.0); - s.add_assign(&h); - - Signature { r, s } - } - - fn mul_bits>(s: &G::Scalar, bits: BitIterator) -> G::Scalar { - let mut x = G::Scalar::ZERO; - for bit in bits { - x = x.double(); - - if bit { - x.add_assign(s) - } - } - x - } - - fn to_uniform(digest: &[u8]) -> G::Scalar { - assert_eq!(digest.len(), 64); - let mut bits: [u64; 8] = [0; 8]; - LittleEndian::read_u64_into(digest, &mut bits); - Self::mul_bits(&G::Scalar::ONE, BitIterator::new(bits)) - } - - pub fn to_uniform_32(digest: &[u8]) -> G::Scalar { - assert_eq!(digest.len(), 32); - let mut bits: [u64; 4] = [0; 4]; - LittleEndian::read_u64_into(digest, &mut bits); - Self::mul_bits(&G::Scalar::ONE, BitIterator::new(bits)) - } - - pub fn hash_to_scalar(persona: &[u8], a: &[u8], b: &[u8]) -> G::Scalar { - let mut hasher = Sha3_512::new(); - hasher.update(persona); - hasher.update(a); - hasher.update(b); - let digest = hasher.finalize(); - Self::to_uniform(digest.as_ref()) - } -} - -impl PublicKey -where - G: Group, - G::Scalar: PrimeFieldBits, -{ - pub fn verify(&self, c: G::Scalar, signature: &Signature) -> bool { - let modulus = Self::modulus_as_scalar(); - let order_check_pk = self.0.mul(modulus); - if !order_check_pk.eq(&G::identity()) { - return false; - } - - let order_check_r = signature.r.mul(modulus); - if !order_check_r.eq(&G::identity()) 
{ - return false; - } - - // 0 = [-s]G + R + [c]PK - self - .0 - .mul(c) - .add(&signature.r) - .add(G::generator().mul(signature.s).neg()) - .eq(&G::identity()) - } - - fn modulus_as_scalar() -> G::Scalar { - let mut bits = G::Scalar::char_le_bits().to_bitvec(); - let mut acc = BigUint::new(Vec::::new()); - while let Some(b) = bits.pop() { - acc <<= 1_i32; - acc += u8::from(b); - } - let modulus = acc.to_str_radix(10); - G::Scalar::from_str_vartime(&modulus).unwrap() - } -} - -#[derive(Debug)] -pub struct BitIterator { - t: E, - n: usize, -} - -impl> BitIterator { - pub fn new(t: E) -> Self { - let n = t.as_ref().len() * 64; - - BitIterator { t, n } - } -} - -impl> Iterator for BitIterator { - type Item = bool; - - fn next(&mut self) -> Option { - if self.n == 0 { - None - } else { - self.n -= 1; - let part = self.n / 64; - let bit = self.n - (64 * part); - - Some(self.t.as_ref()[part] & (1 << bit) > 0) - } - } -} - -// Synthesize a bit representation into circuit gadgets. -pub fn synthesize_bits>( - cs: &mut CS, - bits: &Option>, -) -> Result, SynthesisError> { - (0..F::NUM_BITS) - .map(|i| { - AllocatedBit::alloc( - cs.namespace(|| format!("bit {i}")), - Some(bits.as_ref().unwrap()[i as usize]), - ) - }) - .collect::, SynthesisError>>() -} - -pub fn verify_signature>( - cs: &mut CS, - pk: &AllocatedPoint, - r: &AllocatedPoint, - s_bits: &[AllocatedBit], - c_bits: &[AllocatedBit], -) -> Result<(), SynthesisError> { - let g = AllocatedPoint::::alloc( - cs.namespace(|| "g"), - Some(( - G::Base::from_str_vartime( - "28948022309329048855892746252171976963363056481941647379679742748393362948096", - ) - .unwrap(), - G::Base::from_str_vartime("2").unwrap(), - false, - )), - ) - .unwrap(); - - cs.enforce( - || "gx is vesta curve", - |lc| lc + g.get_coordinates().0.get_variable(), - |lc| lc + CS::one(), - |lc| { - lc + ( - G::Base::from_str_vartime( - "28948022309329048855892746252171976963363056481941647379679742748393362948096", - ) - .unwrap(), - CS::one(), - ) - }, - ); - - cs.enforce( - || "gy is vesta curve", - |lc| lc + g.get_coordinates().1.get_variable(), - |lc| lc + CS::one(), - |lc| lc + (G::Base::from_str_vartime("2").unwrap(), CS::one()), - ); - - let sg = g.scalar_mul(cs.namespace(|| "[s]G"), s_bits)?; - let cpk = pk.scalar_mul(&mut cs.namespace(|| "[c]PK"), c_bits)?; - let rcpk = cpk.add(&mut cs.namespace(|| "R + [c]PK"), r)?; - - let (rcpk_x, rcpk_y, _) = rcpk.get_coordinates(); - let (sg_x, sg_y, _) = sg.get_coordinates(); - - cs.enforce( - || "sg_x == rcpk_x", - |lc| lc + sg_x.get_variable(), - |lc| lc + CS::one(), - |lc| lc + rcpk_x.get_variable(), - ); - - cs.enforce( - || "sg_y == rcpk_y", - |lc| lc + sg_y.get_variable(), - |lc| lc + CS::one(), - |lc| lc + rcpk_y.get_variable(), - ); - - Ok(()) -} - -type G1 = pasta_curves::pallas::Point; -type G2 = pasta_curves::vesta::Point; - -fn main() { - let mut cs = TestConstraintSystem::<::Scalar>::new(); - assert!(cs.is_satisfied()); - assert_eq!(cs.num_constraints(), 0); - - let sk = SecretKey::::random(&mut OsRng); - let pk = PublicKey::from_secret_key(&sk); - - // generate a random message to sign - let c = ::Scalar::random(&mut OsRng); - - // sign and verify - let signature = sk.sign(c, &mut OsRng); - let result = pk.verify(c, &signature); - assert!(result); - - // prepare inputs to the circuit gadget - let pk = { - let pkxy = pk.0.to_affine().coordinates().unwrap(); - - AllocatedPoint::::alloc( - cs.namespace(|| "pub key"), - Some((*pkxy.x(), *pkxy.y(), false)), - ) - .unwrap() - }; - let r = { - let rxy = 
signature.r.to_affine().coordinates().unwrap(); - AllocatedPoint::alloc(cs.namespace(|| "r"), Some((*rxy.x(), *rxy.y(), false))).unwrap() - }; - let s = { - let s_bits = signature - .s - .to_le_bits() - .iter() - .map(|b| *b) - .collect::>(); - - synthesize_bits(&mut cs.namespace(|| "s bits"), &Some(s_bits)).unwrap() - }; - let c = { - let c_bits = c.to_le_bits().iter().map(|b| *b).collect::>(); - - synthesize_bits(&mut cs.namespace(|| "c bits"), &Some(c_bits)).unwrap() - }; - - // Check the signature was signed by the correct sk using the pk - verify_signature(&mut cs, &pk, &r, &s, &c).unwrap(); - - assert!(cs.is_satisfied()); -} diff --git a/src/bellpepper/mod.rs b/src/bellpepper/mod.rs index 5183a35a5..456f6cb5a 100644 --- a/src/bellpepper/mod.rs +++ b/src/bellpepper/mod.rs @@ -15,7 +15,8 @@ mod tests { shape_cs::ShapeCS, solver::SatisfyingAssignment, }, - traits::{snark::default_ck_hint, Group}, + provider::{bn256_grumpkin::Bn256Engine, pasta::PallasEngine, secp_secq::Secp256k1Engine}, + traits::{snark::default_ck_hint, Engine}, }; use bellpepper_core::{num::AllocatedNum, ConstraintSystem}; use ff::PrimeField; @@ -40,17 +41,14 @@ mod tests { ); } - fn test_alloc_bit_with() - where - G: Group, - { + fn test_alloc_bit_with() { // First create the shape - let mut cs: ShapeCS = ShapeCS::new(); + let mut cs: ShapeCS = ShapeCS::new(); synthesize_alloc_bit(&mut cs); let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); // Now get the assignment - let mut cs = SatisfyingAssignment::::new(); + let mut cs = SatisfyingAssignment::::new(); synthesize_alloc_bit(&mut cs); let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); @@ -60,8 +58,8 @@ mod tests { #[test] fn test_alloc_bit() { - test_alloc_bit_with::(); - test_alloc_bit_with::(); - test_alloc_bit_with::(); + test_alloc_bit_with::(); + test_alloc_bit_with::(); + test_alloc_bit_with::(); } } diff --git a/src/bellpepper/r1cs.rs b/src/bellpepper/r1cs.rs index eecddf317..131c032c3 100644 --- a/src/bellpepper/r1cs.rs +++ b/src/bellpepper/r1cs.rs @@ -6,49 +6,49 @@ use super::{shape_cs::ShapeCS, solver::SatisfyingAssignment, test_shape_cs::Test use crate::{ errors::NovaError, r1cs::{commitment_key, CommitmentKeyHint, R1CSInstance, R1CSShape, R1CSWitness, SparseMatrix}, - traits::Group, + traits::Engine, CommitmentKey, }; use bellpepper_core::{Index, LinearCombination}; use ff::PrimeField; /// `NovaWitness` provide a method for acquiring an `R1CSInstance` and `R1CSWitness` from implementers. -pub trait NovaWitness { +pub trait NovaWitness { /// Return an instance and witness, given a shape and ck. fn r1cs_instance_and_witness( &self, - shape: &R1CSShape, - ck: &CommitmentKey, - ) -> Result<(R1CSInstance, R1CSWitness), NovaError>; + shape: &R1CSShape, + ck: &CommitmentKey, + ) -> Result<(R1CSInstance, R1CSWitness), NovaError>; } /// `NovaShape` provides methods for acquiring `R1CSShape` and `CommitmentKey` from implementers. -pub trait NovaShape { +pub trait NovaShape { /// Return an appropriate `R1CSShape` and `CommitmentKey` structs. /// A `CommitmentKeyHint` should be provided to help guide the construction of the `CommitmentKey`. /// This parameter is documented in `r1cs::R1CS::commitment_key`. - fn r1cs_shape_and_key(&self, ck_hint: &CommitmentKeyHint) -> (R1CSShape, CommitmentKey) { + fn r1cs_shape_and_key(&self, ck_hint: &CommitmentKeyHint) -> (R1CSShape, CommitmentKey) { let S = self.r1cs_shape(); let ck = commitment_key(&S, ck_hint); (S, ck) } /// Return an appropriate `R1CSShape`. 
- fn r1cs_shape(&self) -> R1CSShape; + fn r1cs_shape(&self) -> R1CSShape; } -impl NovaWitness for SatisfyingAssignment { +impl NovaWitness for SatisfyingAssignment { fn r1cs_instance_and_witness( &self, - shape: &R1CSShape, - ck: &CommitmentKey, - ) -> Result<(R1CSInstance, R1CSWitness), NovaError> { - let W = R1CSWitness::::new(shape, self.aux_assignment())?; + shape: &R1CSShape, + ck: &CommitmentKey, + ) -> Result<(R1CSInstance, R1CSWitness), NovaError> { + let W = R1CSWitness::::new(shape, self.aux_assignment())?; let X = &self.input_assignment()[1..]; let comm_W = W.commit(ck); - let instance = R1CSInstance::::new(shape, &comm_W, X)?; + let instance = R1CSInstance::::new(shape, &comm_W, X)?; Ok((instance, W)) } @@ -56,14 +56,14 @@ impl NovaWitness for SatisfyingAssignment { macro_rules! impl_nova_shape { ( $name:ident) => { - impl NovaShape for $name + impl NovaShape for $name where - G::Scalar: PrimeField, + E::Scalar: PrimeField, { - fn r1cs_shape(&self) -> R1CSShape { - let mut A = SparseMatrix::::empty(); - let mut B = SparseMatrix::::empty(); - let mut C: SparseMatrix<::Scalar> = SparseMatrix::::empty(); + fn r1cs_shape(&self) -> R1CSShape { + let mut A = SparseMatrix::::empty(); + let mut B = SparseMatrix::::empty(); + let mut C: SparseMatrix<::Scalar> = SparseMatrix::::empty(); let mut num_cons_added = 0; let mut X = (&mut A, &mut B, &mut C, &mut num_cons_added); diff --git a/src/bellpepper/shape_cs.rs b/src/bellpepper/shape_cs.rs index f3a5b7b64..57543401d 100644 --- a/src/bellpepper/shape_cs.rs +++ b/src/bellpepper/shape_cs.rs @@ -1,25 +1,25 @@ //! Support for generating R1CS shape using bellpepper. -use crate::traits::Group; +use crate::traits::Engine; use bellpepper_core::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable}; use ff::PrimeField; /// `ShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a circuit. -pub struct ShapeCS +pub struct ShapeCS where - G::Scalar: PrimeField, + E::Scalar: PrimeField, { /// All constraints added to the `ShapeCS`. 
pub constraints: Vec<( - LinearCombination, - LinearCombination, - LinearCombination, + LinearCombination, + LinearCombination, + LinearCombination, )>, inputs: usize, aux: usize, } -impl ShapeCS { +impl ShapeCS { /// Create a new, default `ShapeCS`, pub fn new() -> Self { ShapeCS::default() @@ -41,7 +41,7 @@ impl ShapeCS { } } -impl Default for ShapeCS { +impl Default for ShapeCS { fn default() -> Self { ShapeCS { constraints: vec![], @@ -51,12 +51,12 @@ impl Default for ShapeCS { } } -impl ConstraintSystem for ShapeCS { +impl ConstraintSystem for ShapeCS { type Root = Self; fn alloc(&mut self, _annotation: A, _f: F) -> Result where - F: FnOnce() -> Result, + F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into, { @@ -67,7 +67,7 @@ impl ConstraintSystem for ShapeCS { fn alloc_input(&mut self, _annotation: A, _f: F) -> Result where - F: FnOnce() -> Result, + F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into, { @@ -80,9 +80,9 @@ impl ConstraintSystem for ShapeCS { where A: FnOnce() -> AR, AR: Into, - LA: FnOnce(LinearCombination) -> LinearCombination, - LB: FnOnce(LinearCombination) -> LinearCombination, - LC: FnOnce(LinearCombination) -> LinearCombination, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination, { let a = a(LinearCombination::zero()); let b = b(LinearCombination::zero()); diff --git a/src/bellpepper/solver.rs b/src/bellpepper/solver.rs index 516d0df93..4fc211a3b 100644 --- a/src/bellpepper/solver.rs +++ b/src/bellpepper/solver.rs @@ -1,8 +1,8 @@ //! Support for generating R1CS witness using bellpepper. -use crate::traits::Group; +use crate::traits::Engine; use bellpepper::util_cs::witness_cs::WitnessCS; /// A `ConstraintSystem` which calculates witness values for a concrete instance of an R1CS circuit. -pub type SatisfyingAssignment = WitnessCS<::Scalar>; +pub type SatisfyingAssignment = WitnessCS<::Scalar>; diff --git a/src/bellpepper/test_shape_cs.rs b/src/bellpepper/test_shape_cs.rs index 0626d7cf3..ca95ebba2 100644 --- a/src/bellpepper/test_shape_cs.rs +++ b/src/bellpepper/test_shape_cs.rs @@ -6,7 +6,7 @@ use std::{ collections::{BTreeMap, HashMap}, }; -use crate::traits::Group; +use crate::traits::Engine; use bellpepper_core::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable}; use core::fmt::Write; use ff::{Field, PrimeField}; @@ -48,17 +48,14 @@ impl Ord for OrderedVariable { } /// `TestShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a circuit. -pub struct TestShapeCS -where - G::Scalar: PrimeField + Field, -{ +pub struct TestShapeCS { named_objects: HashMap, current_namespace: Vec, /// All constraints added to the `TestShapeCS`. 
pub constraints: Vec<( - LinearCombination, - LinearCombination, - LinearCombination, + LinearCombination, + LinearCombination, + LinearCombination, String, )>, inputs: Vec, @@ -91,9 +88,9 @@ fn proc_lc( map } -impl TestShapeCS +impl TestShapeCS where - G::Scalar: PrimeField, + E::Scalar: PrimeField, { #[allow(unused)] /// Create a new, default `TestShapeCS`, @@ -144,16 +141,16 @@ where writeln!(s, "INPUT {}", &input).unwrap() } - let negone = -::ONE; + let negone = -::ONE; - let powers_of_two = (0..G::Scalar::NUM_BITS) - .map(|i| G::Scalar::from(2u64).pow_vartime([u64::from(i)])) + let powers_of_two = (0..E::Scalar::NUM_BITS) + .map(|i| E::Scalar::from(2u64).pow_vartime([u64::from(i)])) .collect::>(); - let pp = |s: &mut String, lc: &LinearCombination| { + let pp = |s: &mut String, lc: &LinearCombination| { s.push('('); let mut is_first = true; - for (var, coeff) in proc_lc::(lc) { + for (var, coeff) in proc_lc::(lc) { if coeff == negone { s.push_str(" - ") } else if !is_first { @@ -161,7 +158,7 @@ where } is_first = false; - if coeff != ::ONE && coeff != negone { + if coeff != ::ONE && coeff != negone { for (i, x) in powers_of_two.iter().enumerate() { if x == &coeff { write!(s, "2^{i} . ").unwrap(); @@ -216,13 +213,10 @@ where } } -impl Default for TestShapeCS -where - G::Scalar: PrimeField, -{ +impl Default for TestShapeCS { fn default() -> Self { let mut map = HashMap::new(); - map.insert("ONE".into(), NamedObject::Var(TestShapeCS::::one())); + map.insert("ONE".into(), NamedObject::Var(TestShapeCS::::one())); TestShapeCS { named_objects: map, current_namespace: vec![], @@ -233,15 +227,15 @@ where } } -impl ConstraintSystem for TestShapeCS +impl ConstraintSystem for TestShapeCS where - G::Scalar: PrimeField, + E::Scalar: PrimeField, { type Root = Self; fn alloc(&mut self, annotation: A, _f: F) -> Result where - F: FnOnce() -> Result, + F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into, { @@ -253,7 +247,7 @@ where fn alloc_input(&mut self, annotation: A, _f: F) -> Result where - F: FnOnce() -> Result, + F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into, { @@ -267,9 +261,9 @@ where where A: FnOnce() -> AR, AR: Into, - LA: FnOnce(LinearCombination) -> LinearCombination, - LB: FnOnce(LinearCombination) -> LinearCombination, - LC: FnOnce(LinearCombination) -> LinearCombination, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination, { let path = compute_path(&self.current_namespace, &annotation().into()); let index = self.constraints.len(); diff --git a/src/circuit.rs b/src/circuit.rs index d3d1837f0..332e4abf9 100644 --- a/src/circuit.rs +++ b/src/circuit.rs @@ -15,7 +15,7 @@ use crate::{ }, r1cs::{R1CSInstance, RelaxedR1CSInstance}, traits::{ - circuit::StepCircuit, commitment::CommitmentTrait, Group, ROCircuitTrait, ROConstantsCircuit, + circuit::StepCircuit, commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit, }, Commitment, }; @@ -48,26 +48,26 @@ impl NovaAugmentedCircuitParams { #[derive(Debug, Serialize, Deserialize)] #[serde(bound = "")] -pub struct NovaAugmentedCircuitInputs { - params: G::Scalar, - i: G::Base, - z0: Vec, - zi: Option>, - U: Option>, - u: Option>, - T: Option>, +pub struct NovaAugmentedCircuitInputs { + params: E::Scalar, + i: E::Base, + z0: Vec, + zi: Option>, + U: Option>, + u: Option>, + T: Option>, } -impl NovaAugmentedCircuitInputs { +impl NovaAugmentedCircuitInputs { /// Create new inputs/witness for the verification circuit pub fn 
new( - params: G::Scalar, - i: G::Base, - z0: Vec, - zi: Option>, - U: Option>, - u: Option>, - T: Option>, + params: E::Scalar, + i: E::Base, + z0: Vec, + zi: Option>, + U: Option>, + u: Option>, + T: Option>, ) -> Self { Self { params, @@ -83,20 +83,20 @@ impl NovaAugmentedCircuitInputs { /// The augmented circuit F' in Nova that includes a step circuit F /// and the circuit for the verifier in Nova's non-interactive folding scheme -pub struct NovaAugmentedCircuit<'a, G: Group, SC: StepCircuit> { +pub struct NovaAugmentedCircuit<'a, E: Engine, SC: StepCircuit> { params: &'a NovaAugmentedCircuitParams, - ro_consts: ROConstantsCircuit, - inputs: Option>, + ro_consts: ROConstantsCircuit, + inputs: Option>, step_circuit: &'a SC, // The function that is applied for each step } -impl<'a, G: Group, SC: StepCircuit> NovaAugmentedCircuit<'a, G, SC> { +impl<'a, E: Engine, SC: StepCircuit> NovaAugmentedCircuit<'a, E, SC> { /// Create a new verification circuit for the input relaxed r1cs instances pub const fn new( params: &'a NovaAugmentedCircuitParams, - inputs: Option>, + inputs: Option>, step_circuit: &'a SC, - ro_consts: ROConstantsCircuit, + ro_consts: ROConstantsCircuit, ) -> Self { Self { params, @@ -107,24 +107,24 @@ impl<'a, G: Group, SC: StepCircuit> NovaAugmentedCircuit<'a, G, SC> { } /// Allocate all witnesses and return - fn alloc_witness::Base>>( + fn alloc_witness::Base>>( &self, mut cs: CS, arity: usize, ) -> Result< ( - AllocatedNum, - AllocatedNum, - Vec>, - Vec>, - AllocatedRelaxedR1CSInstance, - AllocatedR1CSInstance, - AllocatedPoint, + AllocatedNum, + AllocatedNum, + Vec>, + Vec>, + AllocatedRelaxedR1CSInstance, + AllocatedR1CSInstance, + AllocatedPoint, ), SynthesisError, > { // Allocate the params - let params = alloc_scalar_as_base::( + let params = alloc_scalar_as_base::( cs.namespace(|| "params"), self.inputs.as_ref().map(|inputs| inputs.params), )?; @@ -139,20 +139,20 @@ impl<'a, G: Group, SC: StepCircuit> NovaAugmentedCircuit<'a, G, SC> { Ok(self.inputs.get()?.z0[i]) }) }) - .collect::>, _>>()?; + .collect::>, _>>()?; // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value 0 - let zero = vec![G::Base::ZERO; arity]; + let zero = vec![E::Base::ZERO; arity]; let z_i = (0..arity) .map(|i| { AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { Ok(self.inputs.get()?.zi.as_ref().unwrap_or(&zero)[i]) }) }) - .collect::>, _>>()?; + .collect::>, _>>()?; // Allocate the running instance - let U: AllocatedRelaxedR1CSInstance = AllocatedRelaxedR1CSInstance::alloc( + let U: AllocatedRelaxedR1CSInstance = AllocatedRelaxedR1CSInstance::alloc( cs.namespace(|| "Allocate U"), self.inputs.as_ref().and_then(|inputs| inputs.U.as_ref()), self.params.limb_width, @@ -179,12 +179,12 @@ impl<'a, G: Group, SC: StepCircuit> NovaAugmentedCircuit<'a, G, SC> { } /// Synthesizes base case and returns the new relaxed `R1CSInstance` - fn synthesize_base_case::Base>>( + fn synthesize_base_case::Base>>( &self, mut cs: CS, - u: AllocatedR1CSInstance, - ) -> Result, SynthesisError> { - let U_default: AllocatedRelaxedR1CSInstance = if self.params.is_primary_circuit { + u: AllocatedR1CSInstance, + ) -> Result, SynthesisError> { + let U_default: AllocatedRelaxedR1CSInstance = if self.params.is_primary_circuit { // The primary circuit just returns the default R1CS instance AllocatedRelaxedR1CSInstance::default( cs.namespace(|| "Allocate U_default"), @@ -205,20 +205,20 @@ impl<'a, G: Group, SC: StepCircuit> NovaAugmentedCircuit<'a, G, SC> { /// Synthesizes non base case and returns the new relaxed `R1CSInstance` /// And a boolean indicating if all checks pass - fn synthesize_non_base_case::Base>>( + fn synthesize_non_base_case::Base>>( &self, mut cs: CS, - params: &AllocatedNum, - i: &AllocatedNum, - z_0: &[AllocatedNum], - z_i: &[AllocatedNum], - U: &AllocatedRelaxedR1CSInstance, - u: &AllocatedR1CSInstance, - T: &AllocatedPoint, + params: &AllocatedNum, + i: &AllocatedNum, + z_0: &[AllocatedNum], + z_i: &[AllocatedNum], + U: &AllocatedRelaxedR1CSInstance, + u: &AllocatedR1CSInstance, + T: &AllocatedPoint, arity: usize, - ) -> Result<(AllocatedRelaxedR1CSInstance, AllocatedBit), SynthesisError> { + ) -> Result<(AllocatedRelaxedR1CSInstance, AllocatedBit), SynthesisError> { // Check that u.x[0] = Hash(params, U, i, z0, zi) - let mut ro = G::ROCircuit::new( + let mut ro = E::ROCircuit::new( self.ro_consts.clone(), NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * arity, ); @@ -255,12 +255,12 @@ impl<'a, G: Group, SC: StepCircuit> NovaAugmentedCircuit<'a, G, SC> { } } -impl<'a, G: Group, SC: StepCircuit> NovaAugmentedCircuit<'a, G, SC> { +impl<'a, E: Engine, SC: StepCircuit> NovaAugmentedCircuit<'a, E, SC> { /// synthesize circuit giving constraint system - pub fn synthesize::Base>>( + pub fn synthesize::Base>>( self, cs: &mut CS, - ) -> Result>, SynthesisError> { + ) -> Result>, SynthesisError> { let arity = self.step_circuit.arity(); // Allocate all witnesses @@ -310,7 +310,7 @@ impl<'a, G: Group, SC: StepCircuit> NovaAugmentedCircuit<'a, G, SC> { // Compute i + 1 let i_new = AllocatedNum::alloc(cs.namespace(|| "i + 1"), || { - Ok(*i.get_value().get()? + G::Base::ONE) + Ok(*i.get_value().get()? 
+ E::Base::ONE) })?; cs.enforce( || "check i + 1", @@ -338,7 +338,7 @@ impl<'a, G: Group, SC: StepCircuit> NovaAugmentedCircuit<'a, G, SC> { } // Compute the new hash H(params, Unew, i+1, z0, z_{i+1}) - let mut ro = G::ROCircuit::new(self.ro_consts, NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * arity); + let mut ro = E::ROCircuit::new(self.ro_consts, NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * arity); ro.absorb(¶ms); ro.absorb(&i_new); for e in &z_0 { @@ -363,55 +363,58 @@ impl<'a, G: Group, SC: StepCircuit> NovaAugmentedCircuit<'a, G, SC> { #[cfg(test)] mod tests { use super::*; - use crate::bellpepper::{solver::SatisfyingAssignment, test_shape_cs::TestShapeCS}; - type PastaG1 = pasta_curves::pallas::Point; - type PastaG2 = pasta_curves::vesta::Point; - - use crate::constants::{BN_LIMB_WIDTH, BN_N_LIMBS}; - use crate::provider; - use crate::traits::snark::default_ck_hint; use crate::{ - bellpepper::r1cs::{NovaShape, NovaWitness}, + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + solver::SatisfyingAssignment, + test_shape_cs::TestShapeCS, + }, + constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, gadgets::utils::scalar_as_base, - provider::poseidon::PoseidonConstantsCircuit, - traits::circuit::TrivialCircuit, + provider::{ + bn256_grumpkin::{Bn256Engine, GrumpkinEngine}, + pasta::{PallasEngine, VestaEngine}, + poseidon::PoseidonConstantsCircuit, + secp_secq::{Secp256k1Engine, Secq256k1Engine}, + }, + traits::{circuit::TrivialCircuit, snark::default_ck_hint}, }; // In the following we use 1 to refer to the primary, and 2 to refer to the secondary circuit - fn test_recursive_circuit_with( + fn test_recursive_circuit_with( primary_params: &NovaAugmentedCircuitParams, secondary_params: &NovaAugmentedCircuitParams, - ro_consts1: ROConstantsCircuit, - ro_consts2: ROConstantsCircuit, + ro_consts1: ROConstantsCircuit, + ro_consts2: ROConstantsCircuit, num_constraints_primary: usize, num_constraints_secondary: usize, ) where - G1: Group::Scalar>, - G2: Group::Scalar>, + E1: Engine::Scalar>, + E2: Engine::Scalar>, { let tc1 = TrivialCircuit::default(); // Initialize the shape and ck for the primary - let circuit1: NovaAugmentedCircuit<'_, G2, TrivialCircuit<::Base>> = + let circuit1: NovaAugmentedCircuit<'_, E2, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(primary_params, None, &tc1, ro_consts1.clone()); - let mut cs: TestShapeCS = TestShapeCS::new(); + let mut cs: TestShapeCS = TestShapeCS::new(); let _ = circuit1.synthesize(&mut cs); let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); assert_eq!(cs.num_constraints(), num_constraints_primary); let tc2 = TrivialCircuit::default(); // Initialize the shape and ck for the secondary - let circuit2: NovaAugmentedCircuit<'_, G1, TrivialCircuit<::Base>> = + let circuit2: NovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(secondary_params, None, &tc2, ro_consts2.clone()); - let mut cs: TestShapeCS = TestShapeCS::new(); + let mut cs: TestShapeCS = TestShapeCS::new(); let _ = circuit2.synthesize(&mut cs); let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); assert_eq!(cs.num_constraints(), num_constraints_secondary); // Execute the base case for the primary - let zero1 = <::Base as Field>::ZERO; - let mut cs1 = SatisfyingAssignment::::new(); - let inputs1: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( - scalar_as_base::(zero1), // pass zero for testing + let zero1 = <::Base as Field>::ZERO; + let mut cs1 = SatisfyingAssignment::::new(); + let inputs1: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( + 
scalar_as_base::(zero1), // pass zero for testing zero1, vec![zero1], None, @@ -419,7 +422,7 @@ mod tests { None, None, ); - let circuit1: NovaAugmentedCircuit<'_, G2, TrivialCircuit<::Base>> = + let circuit1: NovaAugmentedCircuit<'_, E2, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(primary_params, Some(inputs1), &tc1, ro_consts1); let _ = circuit1.synthesize(&mut cs1); let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, &ck1).unwrap(); @@ -427,10 +430,10 @@ mod tests { assert!(shape1.is_sat(&ck1, &inst1, &witness1).is_ok()); // Execute the base case for the secondary - let zero2 = <::Base as Field>::ZERO; - let mut cs2 = SatisfyingAssignment::::new(); - let inputs2: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( - scalar_as_base::(zero2), // pass zero for testing + let zero2 = <::Base as Field>::ZERO; + let mut cs2 = SatisfyingAssignment::::new(); + let inputs2: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( + scalar_as_base::(zero2), // pass zero for testing zero2, vec![zero2], None, @@ -438,7 +441,7 @@ mod tests { Some(inst1), None, ); - let circuit2: NovaAugmentedCircuit<'_, G1, TrivialCircuit<::Base>> = + let circuit2: NovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(secondary_params, Some(inputs2), &tc2, ro_consts2); let _ = circuit2.synthesize(&mut cs2); let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, &ck2).unwrap(); @@ -450,10 +453,10 @@ mod tests { fn test_recursive_circuit_pasta() { let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); let params2 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); - let ro_consts2: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + let ro_consts2: ROConstantsCircuit = PoseidonConstantsCircuit::default(); - test_recursive_circuit_with::( + test_recursive_circuit_with::( ¶ms1, ¶ms2, ro_consts1, ro_consts2, 9825, 10357, ); } @@ -462,29 +465,23 @@ mod tests { fn test_recursive_circuit_grumpkin() { let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); let params2 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - let ro_consts1: ROConstantsCircuit = - PoseidonConstantsCircuit::default(); - let ro_consts2: ROConstantsCircuit = - PoseidonConstantsCircuit::default(); - - test_recursive_circuit_with::< - provider::bn256_grumpkin::bn256::Point, - provider::bn256_grumpkin::grumpkin::Point, - >(¶ms1, ¶ms2, ro_consts1, ro_consts2, 9993, 10546); + let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + let ro_consts2: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + + test_recursive_circuit_with::( + ¶ms1, ¶ms2, ro_consts1, ro_consts2, 9993, 10546, + ); } #[test] fn test_recursive_circuit_secp() { let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); let params2 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - let ro_consts1: ROConstantsCircuit = - PoseidonConstantsCircuit::default(); - let ro_consts2: ROConstantsCircuit = - PoseidonConstantsCircuit::default(); - - test_recursive_circuit_with::< - provider::secp_secq::secp256k1::Point, - provider::secp_secq::secq256k1::Point, - >(¶ms1, ¶ms2, ro_consts1, ro_consts2, 10272, 10969); + let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + let ro_consts2: ROConstantsCircuit = 
PoseidonConstantsCircuit::default(); + + test_recursive_circuit_with::( + ¶ms1, ¶ms2, ro_consts1, ro_consts2, 10272, 10969, + ); } } diff --git a/src/digest.rs b/src/digest.rs index 5eb0c990b..fccf17b83 100644 --- a/src/digest.rs +++ b/src/digest.rs @@ -80,25 +80,24 @@ impl<'a, F: PrimeField, T: Digestible> DigestComputer<'a, F, T> { #[cfg(test)] mod tests { + use super::{DigestComputer, SimpleDigestible}; + use crate::{provider::pasta::PallasEngine, traits::Engine}; use ff::Field; use once_cell::sync::OnceCell; - use pasta_curves::pallas; use serde::{Deserialize, Serialize}; - use crate::traits::Group; - - use super::{DigestComputer, SimpleDigestible}; + type E = PallasEngine; #[derive(Serialize, Deserialize)] - struct S { + struct S { i: usize, #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, + digest: OnceCell, } - impl SimpleDigestible for S {} + impl SimpleDigestible for S {} - impl S { + impl S { fn new(i: usize) -> Self { S { i, @@ -106,7 +105,7 @@ mod tests { } } - fn digest(&self) -> G::Scalar { + fn digest(&self) -> E::Scalar { self .digest .get_or_try_init(|| DigestComputer::new(self).digest()) @@ -115,23 +114,21 @@ mod tests { } } - type G = pallas::Point; - #[test] fn test_digest_field_not_ingested_in_computation() { - let s1 = S::::new(42); + let s1 = S::::new(42); // let's set up a struct with a weird digest field to make sure the digest computation does not depend of it let oc = OnceCell::new(); - oc.set(::Scalar::ONE).unwrap(); + oc.set(::Scalar::ONE).unwrap(); - let s2: S = S { i: 42, digest: oc }; + let s2: S = S { i: 42, digest: oc }; assert_eq!( - DigestComputer::<::Scalar, _>::new(&s1) + DigestComputer::<::Scalar, _>::new(&s1) .digest() .unwrap(), - DigestComputer::<::Scalar, _>::new(&s2) + DigestComputer::<::Scalar, _>::new(&s2) .digest() .unwrap() ); @@ -140,7 +137,7 @@ mod tests { // equality will not result in `s1.digest() == s2.digest` assert_ne!( s2.digest(), - DigestComputer::<::Scalar, _>::new(&s2) + DigestComputer::<::Scalar, _>::new(&s2) .digest() .unwrap() ); @@ -148,19 +145,19 @@ mod tests { #[test] fn test_digest_impervious_to_serialization() { - let good_s = S::::new(42); + let good_s = S::::new(42); // let's set up a struct with a weird digest field to confuse deserializers let oc = OnceCell::new(); - oc.set(::Scalar::ONE).unwrap(); + oc.set(::Scalar::ONE).unwrap(); - let bad_s: S = S { i: 42, digest: oc }; + let bad_s: S = S { i: 42, digest: oc }; // this justifies the adjective "bad" assert_ne!(good_s.digest(), bad_s.digest()); let naughty_bytes = bincode::serialize(&bad_s).unwrap(); - let retrieved_s: S = bincode::deserialize(&naughty_bytes).unwrap(); + let retrieved_s: S = bincode::deserialize(&naughty_bytes).unwrap(); assert_eq!(good_s.digest(), retrieved_s.digest()) } } diff --git a/src/gadgets/ecc.rs b/src/gadgets/ecc.rs index ac0090bbc..4c2121fdb 100644 --- a/src/gadgets/ecc.rs +++ b/src/gadgets/ecc.rs @@ -6,7 +6,7 @@ use crate::{ select_num_or_one, select_num_or_zero, select_num_or_zero2, select_one_or_diff2, select_one_or_num2, select_zero_or_num2, }, - traits::Group, + traits::{Engine, Group}, }; use bellpepper::gadgets::Assignment; use bellpepper_core::{ @@ -18,39 +18,33 @@ use ff::{Field, PrimeField}; /// `AllocatedPoint` provides an elliptic curve abstraction inside a circuit. 
#[derive(Clone)] -pub struct AllocatedPoint -where - G: Group, -{ - pub(crate) x: AllocatedNum, - pub(crate) y: AllocatedNum, - pub(crate) is_infinity: AllocatedNum, +pub struct AllocatedPoint { + pub(crate) x: AllocatedNum, + pub(crate) y: AllocatedNum, + pub(crate) is_infinity: AllocatedNum, } -impl AllocatedPoint +impl AllocatedPoint where - G: Group, + E: Engine, { /// Allocates a new point on the curve using coordinates provided by `coords`. /// If coords = None, it allocates the default infinity point - pub fn alloc( + pub fn alloc>( mut cs: CS, - coords: Option<(G::Base, G::Base, bool)>, - ) -> Result - where - CS: ConstraintSystem, - { + coords: Option<(E::Base, E::Base, bool)>, + ) -> Result { let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok(coords.map_or(G::Base::ZERO, |c| c.0)) + Ok(coords.map_or(E::Base::ZERO, |c| c.0)) })?; let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok(coords.map_or(G::Base::ZERO, |c| c.1)) + Ok(coords.map_or(E::Base::ZERO, |c| c.1)) })?; let is_infinity = AllocatedNum::alloc(cs.namespace(|| "is_infinity"), || { Ok(if coords.map_or(true, |c| c.2) { - G::Base::ONE + E::Base::ONE } else { - G::Base::ZERO + E::Base::ZERO }) })?; cs.enforce( @@ -66,7 +60,7 @@ where /// checks if `self` is on the curve or if it is infinity pub fn check_on_curve(&self, mut cs: CS) -> Result<(), SynthesisError> where - CS: ConstraintSystem, + CS: ConstraintSystem, { // check that (x,y) is on the curve if it is not infinity // we will check that (1- is_infinity) * y^2 = (1-is_infinity) * (x^3 + Ax + B) @@ -76,13 +70,13 @@ where let x_cube = self.x.mul(cs.namespace(|| "x_cube"), &x_square)?; let rhs = AllocatedNum::alloc(cs.namespace(|| "rhs"), || { - if *self.is_infinity.get_value().get()? == G::Base::ONE { - Ok(G::Base::ZERO) + if *self.is_infinity.get_value().get()? == E::Base::ONE { + Ok(E::Base::ZERO) } else { Ok( *x_cube.get_value().get()? - + *self.x.get_value().get()? * G::get_curve_params().0 - + G::get_curve_params().1, + + *self.x.get_value().get()? * E::GE::group_params().0 + + E::GE::group_params().1, ) } })?; @@ -91,8 +85,8 @@ where || "rhs = (1-is_infinity) * (x^3 + Ax + B)", |lc| { lc + x_cube.get_variable() - + (G::get_curve_params().0, self.x.get_variable()) - + (G::get_curve_params().1, CS::one()) + + (E::GE::group_params().0, self.x.get_variable()) + + (E::GE::group_params().1, CS::one()) }, |lc| lc + CS::one() - self.is_infinity.get_variable(), |lc| lc + rhs.get_variable(), @@ -110,10 +104,7 @@ where } /// Allocates a default point on the curve, set to the identity point. - pub fn default(mut cs: CS) -> Result - where - CS: ConstraintSystem, - { + pub fn default>(mut cs: CS) -> Result { let zero = alloc_zero(cs.namespace(|| "zero")); let one = alloc_one(cs.namespace(|| "one")); @@ -128,15 +119,15 @@ where pub const fn get_coordinates( &self, ) -> ( - &AllocatedNum, - &AllocatedNum, - &AllocatedNum, + &AllocatedNum, + &AllocatedNum, + &AllocatedNum, ) { (&self.x, &self.y, &self.is_infinity) } /// Negates the provided point - pub fn negate>(&self, mut cs: CS) -> Result { + pub fn negate>(&self, mut cs: CS) -> Result { let y = AllocatedNum::alloc(cs.namespace(|| "y"), || Ok(-*self.y.get_value().get()?))?; cs.enforce( @@ -154,10 +145,10 @@ where } /// Add two points (may be equal) - pub fn add>( + pub fn add>( &self, mut cs: CS, - other: &AllocatedPoint, + other: &AllocatedPoint, ) -> Result { // Compute boolean equal indicating if self = other @@ -203,10 +194,10 @@ where /// Adds other point to this point and returns the result. 
Assumes that the two points are /// different and that both `other.is_infinity` and `this.is_infinty` are bits - pub fn add_internal>( + pub fn add_internal>( &self, mut cs: CS, - other: &AllocatedPoint, + other: &AllocatedPoint, equal_x: &AllocatedBit, ) -> Result { //************************************************************************/ @@ -221,9 +212,9 @@ where // NOT(NOT(self.is_ifninity) AND NOT(other.is_infinity)) let at_least_one_inf = AllocatedNum::alloc(cs.namespace(|| "at least one inf"), || { Ok( - G::Base::ONE - - (G::Base::ONE - *self.is_infinity.get_value().get()?) - * (G::Base::ONE - *other.is_infinity.get_value().get()?), + E::Base::ONE + - (E::Base::ONE - *self.is_infinity.get_value().get()?) + * (E::Base::ONE - *other.is_infinity.get_value().get()?), ) })?; cs.enforce( @@ -237,7 +228,7 @@ where let x_diff_is_actual = AllocatedNum::alloc(cs.namespace(|| "allocate x_diff_is_actual"), || { Ok(if *equal_x.get_value().get()? { - G::Base::ONE + E::Base::ONE } else { *at_least_one_inf.get_value().get()? }) @@ -259,9 +250,9 @@ where )?; let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { - let x_diff_inv = if *x_diff_is_actual.get_value().get()? == G::Base::ONE { + let x_diff_inv = if *x_diff_is_actual.get_value().get()? == E::Base::ONE { // Set to default - G::Base::ONE + E::Base::ONE } else { // Set to the actual inverse (*other.x.get_value().get()? - *self.x.get_value().get()?) @@ -366,13 +357,13 @@ where } /// Doubles the supplied point. - pub fn double>(&self, mut cs: CS) -> Result { + pub fn double>(&self, mut cs: CS) -> Result { //*************************************************************/ - // lambda = (G::Base::from(3) * self.x * self.x + G::A()) - // * (G::Base::from(2)) * self.y).invert().unwrap(); + // lambda = (E::Base::from(3) * self.x * self.x + E::GE::A()) + // * (E::Base::from(2)) * self.y).invert().unwrap(); /*************************************************************/ - // Compute tmp = (G::Base::ONE + G::Base::ONE)* self.y ? self != inf : 1 + // Compute tmp = (E::Base::ONE + E::Base::ONE)* self.y ? self != inf : 1 let tmp_actual = AllocatedNum::alloc(cs.namespace(|| "tmp_actual"), || { Ok(*self.y.get_value().get()? + *self.y.get_value().get()?) })?; @@ -385,35 +376,35 @@ where let tmp = select_one_or_num2(cs.namespace(|| "tmp"), &tmp_actual, &self.is_infinity)?; - // Now compute lambda as (G::Base::from(3) * self.x * self.x + G::A()) * tmp_inv + // Now compute lambda as (E::Base::from(3) * self.x * self.x + E::GE::A()) * tmp_inv let prod_1 = AllocatedNum::alloc(cs.namespace(|| "alloc prod 1"), || { - Ok(G::Base::from(3) * self.x.get_value().get()? * self.x.get_value().get()?) + Ok(E::Base::from(3) * self.x.get_value().get()? * self.x.get_value().get()?) })?; cs.enforce( || "Check prod 1", - |lc| lc + (G::Base::from(3), self.x.get_variable()), + |lc| lc + (E::Base::from(3), self.x.get_variable()), |lc| lc + self.x.get_variable(), |lc| lc + prod_1.get_variable(), ); let lambda = AllocatedNum::alloc(cs.namespace(|| "alloc lambda"), || { - let tmp_inv = if *self.is_infinity.get_value().get()? == G::Base::ONE { + let tmp_inv = if *self.is_infinity.get_value().get()? == E::Base::ONE { // Return default value 1 - G::Base::ONE + E::Base::ONE } else { // Return the actual inverse (*tmp.get_value().get()?).invert().unwrap() }; - Ok(tmp_inv * (*prod_1.get_value().get()? + G::get_curve_params().0)) + Ok(tmp_inv * (*prod_1.get_value().get()? 
+ E::GE::group_params().0)) })?; cs.enforce( || "Check lambda", |lc| lc + tmp.get_variable(), |lc| lc + lambda.get_variable(), - |lc| lc + prod_1.get_variable() + (G::get_curve_params().0, CS::one()), + |lc| lc + prod_1.get_variable() + (E::GE::group_params().0, CS::one()), ); /*************************************************************/ @@ -470,12 +461,12 @@ where /// A gadget for scalar multiplication, optimized to use incomplete addition law. /// The optimization here is analogous to , /// except we use complete addition law over affine coordinates instead of projective coordinates for the tail bits - pub fn scalar_mul>( + pub fn scalar_mul>( &self, mut cs: CS, scalar_bits: &[AllocatedBit], ) -> Result { - let split_len = core::cmp::min(scalar_bits.len(), (G::Base::NUM_BITS - 2) as usize); + let split_len = core::cmp::min(scalar_bits.len(), (E::Base::NUM_BITS - 2) as usize); let (incomplete_bits, complete_bits) = scalar_bits.split_at(split_len); // we convert AllocatedPoint into AllocatedPointNonInfinity; we deal with the case where self.is_infinity = 1 below @@ -559,7 +550,7 @@ where } /// If condition outputs a otherwise outputs b - pub fn conditionally_select>( + pub fn conditionally_select>( mut cs: CS, a: &Self, b: &Self, @@ -580,7 +571,7 @@ where } /// If condition outputs a otherwise infinity - pub fn select_point_or_infinity>( + pub fn select_point_or_infinity>( mut cs: CS, a: &Self, condition: &Boolean, @@ -601,28 +592,25 @@ where #[derive(Clone)] /// `AllocatedPoint` but one that is guaranteed to be not infinity -pub struct AllocatedPointNonInfinity -where - G: Group, -{ - x: AllocatedNum, - y: AllocatedNum, +pub struct AllocatedPointNonInfinity { + x: AllocatedNum, + y: AllocatedNum, } -impl AllocatedPointNonInfinity +impl AllocatedPointNonInfinity where - G: Group, + E: Engine, { /// Creates a new `AllocatedPointNonInfinity` from the specified coordinates - pub const fn new(x: AllocatedNum, y: AllocatedNum) -> Self { + pub const fn new(x: AllocatedNum, y: AllocatedNum) -> Self { Self { x, y } } /// Allocates a new point on the curve using coordinates provided by `coords`. - pub fn alloc(mut cs: CS, coords: Option<(G::Base, G::Base)>) -> Result - where - CS: ConstraintSystem, - { + pub fn alloc>( + mut cs: CS, + coords: Option<(E::Base, E::Base)>, + ) -> Result { let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { coords.map_or(Err(SynthesisError::AssignmentMissing), |c| Ok(c.0)) })?; @@ -634,7 +622,7 @@ where } /// Turns an `AllocatedPoint` into an `AllocatedPointNonInfinity` (assumes it is not infinity) - pub fn from_allocated_point(p: &AllocatedPoint) -> Self { + pub fn from_allocated_point(p: &AllocatedPoint) -> Self { Self { x: p.x.clone(), y: p.y.clone(), @@ -644,8 +632,8 @@ where /// Returns an `AllocatedPoint` from an `AllocatedPointNonInfinity` pub fn to_allocated_point( &self, - is_infinity: &AllocatedNum, - ) -> Result, SynthesisError> { + is_infinity: &AllocatedNum, + ) -> Result, SynthesisError> { Ok(AllocatedPoint { x: self.x.clone(), y: self.y.clone(), @@ -654,19 +642,19 @@ where } /// Returns coordinates associated with the point. 
- pub const fn get_coordinates(&self) -> (&AllocatedNum, &AllocatedNum) { + pub const fn get_coordinates(&self) -> (&AllocatedNum, &AllocatedNum) { (&self.x, &self.y) } /// Add two points assuming self != +/- other pub fn add_incomplete(&self, mut cs: CS, other: &Self) -> Result where - CS: ConstraintSystem, + CS: ConstraintSystem, { // allocate a free variable that an honest prover sets to lambda = (y2-y1)/(x2-x1) let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { if *other.x.get_value().get()? == *self.x.get_value().get()? { - Ok(G::Base::ONE) + Ok(E::Base::ONE) } else { Ok( (*other.y.get_value().get()? - *self.y.get_value().get()?) @@ -721,19 +709,19 @@ where } /// doubles the point; since this is called with a point not at infinity, it is guaranteed to be not infinity - pub fn double_incomplete(&self, mut cs: CS) -> Result - where - CS: ConstraintSystem, - { + pub fn double_incomplete>( + &self, + mut cs: CS, + ) -> Result { // lambda = (3 x^2 + a) / 2 * y let x_sq = self.x.square(cs.namespace(|| "x_sq"))?; let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { - let n = G::Base::from(3) * x_sq.get_value().get()? + G::get_curve_params().0; - let d = G::Base::from(2) * *self.y.get_value().get()?; - if d == G::Base::ZERO { - Ok(G::Base::ONE) + let n = E::Base::from(3) * x_sq.get_value().get()? + E::GE::group_params().0; + let d = E::Base::from(2) * *self.y.get_value().get()?; + if d == E::Base::ZERO { + Ok(E::Base::ONE) } else { Ok(n * d.invert().unwrap()) } @@ -741,8 +729,8 @@ where cs.enforce( || "Check that lambda is computed correctly", |lc| lc + lambda.get_variable(), - |lc| lc + (G::Base::from(2), self.y.get_variable()), - |lc| lc + (G::Base::from(3), x_sq.get_variable()) + (G::get_curve_params().0, CS::one()), + |lc| lc + (E::Base::from(2), self.y.get_variable()), + |lc| lc + (E::Base::from(3), x_sq.get_variable()) + (E::GE::group_params().0, CS::one()), ); let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { @@ -757,7 +745,7 @@ where || "check that x is correct", |lc| lc + lambda.get_variable(), |lc| lc + lambda.get_variable(), - |lc| lc + x.get_variable() + (G::Base::from(2), self.x.get_variable()), + |lc| lc + x.get_variable() + (E::Base::from(2), self.x.get_variable()), ); let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { @@ -778,7 +766,7 @@ where } /// If condition outputs a otherwise outputs b - pub fn conditionally_select>( + pub fn conditionally_select>( mut cs: CS, a: &Self, b: &Self, @@ -794,15 +782,16 @@ where #[cfg(test)] mod tests { use super::*; - use crate::provider::{ - bn256_grumpkin::{bn256, grumpkin}, - secp_secq::{secp256k1, secq256k1}, - }; use crate::{ bellpepper::{ r1cs::{NovaShape, NovaWitness}, {solver::SatisfyingAssignment, test_shape_cs::TestShapeCS}, }, + provider::{ + bn256_grumpkin::{bn256, grumpkin, Bn256Engine, GrumpkinEngine}, + pasta::{PallasEngine, VestaEngine}, + secp_secq::{secp256k1, secq256k1, Secp256k1Engine, Secq256k1Engine}, + }, traits::snark::default_ck_hint, }; use ff::{Field, PrimeFieldBits}; @@ -810,27 +799,21 @@ mod tests { use rand::rngs::OsRng; #[derive(Debug, Clone)] - pub struct Point - where - G: Group, - { - x: G::Base, - y: G::Base, + pub struct Point { + x: E::Base, + y: E::Base, is_infinity: bool, } - impl Point - where - G: Group, - { - pub fn new(x: G::Base, y: G::Base, is_infinity: bool) -> Self { + impl Point { + pub fn new(x: E::Base, y: E::Base, is_infinity: bool) -> Self { Self { x, y, is_infinity } } pub fn random_vartime() -> Self { loop { - let x = G::Base::random(&mut OsRng); - let y 
= (x.square() * x + G::get_curve_params().1).sqrt(); + let x = E::Base::random(&mut OsRng); + let y = (x.square() * x + E::GE::group_params().1).sqrt(); if y.is_some().unwrap_u8() == 1 { return Self { x, @@ -842,7 +825,7 @@ mod tests { } /// Add any two points - pub fn add(&self, other: &Point) -> Self { + pub fn add(&self, other: &Point) -> Self { if self.x == other.x { // If self == other then call double if self.y == other.y { @@ -850,8 +833,8 @@ mod tests { } else { // if self.x == other.x and self.y != other.y then return infinity Self { - x: G::Base::ZERO, - y: G::Base::ZERO, + x: E::Base::ZERO, + y: E::Base::ZERO, is_infinity: true, } } @@ -861,7 +844,7 @@ mod tests { } /// Add two different points - pub fn add_internal(&self, other: &Point) -> Self { + pub fn add_internal(&self, other: &Point) -> Self { if self.is_infinity { return other.clone(); } @@ -883,16 +866,16 @@ mod tests { pub fn double(&self) -> Self { if self.is_infinity { return Self { - x: G::Base::ZERO, - y: G::Base::ZERO, + x: E::Base::ZERO, + y: E::Base::ZERO, is_infinity: true, }; } - let lambda = G::Base::from(3) + let lambda = E::Base::from(3) * self.x * self.x - * ((G::Base::ONE + G::Base::ONE) * self.y).invert().unwrap(); + * ((E::Base::ONE + E::Base::ONE) * self.y).invert().unwrap(); let x = lambda * lambda - self.x - self.x; let y = lambda * (self.x - x) - self.y; Self { @@ -902,10 +885,10 @@ mod tests { } } - pub fn scalar_mul(&self, scalar: &G::Scalar) -> Self { + pub fn scalar_mul(&self, scalar: &E::Scalar) -> Self { let mut res = Self { - x: G::Base::ZERO, - y: G::Base::ZERO, + x: E::Base::ZERO, + y: E::Base::ZERO, is_infinity: true, }; @@ -921,17 +904,17 @@ mod tests { } // Allocate a random point. Only used for testing - pub fn alloc_random_point>( + pub fn alloc_random_point>( mut cs: CS, - ) -> Result, SynthesisError> { + ) -> Result, SynthesisError> { // get a random point - let p = Point::::random_vartime(); + let p = Point::::random_vartime(); AllocatedPoint::alloc(cs.namespace(|| "alloc p"), Some((p.x, p.y, p.is_infinity))) } /// Make the point io - pub fn inputize_allocted_point>( - p: &AllocatedPoint, + pub fn inputize_allocted_point>( + p: &AllocatedPoint, mut cs: CS, ) { let _ = p.x.inputize(cs.namespace(|| "Input point.x")); @@ -943,27 +926,27 @@ mod tests { #[test] fn test_ecc_ops() { - test_ecc_ops_with::(); - test_ecc_ops_with::(); + test_ecc_ops_with::(); + test_ecc_ops_with::(); - test_ecc_ops_with::(); - test_ecc_ops_with::(); + test_ecc_ops_with::(); + test_ecc_ops_with::(); - test_ecc_ops_with::(); - test_ecc_ops_with::(); + test_ecc_ops_with::(); + test_ecc_ops_with::(); } - fn test_ecc_ops_with() + fn test_ecc_ops_with() where - C: CurveAffine, - G: Group, + E: Engine, + C: CurveAffine, { // perform some curve arithmetic - let a = Point::::random_vartime(); - let b = Point::::random_vartime(); + let a = Point::::random_vartime(); + let b = Point::::random_vartime(); let c = a.add(&b); let d = a.double(); - let s = ::Scalar::random(&mut OsRng); + let s = ::Scalar::random(&mut OsRng); let e = a.scalar_mul(&s); // perform the same computation by translating to curve types @@ -1006,15 +989,15 @@ mod tests { assert_eq!(e_curve, e_curve_2); } - fn synthesize_smul(mut cs: CS) -> (AllocatedPoint, AllocatedPoint, G::Scalar) + fn synthesize_smul(mut cs: CS) -> (AllocatedPoint, AllocatedPoint, E::Scalar) where - G: Group, - CS: ConstraintSystem, + E: Engine, + CS: ConstraintSystem, { let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); inputize_allocted_point(&a, cs.namespace(|| 
"inputize a")); - let s = G::Scalar::random(&mut OsRng); + let s = E::Scalar::random(&mut OsRng); // Allocate bits for s let bits: Vec = s .to_le_bits() @@ -1030,41 +1013,41 @@ mod tests { #[test] fn test_ecc_circuit_ops() { - test_ecc_circuit_ops_with::(); - test_ecc_circuit_ops_with::(); + test_ecc_circuit_ops_with::(); + test_ecc_circuit_ops_with::(); - test_ecc_circuit_ops_with::(); - test_ecc_circuit_ops_with::(); + test_ecc_circuit_ops_with::(); + test_ecc_circuit_ops_with::(); - test_ecc_circuit_ops_with::(); - test_ecc_circuit_ops_with::(); + test_ecc_circuit_ops_with::(); + test_ecc_circuit_ops_with::(); } - fn test_ecc_circuit_ops_with() + fn test_ecc_circuit_ops_with() where - G1: Group::Scalar>, - G2: Group::Scalar>, + E1: Engine::Scalar>, + E2: Engine::Scalar>, { // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = synthesize_smul::(cs.namespace(|| "synthesize")); + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = synthesize_smul::(cs.namespace(|| "synthesize")); println!("Number of constraints: {}", cs.num_constraints()); let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); // Then the satisfying assignment - let mut cs = SatisfyingAssignment::::new(); - let (a, e, s) = synthesize_smul::(cs.namespace(|| "synthesize")); + let mut cs = SatisfyingAssignment::::new(); + let (a, e, s) = synthesize_smul::(cs.namespace(|| "synthesize")); let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - let a_p: Point = Point::new( + let a_p: Point = Point::new( a.x.get_value().unwrap(), a.y.get_value().unwrap(), - a.is_infinity.get_value().unwrap() == ::Base::ONE, + a.is_infinity.get_value().unwrap() == ::Base::ONE, ); - let e_p: Point = Point::new( + let e_p: Point = Point::new( e.x.get_value().unwrap(), e.y.get_value().unwrap(), - e.is_infinity.get_value().unwrap() == ::Base::ONE, + e.is_infinity.get_value().unwrap() == ::Base::ONE, ); let e_new = a_p.scalar_mul(&s); assert!(e_p.x == e_new.x && e_p.y == e_new.y); @@ -1072,10 +1055,10 @@ mod tests { assert!(shape.is_sat(&ck, &inst, &witness).is_ok()); } - fn synthesize_add_equal(mut cs: CS) -> (AllocatedPoint, AllocatedPoint) + fn synthesize_add_equal(mut cs: CS) -> (AllocatedPoint, AllocatedPoint) where - G: Group, - CS: ConstraintSystem, + E: Engine, + CS: ConstraintSystem, { let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); inputize_allocted_point(&a, cs.namespace(|| "inputize a")); @@ -1086,40 +1069,40 @@ mod tests { #[test] fn test_ecc_circuit_add_equal() { - test_ecc_circuit_add_equal_with::(); - test_ecc_circuit_add_equal_with::(); + test_ecc_circuit_add_equal_with::(); + test_ecc_circuit_add_equal_with::(); - test_ecc_circuit_add_equal_with::(); - test_ecc_circuit_add_equal_with::(); + test_ecc_circuit_add_equal_with::(); + test_ecc_circuit_add_equal_with::(); - test_ecc_circuit_add_equal_with::(); - test_ecc_circuit_add_equal_with::(); + test_ecc_circuit_add_equal_with::(); + test_ecc_circuit_add_equal_with::(); } - fn test_ecc_circuit_add_equal_with() + fn test_ecc_circuit_add_equal_with() where - G1: Group::Scalar>, - G2: Group::Scalar>, + E1: Engine::Scalar>, + E2: Engine::Scalar>, { // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); println!("Number of constraints: {}", cs.num_constraints()); let (shape, ck) = 
cs.r1cs_shape_and_key(&*default_ck_hint()); // Then the satisfying assignment - let mut cs = SatisfyingAssignment::::new(); - let (a, e) = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); + let mut cs = SatisfyingAssignment::::new(); + let (a, e) = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - let a_p: Point = Point::new( + let a_p: Point = Point::new( a.x.get_value().unwrap(), a.y.get_value().unwrap(), - a.is_infinity.get_value().unwrap() == ::Base::ONE, + a.is_infinity.get_value().unwrap() == ::Base::ONE, ); - let e_p: Point = Point::new( + let e_p: Point = Point::new( e.x.get_value().unwrap(), e.y.get_value().unwrap(), - e.is_infinity.get_value().unwrap() == ::Base::ONE, + e.is_infinity.get_value().unwrap() == ::Base::ONE, ); let e_new = a_p.add(&a_p); assert!(e_p.x == e_new.x && e_p.y == e_new.y); @@ -1127,16 +1110,16 @@ mod tests { assert!(shape.is_sat(&ck, &inst, &witness).is_ok()); } - fn synthesize_add_negation(mut cs: CS) -> AllocatedPoint + fn synthesize_add_negation(mut cs: CS) -> AllocatedPoint where - G: Group, - CS: ConstraintSystem, + E: Engine, + CS: ConstraintSystem, { let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); inputize_allocted_point(&a, cs.namespace(|| "inputize a")); let b = &mut a.clone(); b.y = AllocatedNum::alloc(cs.namespace(|| "allocate negation of a"), || { - Ok(G::Base::ZERO) + Ok(E::Base::ZERO) }) .unwrap(); inputize_allocted_point(b, cs.namespace(|| "inputize b")); @@ -1146,35 +1129,35 @@ mod tests { #[test] fn test_ecc_circuit_add_negation() { - test_ecc_circuit_add_negation_with::(); - test_ecc_circuit_add_negation_with::(); + test_ecc_circuit_add_negation_with::(); + test_ecc_circuit_add_negation_with::(); - test_ecc_circuit_add_negation_with::(); - test_ecc_circuit_add_negation_with::(); + test_ecc_circuit_add_negation_with::(); + test_ecc_circuit_add_negation_with::(); - test_ecc_circuit_add_negation_with::(); - test_ecc_circuit_add_negation_with::(); + test_ecc_circuit_add_negation_with::(); + test_ecc_circuit_add_negation_with::(); } - fn test_ecc_circuit_add_negation_with() + fn test_ecc_circuit_add_negation_with() where - G1: Group::Scalar>, - G2: Group::Scalar>, + E1: Engine::Scalar>, + E2: Engine::Scalar>, { // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = synthesize_add_negation::(cs.namespace(|| "synthesize add equal")); + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = synthesize_add_negation::(cs.namespace(|| "synthesize add equal")); println!("Number of constraints: {}", cs.num_constraints()); let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); // Then the satisfying assignment - let mut cs = SatisfyingAssignment::::new(); - let e = synthesize_add_negation::(cs.namespace(|| "synthesize add negation")); + let mut cs = SatisfyingAssignment::::new(); + let e = synthesize_add_negation::(cs.namespace(|| "synthesize add negation")); let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - let e_p: Point = Point::new( + let e_p: Point = Point::new( e.x.get_value().unwrap(), e.y.get_value().unwrap(), - e.is_infinity.get_value().unwrap() == ::Base::ONE, + e.is_infinity.get_value().unwrap() == ::Base::ONE, ); assert!(e_p.is_infinity); // Make sure that it is satisfiable diff --git a/src/gadgets/nonnative/util.rs b/src/gadgets/nonnative/util.rs index 4e7860f1b..f26c09f6a 100644 --- a/src/gadgets/nonnative/util.rs +++ b/src/gadgets/nonnative/util.rs @@ -32,10 
+32,10 @@ pub struct Bitvector { impl Bit { /// Allocate a variable in the constraint system which can only be a /// boolean value. - pub fn alloc(mut cs: CS, value: Option) -> Result - where - CS: ConstraintSystem, - { + pub fn alloc>( + mut cs: CS, + value: Option, + ) -> Result { let var = cs.alloc( || "boolean", || { diff --git a/src/gadgets/r1cs.rs b/src/gadgets/r1cs.rs index ebf8d7f0c..9122578a2 100644 --- a/src/gadgets/r1cs.rs +++ b/src/gadgets/r1cs.rs @@ -13,7 +13,7 @@ use crate::{ }, }, r1cs::{R1CSInstance, RelaxedR1CSInstance}, - traits::{commitment::CommitmentTrait, Group, ROCircuitTrait, ROConstantsCircuit}, + traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, }; use bellpepper::gadgets::{boolean::Boolean, num::AllocatedNum, Assignment}; use bellpepper_core::{ConstraintSystem, SynthesisError}; @@ -21,17 +21,17 @@ use ff::Field; /// An Allocated R1CS Instance #[derive(Clone)] -pub struct AllocatedR1CSInstance { - pub(crate) W: AllocatedPoint, - pub(crate) X0: AllocatedNum, - pub(crate) X1: AllocatedNum, +pub struct AllocatedR1CSInstance { + pub(crate) W: AllocatedPoint, + pub(crate) X0: AllocatedNum, + pub(crate) X1: AllocatedNum, } -impl AllocatedR1CSInstance { +impl AllocatedR1CSInstance { /// Takes the r1cs instance and creates a new allocated r1cs instance - pub fn alloc::Base>>( + pub fn alloc::Base>>( mut cs: CS, - u: Option<&R1CSInstance>, + u: Option<&R1CSInstance>, ) -> Result { let W = AllocatedPoint::alloc( cs.namespace(|| "allocate W"), @@ -39,14 +39,14 @@ impl AllocatedR1CSInstance { )?; W.check_on_curve(cs.namespace(|| "check W on curve"))?; - let X0 = alloc_scalar_as_base::(cs.namespace(|| "allocate X[0]"), u.map(|u| u.X[0]))?; - let X1 = alloc_scalar_as_base::(cs.namespace(|| "allocate X[1]"), u.map(|u| u.X[1]))?; + let X0 = alloc_scalar_as_base::(cs.namespace(|| "allocate X[0]"), u.map(|u| u.X[0]))?; + let X1 = alloc_scalar_as_base::(cs.namespace(|| "allocate X[1]"), u.map(|u| u.X[1]))?; Ok(AllocatedR1CSInstance { W, X0, X1 }) } /// Absorb the provided instance in the RO - pub fn absorb_in_ro(&self, ro: &mut G::ROCircuit) { + pub fn absorb_in_ro(&self, ro: &mut E::ROCircuit) { ro.absorb(&self.W.x); ro.absorb(&self.W.y); ro.absorb(&self.W.is_infinity); @@ -57,19 +57,19 @@ impl AllocatedR1CSInstance { /// An Allocated Relaxed R1CS Instance #[derive(Clone)] -pub struct AllocatedRelaxedR1CSInstance { - pub(crate) W: AllocatedPoint, - pub(crate) E: AllocatedPoint, - pub(crate) u: AllocatedNum, - pub(crate) X0: BigNat, - pub(crate) X1: BigNat, +pub struct AllocatedRelaxedR1CSInstance { + pub(crate) W: AllocatedPoint, + pub(crate) E: AllocatedPoint, + pub(crate) u: AllocatedNum, + pub(crate) X0: BigNat, + pub(crate) X1: BigNat, } -impl AllocatedRelaxedR1CSInstance { +impl AllocatedRelaxedR1CSInstance { /// Allocates the given `RelaxedR1CSInstance` as a witness of the circuit - pub fn alloc::Base>>( + pub fn alloc::Base>>( mut cs: CS, - inst: Option<&RelaxedR1CSInstance>, + inst: Option<&RelaxedR1CSInstance>, limb_width: usize, n_limbs: usize, ) -> Result { @@ -86,21 +86,21 @@ impl AllocatedRelaxedR1CSInstance { inst.map(|inst| inst.comm_E.to_coordinates()), )?; - // u << |G::Base| despite the fact that u is a scalar. - // So we parse all of its bytes as a G::Base element - let u = alloc_scalar_as_base::(cs.namespace(|| "allocate u"), inst.map(|inst| inst.u))?; + // u << |E::Base| despite the fact that u is a scalar. 
+ // So we parse all of its bytes as a E::Base element + let u = alloc_scalar_as_base::(cs.namespace(|| "allocate u"), inst.map(|inst| inst.u))?; // Allocate X0 and X1. If the input instance is None, then allocate default values 0. let X0 = BigNat::alloc_from_nat( cs.namespace(|| "allocate X[0]"), - || Ok(f_to_nat(&inst.map_or(G::Scalar::ZERO, |inst| inst.X[0]))), + || Ok(f_to_nat(&inst.map_or(E::Scalar::ZERO, |inst| inst.X[0]))), limb_width, n_limbs, )?; let X1 = BigNat::alloc_from_nat( cs.namespace(|| "allocate X[1]"), - || Ok(f_to_nat(&inst.map_or(G::Scalar::ZERO, |inst| inst.X[1]))), + || Ok(f_to_nat(&inst.map_or(E::Scalar::ZERO, |inst| inst.X[1]))), limb_width, n_limbs, )?; @@ -110,7 +110,7 @@ impl AllocatedRelaxedR1CSInstance { /// Allocates the hardcoded default `RelaxedR1CSInstance` in the circuit. /// W = E = 0, u = 0, X0 = X1 = 0 - pub fn default::Base>>( + pub fn default::Base>>( mut cs: CS, limb_width: usize, n_limbs: usize, @@ -125,14 +125,14 @@ impl AllocatedRelaxedR1CSInstance { // relaxed R1CS instance with the the checked default values of W, E, and u must still be satisfying let X0 = BigNat::alloc_from_nat( cs.namespace(|| "allocate x_default[0]"), - || Ok(f_to_nat(&G::Scalar::ZERO)), + || Ok(f_to_nat(&E::Scalar::ZERO)), limb_width, n_limbs, )?; let X1 = BigNat::alloc_from_nat( cs.namespace(|| "allocate x_default[1]"), - || Ok(f_to_nat(&G::Scalar::ZERO)), + || Ok(f_to_nat(&E::Scalar::ZERO)), limb_width, n_limbs, )?; @@ -142,9 +142,9 @@ impl AllocatedRelaxedR1CSInstance { /// Allocates the R1CS Instance as a `RelaxedR1CSInstance` in the circuit. /// E = 0, u = 1 - pub fn from_r1cs_instance::Base>>( + pub fn from_r1cs_instance::Base>>( mut cs: CS, - inst: AllocatedR1CSInstance, + inst: AllocatedR1CSInstance, limb_width: usize, n_limbs: usize, ) -> Result { @@ -176,10 +176,10 @@ impl AllocatedRelaxedR1CSInstance { } /// Absorb the provided instance in the RO - pub fn absorb_in_ro::Base>>( + pub fn absorb_in_ro::Base>>( &self, mut cs: CS, - ro: &mut G::ROCircuit, + ro: &mut E::ROCircuit, ) -> Result<(), SynthesisError> { ro.absorb(&self.W.x); ro.absorb(&self.W.y); @@ -198,7 +198,7 @@ impl AllocatedRelaxedR1CSInstance { .map(|(i, limb)| { limb.as_allocated_num(cs.namespace(|| format!("convert limb {i} of X_r[0] to num"))) }) - .collect::>, _>>()?; + .collect::>, _>>()?; // absorb each of the limbs of X[0] for limb in X0_bn { @@ -214,7 +214,7 @@ impl AllocatedRelaxedR1CSInstance { .map(|(i, limb)| { limb.as_allocated_num(cs.namespace(|| format!("convert limb {i} of X_r[1] to num"))) }) - .collect::>, _>>()?; + .collect::>, _>>()?; // absorb each of the limbs of X[1] for limb in X1_bn { @@ -225,18 +225,18 @@ impl AllocatedRelaxedR1CSInstance { } /// Folds self with a relaxed r1cs instance and returns the result - pub fn fold_with_r1cs::Base>>( + pub fn fold_with_r1cs::Base>>( &self, mut cs: CS, - params: &AllocatedNum, // hash of R1CSShape of F' - u: &AllocatedR1CSInstance, - T: &AllocatedPoint, - ro_consts: ROConstantsCircuit, + params: &AllocatedNum, // hash of R1CSShape of F' + u: &AllocatedR1CSInstance, + T: &AllocatedPoint, + ro_consts: ROConstantsCircuit, limb_width: usize, n_limbs: usize, - ) -> Result, SynthesisError> { + ) -> Result, SynthesisError> { // Compute r: - let mut ro = G::ROCircuit::new(ro_consts, NUM_FE_FOR_RO); + let mut ro = E::ROCircuit::new(ro_consts, NUM_FE_FOR_RO); ro.absorb(params); self.absorb_in_ro(cs.namespace(|| "absorb running instance"), &mut ro)?; u.absorb_in_ro(&mut ro); @@ -277,7 +277,7 @@ impl AllocatedRelaxedR1CSInstance { // Allocate the 
order of the non-native field as a constant let m_bn = alloc_bignat_constant( cs.namespace(|| "alloc m"), - &G::get_curve_params().2, + &E::GE::group_params().2, limb_width, n_limbs, )?; @@ -322,26 +322,26 @@ impl AllocatedRelaxedR1CSInstance { } /// If the condition is true then returns this otherwise it returns the other - pub fn conditionally_select::Base>>( + pub fn conditionally_select::Base>>( &self, cs: CS, - other: &AllocatedRelaxedR1CSInstance, + other: &AllocatedRelaxedR1CSInstance, condition: &Boolean, - ) -> Result, SynthesisError> { + ) -> Result, SynthesisError> { conditionally_select_alloc_relaxed_r1cs(cs, self, other, condition) } } /// c = cond ? a: b, where a, b: `AllocatedRelaxedR1CSInstance` pub fn conditionally_select_alloc_relaxed_r1cs< - G: Group, - CS: ConstraintSystem<::Base>, + E: Engine, + CS: ConstraintSystem<::Base>, >( mut cs: CS, - a: &AllocatedRelaxedR1CSInstance, - b: &AllocatedRelaxedR1CSInstance, + a: &AllocatedRelaxedR1CSInstance, + b: &AllocatedRelaxedR1CSInstance, condition: &Boolean, -) -> Result, SynthesisError> { +) -> Result, SynthesisError> { let c = AllocatedRelaxedR1CSInstance { W: conditionally_select_point( cs.namespace(|| "W = cond ? a.W : b.W"), @@ -380,14 +380,14 @@ pub fn conditionally_select_alloc_relaxed_r1cs< #[allow(dead_code)] /// c = cond ? a: b, where a, b: `Vec` pub fn conditionally_select_vec_allocated_relaxed_r1cs_instance< - G: Group, - CS: ConstraintSystem<::Base>, + E: Engine, + CS: ConstraintSystem<::Base>, >( mut cs: CS, - a: &[AllocatedRelaxedR1CSInstance], - b: &[AllocatedRelaxedR1CSInstance], + a: &[AllocatedRelaxedR1CSInstance], + b: &[AllocatedRelaxedR1CSInstance], condition: &Boolean, -) -> Result>, SynthesisError> { +) -> Result>, SynthesisError> { a.iter() .enumerate() .zip(b.iter()) @@ -398,16 +398,16 @@ pub fn conditionally_select_vec_allocated_relaxed_r1cs_instance< condition, ) }) - .collect::>, _>>() + .collect::>, _>>() } /// c = cond ? a: b, where a, b: `AllocatedPoint` -pub fn conditionally_select_point::Base>>( +pub fn conditionally_select_point::Base>>( mut cs: CS, - a: &AllocatedPoint, - b: &AllocatedPoint, + a: &AllocatedPoint, + b: &AllocatedPoint, condition: &Boolean, -) -> Result, SynthesisError> { +) -> Result, SynthesisError> { let c = AllocatedPoint { x: conditionally_select( cs.namespace(|| "x = cond ? a.x : b.x"), diff --git a/src/gadgets/utils.rs b/src/gadgets/utils.rs index f8866118f..b8f242faf 100644 --- a/src/gadgets/utils.rs +++ b/src/gadgets/utils.rs @@ -1,6 +1,6 @@ //! This module implements various low-level gadgets use super::nonnative::bignat::{nat_to_limbs, BigNat}; -use crate::traits::Group; +use crate::traits::Engine; use bellpepper::gadgets::Assignment; use bellpepper_core::{ boolean::{AllocatedBit, Boolean}, @@ -68,20 +68,20 @@ pub fn alloc_one>(mut cs: CS) -> Allocate one } -/// Allocate a scalar as a base. Only to be used if the scalar fits in base! -pub fn alloc_scalar_as_base( +/// Allocate a scalar as a base. Only to be used is the scalar fits in base! 
+pub fn alloc_scalar_as_base( mut cs: CS, - input: Option, -) -> Result, SynthesisError> + input: Option, +) -> Result, SynthesisError> where - G: Group, - ::Scalar: PrimeFieldBits, - CS: ConstraintSystem<::Base>, + E: Engine, + ::Scalar: PrimeFieldBits, + CS: ConstraintSystem<::Base>, { AllocatedNum::alloc(cs.namespace(|| "allocate scalar as base"), || { - let input_bits = input.unwrap_or(G::Scalar::ZERO).clone().to_le_bits(); - let mut mult = G::Base::ONE; - let mut val = G::Base::ZERO; + let input_bits = input.unwrap_or(E::Scalar::ZERO).clone().to_le_bits(); + let mut mult = E::Base::ONE; + let mut val = E::Base::ZERO; for bit in input_bits { if bit { val += mult; @@ -93,10 +93,10 @@ where } /// interepret scalar as base -pub fn scalar_as_base(input: G::Scalar) -> G::Base { +pub fn scalar_as_base(input: E::Scalar) -> E::Base { let input_bits = input.to_le_bits(); - let mut mult = G::Base::ONE; - let mut val = G::Base::ZERO; + let mut mult = E::Base::ONE; + let mut val = E::Base::ZERO; for bit in input_bits { if bit { val += mult; diff --git a/src/lib.rs b/src/lib.rs index 62af3ee97..f4b0d1cae 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -53,23 +53,23 @@ use traits::{ circuit::StepCircuit, commitment::{CommitmentEngineTrait, CommitmentTrait}, snark::RelaxedR1CSSNARKTrait, - AbsorbInROTrait, Group, ROConstants, ROConstantsCircuit, ROTrait, + AbsorbInROTrait, Engine, ROConstants, ROConstantsCircuit, ROTrait, }; /// A type that holds parameters for the primary and secondary circuits of Nova and SuperNova #[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] -#[abomonation_bounds(where ::Repr: Abomonation)] -pub struct CircuitShape { +#[abomonation_bounds(where ::Repr: Abomonation)] +pub struct CircuitShape { F_arity: usize, - r1cs_shape: R1CSShape, + r1cs_shape: R1CSShape, } -impl SimpleDigestible for CircuitShape {} +impl SimpleDigestible for CircuitShape {} -impl CircuitShape { +impl CircuitShape { /// Create a new `CircuitShape` - pub fn new(r1cs_shape: R1CSShape, F_arity: usize) -> Self { + pub fn new(r1cs_shape: R1CSShape, F_arity: usize) -> Self { Self { F_arity, r1cs_shape, @@ -77,8 +77,8 @@ impl CircuitShape { } /// Return the [CircuitShape]' digest. 
- pub fn digest(&self) -> G::Scalar { - let dc: DigestComputer<'_, ::Scalar, CircuitShape> = DigestComputer::new(self); + pub fn digest(&self) -> E::Scalar { + let dc: DigestComputer<'_, ::Scalar, CircuitShape> = DigestComputer::new(self); dc.digest().expect("Failure in computing digest") } } @@ -88,53 +88,53 @@ impl CircuitShape { #[serde(bound = "")] #[abomonation_bounds( where - G1: Group::Scalar>, - G2: Group::Scalar>, - C1: StepCircuit, - C2: StepCircuit, - ::Repr: Abomonation, - ::Repr: Abomonation, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C1: StepCircuit, + C2: StepCircuit, + ::Repr: Abomonation, + ::Repr: Abomonation, )] -pub struct PublicParams +pub struct PublicParams where - G1: Group::Scalar>, - G2: Group::Scalar>, - C1: StepCircuit, - C2: StepCircuit, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C1: StepCircuit, + C2: StepCircuit, { F_arity_primary: usize, F_arity_secondary: usize, - ro_consts_primary: ROConstants, - ro_consts_circuit_primary: ROConstantsCircuit, - ck_primary: CommitmentKey, - circuit_shape_primary: CircuitShape, - ro_consts_secondary: ROConstants, - ro_consts_circuit_secondary: ROConstantsCircuit, - ck_secondary: CommitmentKey, - circuit_shape_secondary: CircuitShape, + ro_consts_primary: ROConstants, + ro_consts_circuit_primary: ROConstantsCircuit, + ck_primary: CommitmentKey, + circuit_shape_primary: CircuitShape, + ro_consts_secondary: ROConstants, + ro_consts_circuit_secondary: ROConstantsCircuit, + ck_secondary: CommitmentKey, + circuit_shape_secondary: CircuitShape, augmented_circuit_params_primary: NovaAugmentedCircuitParams, augmented_circuit_params_secondary: NovaAugmentedCircuitParams, #[abomonation_skip] #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, + digest: OnceCell, _p: PhantomData<(C1, C2)>, } -impl SimpleDigestible for PublicParams +impl SimpleDigestible for PublicParams where - G1: Group::Scalar>, - G2: Group::Scalar>, - C1: StepCircuit, - C2: StepCircuit, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C1: StepCircuit, + C2: StepCircuit, { } -impl PublicParams +impl PublicParams where - G1: Group::Scalar>, - G2: Group::Scalar>, - C1: StepCircuit, - C2: StepCircuit, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C1: StepCircuit, + C2: StepCircuit, { /// Set up builder to create `PublicParams` for a pair of circuits `C1` and `C2`. /// @@ -159,67 +159,67 @@ where /// # Example /// /// ```rust - /// # use pasta_curves::{vesta, pallas}; /// # use nova_snark::spartan::ppsnark::RelaxedR1CSSNARK; /// # use nova_snark::provider::ipa_pc::EvaluationEngine; - /// # use nova_snark::traits::{circuit::TrivialCircuit, Group, snark::RelaxedR1CSSNARKTrait}; + /// # use nova_snark::provider::pasta::{PallasEngine, VestaEngine}; + /// # use nova_snark::traits::{circuit::TrivialCircuit, Engine, snark::RelaxedR1CSSNARKTrait}; /// use nova_snark::PublicParams; /// - /// type G1 = pallas::Point; - /// type G2 = vesta::Point; - /// type EE = EvaluationEngine; - /// type SPrime = RelaxedR1CSSNARK>; + /// type E1 = PallasEngine; + /// type E2 = VestaEngine; + /// type EE = EvaluationEngine; + /// type SPrime = RelaxedR1CSSNARK>; /// - /// let circuit1 = TrivialCircuit::<::Scalar>::default(); - /// let circuit2 = TrivialCircuit::<::Scalar>::default(); + /// let circuit1 = TrivialCircuit::<::Scalar>::default(); + /// let circuit2 = TrivialCircuit::<::Scalar>::default(); /// // Only relevant for a SNARK using computational commitments, pass &(|_| 0) /// // or &*nova_snark::traits::snark::default_ck_hint() otherwise. 
- /// let ck_hint1 = &*SPrime::::ck_floor(); - /// let ck_hint2 = &*SPrime::::ck_floor(); + /// let ck_hint1 = &*SPrime::::ck_floor(); + /// let ck_hint2 = &*SPrime::::ck_floor(); /// /// let pp = PublicParams::setup(&circuit1, &circuit2, ck_hint1, ck_hint2); /// ``` pub fn setup( c_primary: &C1, c_secondary: &C2, - ck_hint1: &CommitmentKeyHint, - ck_hint2: &CommitmentKeyHint, + ck_hint1: &CommitmentKeyHint, + ck_hint2: &CommitmentKeyHint, ) -> Self { let augmented_circuit_params_primary = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); let augmented_circuit_params_secondary = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - let ro_consts_primary: ROConstants = ROConstants::::default(); - let ro_consts_secondary: ROConstants = ROConstants::::default(); + let ro_consts_primary: ROConstants = ROConstants::::default(); + let ro_consts_secondary: ROConstants = ROConstants::::default(); let F_arity_primary = c_primary.arity(); let F_arity_secondary = c_secondary.arity(); - // ro_consts_circuit_primary are parameterized by G2 because the type alias uses G2::Base = G1::Scalar - let ro_consts_circuit_primary: ROConstantsCircuit = ROConstantsCircuit::::default(); - let ro_consts_circuit_secondary: ROConstantsCircuit = ROConstantsCircuit::::default(); + // ro_consts_circuit_primary are parameterized by E2 because the type alias uses E2::Base = E1::Scalar + let ro_consts_circuit_primary: ROConstantsCircuit = ROConstantsCircuit::::default(); + let ro_consts_circuit_secondary: ROConstantsCircuit = ROConstantsCircuit::::default(); // Initialize ck for the primary - let circuit_primary: NovaAugmentedCircuit<'_, G2, C1> = NovaAugmentedCircuit::new( + let circuit_primary: NovaAugmentedCircuit<'_, E2, C1> = NovaAugmentedCircuit::new( &augmented_circuit_params_primary, None, c_primary, ro_consts_circuit_primary.clone(), ); - let mut cs: ShapeCS = ShapeCS::new(); + let mut cs: ShapeCS = ShapeCS::new(); let _ = circuit_primary.synthesize(&mut cs); let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape_and_key(ck_hint1); let circuit_shape_primary = CircuitShape::new(r1cs_shape_primary, F_arity_primary); // Initialize ck for the secondary - let circuit_secondary: NovaAugmentedCircuit<'_, G1, C2> = NovaAugmentedCircuit::new( + let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( &augmented_circuit_params_secondary, None, c_secondary, ro_consts_circuit_secondary.clone(), ); - let mut cs: ShapeCS = ShapeCS::new(); + let mut cs: ShapeCS = ShapeCS::new(); let _ = circuit_secondary.synthesize(&mut cs); let (r1cs_shape_secondary, ck_secondary) = cs.r1cs_shape_and_key(ck_hint2); let circuit_shape_secondary = CircuitShape::new(r1cs_shape_secondary, F_arity_secondary); @@ -243,7 +243,7 @@ where } /// Retrieve the digest of the public parameters. 
- pub fn digest(&self) -> G1::Scalar { + pub fn digest(&self) -> E1::Scalar { self .digest .get_or_try_init(|| DigestComputer::new(self).digest()) @@ -271,51 +271,51 @@ where /// A SNARK that proves the correct execution of an incremental computation #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "")] -pub struct RecursiveSNARK +pub struct RecursiveSNARK where - G1: Group::Scalar>, - G2: Group::Scalar>, - C1: StepCircuit, - C2: StepCircuit, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C1: StepCircuit, + C2: StepCircuit, { - z0_primary: Vec, - z0_secondary: Vec, - r_W_primary: RelaxedR1CSWitness, - r_U_primary: RelaxedR1CSInstance, - r_W_secondary: RelaxedR1CSWitness, - r_U_secondary: RelaxedR1CSInstance, - l_w_secondary: R1CSWitness, - l_u_secondary: R1CSInstance, + z0_primary: Vec, + z0_secondary: Vec, + r_W_primary: RelaxedR1CSWitness, + r_U_primary: RelaxedR1CSInstance, + r_W_secondary: RelaxedR1CSWitness, + r_U_secondary: RelaxedR1CSInstance, + l_w_secondary: R1CSWitness, + l_u_secondary: R1CSInstance, i: usize, - zi_primary: Vec, - zi_secondary: Vec, + zi_primary: Vec, + zi_secondary: Vec, _p: PhantomData<(C1, C2)>, } -impl RecursiveSNARK +impl RecursiveSNARK where - G1: Group::Scalar>, - G2: Group::Scalar>, - C1: StepCircuit, - C2: StepCircuit, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C1: StepCircuit, + C2: StepCircuit, { /// Create new instance of recursive SNARK pub fn new( - pp: &PublicParams, + pp: &PublicParams, c_primary: &C1, c_secondary: &C2, - z0_primary: &[G1::Scalar], - z0_secondary: &[G2::Scalar], + z0_primary: &[E1::Scalar], + z0_secondary: &[E2::Scalar], ) -> Result { if z0_primary.len() != pp.F_arity_primary || z0_secondary.len() != pp.F_arity_secondary { return Err(NovaError::InvalidInitialInputLength); } // base case for the primary - let mut cs_primary = SatisfyingAssignment::::new(); - let inputs_primary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - G1::Scalar::ZERO, + let mut cs_primary = SatisfyingAssignment::::new(); + let inputs_primary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + E1::Scalar::ZERO, z0_primary.to_vec(), None, None, @@ -323,7 +323,7 @@ where None, ); - let circuit_primary: NovaAugmentedCircuit<'_, G2, C1> = NovaAugmentedCircuit::new( + let circuit_primary: NovaAugmentedCircuit<'_, E2, C1> = NovaAugmentedCircuit::new( &pp.augmented_circuit_params_primary, Some(inputs_primary), c_primary, @@ -339,17 +339,17 @@ where .expect("Nova error unsat"); // base case for the secondary - let mut cs_secondary = SatisfyingAssignment::::new(); - let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( + let mut cs_secondary = SatisfyingAssignment::::new(); + let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( pp.digest(), - G2::Scalar::ZERO, + E2::Scalar::ZERO, z0_secondary.to_vec(), None, None, Some(u_primary.clone()), None, ); - let circuit_secondary: NovaAugmentedCircuit<'_, G1, C2> = NovaAugmentedCircuit::new( + let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( &pp.augmented_circuit_params_secondary, Some(inputs_secondary), c_secondary, @@ -378,9 +378,9 @@ where // IVC proof for the secondary circuit let l_w_secondary = w_secondary; let l_u_secondary = u_secondary; - let r_W_secondary = RelaxedR1CSWitness::::default(&pp.circuit_shape_secondary.r1cs_shape); + let r_W_secondary = 
RelaxedR1CSWitness::::default(&pp.circuit_shape_secondary.r1cs_shape); let r_U_secondary = - RelaxedR1CSInstance::::default(&pp.ck_secondary, &pp.circuit_shape_secondary.r1cs_shape); + RelaxedR1CSInstance::::default(&pp.ck_secondary, &pp.circuit_shape_secondary.r1cs_shape); assert!( !(zi_primary.len() != pp.F_arity_primary || zi_secondary.len() != pp.F_arity_secondary), @@ -390,13 +390,13 @@ where let zi_primary = zi_primary .iter() .map(|v| v.get_value().ok_or(NovaError::SynthesisError)) - .collect::::Scalar>, NovaError>>() + .collect::::Scalar>, NovaError>>() .expect("Nova error synthesis"); let zi_secondary = zi_secondary .iter() .map(|v| v.get_value().ok_or(NovaError::SynthesisError)) - .collect::::Scalar>, NovaError>>() + .collect::::Scalar>, NovaError>>() .expect("Nova error synthesis"); Ok(Self { @@ -420,7 +420,7 @@ where #[tracing::instrument(skip_all, name = "nova::RecursiveSNARK::prove_step")] pub fn prove_step( &mut self, - pp: &PublicParams, + pp: &PublicParams, c_primary: &C1, c_secondary: &C2, ) -> Result<(), NovaError> { @@ -434,7 +434,7 @@ where let (nifs_secondary, (r_U_secondary, r_W_secondary)) = NIFS::prove( &pp.ck_secondary, &pp.ro_consts_secondary, - &scalar_as_base::(pp.digest()), + &scalar_as_base::(pp.digest()), &pp.circuit_shape_secondary.r1cs_shape, &self.r_U_secondary, &self.r_W_secondary, @@ -443,18 +443,18 @@ where ) .expect("Unable to fold secondary"); - let mut cs_primary = SatisfyingAssignment::::new(); - let inputs_primary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - G1::Scalar::from(self.i as u64), + let mut cs_primary = SatisfyingAssignment::::new(); + let inputs_primary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + E1::Scalar::from(self.i as u64), self.z0_primary.to_vec(), Some(self.zi_primary.clone()), Some(self.r_U_secondary.clone()), Some(self.l_u_secondary.clone()), - Some(Commitment::::decompress(&nifs_secondary.comm_T)?), + Some(Commitment::::decompress(&nifs_secondary.comm_T)?), ); - let circuit_primary: NovaAugmentedCircuit<'_, G2, C1> = NovaAugmentedCircuit::new( + let circuit_primary: NovaAugmentedCircuit<'_, E2, C1> = NovaAugmentedCircuit::new( &pp.augmented_circuit_params_primary, Some(inputs_primary), c_primary, @@ -483,18 +483,18 @@ where ) .expect("Unable to fold primary"); - let mut cs_secondary = SatisfyingAssignment::::new(); - let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( + let mut cs_secondary = SatisfyingAssignment::::new(); + let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( pp.digest(), - G2::Scalar::from(self.i as u64), + E2::Scalar::from(self.i as u64), self.z0_secondary.to_vec(), Some(self.zi_secondary.clone()), Some(self.r_U_primary.clone()), Some(l_u_primary), - Some(Commitment::::decompress(&nifs_primary.comm_T)?), + Some(Commitment::::decompress(&nifs_primary.comm_T)?), ); - let circuit_secondary: NovaAugmentedCircuit<'_, G1, C2> = NovaAugmentedCircuit::new( + let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( &pp.augmented_circuit_params_secondary, Some(inputs_secondary), c_secondary, @@ -512,11 +512,11 @@ where self.zi_primary = zi_primary .iter() .map(|v| v.get_value().ok_or(NovaError::SynthesisError)) - .collect::::Scalar>, NovaError>>()?; + .collect::::Scalar>, NovaError>>()?; self.zi_secondary = zi_secondary .iter() .map(|v| v.get_value().ok_or(NovaError::SynthesisError)) - .collect::::Scalar>, NovaError>>()?; 
+ .collect::::Scalar>, NovaError>>()?; self.l_u_secondary = l_u_secondary; self.l_w_secondary = l_w_secondary; @@ -535,11 +535,11 @@ where /// Verify the correctness of the `RecursiveSNARK` pub fn verify( &self, - pp: &PublicParams, + pp: &PublicParams, num_steps: usize, - z0_primary: &[G1::Scalar], - z0_secondary: &[G2::Scalar], - ) -> Result<(Vec, Vec), NovaError> { + z0_primary: &[E1::Scalar], + z0_secondary: &[E2::Scalar], + ) -> Result<(Vec, Vec), NovaError> { // number of steps cannot be zero let is_num_steps_zero = num_steps == 0; @@ -564,12 +564,12 @@ where // check if the output hashes in R1CS instances point to the right running instances let (hash_primary, hash_secondary) = { - let mut hasher = <::RO as ROTrait>::new( + let mut hasher = ::RO::new( pp.ro_consts_secondary.clone(), NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * pp.F_arity_primary, ); hasher.absorb(pp.digest()); - hasher.absorb(G1::Scalar::from(num_steps as u64)); + hasher.absorb(E1::Scalar::from(num_steps as u64)); for e in z0_primary { hasher.absorb(*e); } @@ -578,12 +578,12 @@ where } self.r_U_secondary.absorb_in_ro(&mut hasher); - let mut hasher2 = <::RO as ROTrait>::new( + let mut hasher2 = ::RO::new( pp.ro_consts_primary.clone(), NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * pp.F_arity_secondary, ); - hasher2.absorb(scalar_as_base::(pp.digest())); - hasher2.absorb(G2::Scalar::from(num_steps as u64)); + hasher2.absorb(scalar_as_base::(pp.digest())); + hasher2.absorb(E2::Scalar::from(num_steps as u64)); for e in z0_secondary { hasher2.absorb(*e); } @@ -599,7 +599,7 @@ where }; if hash_primary != self.l_u_secondary.X[0] - || hash_secondary != scalar_as_base::(self.l_u_secondary.X[1]) + || hash_secondary != scalar_as_base::(self.l_u_secondary.X[1]) { return Err(NovaError::ProofVerifyError); } @@ -646,14 +646,14 @@ where #[derive(Clone, Debug, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] #[abomonation_omit_bounds] -pub struct ProverKey +pub struct ProverKey where - G1: Group::Scalar>, - G2: Group::Scalar>, - C1: StepCircuit, - C2: StepCircuit, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C1: StepCircuit, + C2: StepCircuit, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait, { pk_primary: S1::ProverKey, pk_secondary: S2::ProverKey, @@ -664,30 +664,30 @@ where #[derive(Clone, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] #[abomonation_bounds( + where + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C1: StepCircuit, + C2: StepCircuit, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait, + ::Repr: Abomonation, + )] +pub struct VerifierKey where - G1: Group::Scalar>, - G2: Group::Scalar>, - C1: StepCircuit, - C2: StepCircuit, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait, - ::Repr: Abomonation, -)] -pub struct VerifierKey -where - G1: Group::Scalar>, - G2: Group::Scalar>, - C1: StepCircuit, - C2: StepCircuit, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C1: StepCircuit, + C2: StepCircuit, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait, { F_arity_primary: usize, F_arity_secondary: usize, - ro_consts_primary: ROConstants, - ro_consts_secondary: ROConstants, - #[abomonate_with(::Repr)] - pp_digest: G1::Scalar, + ro_consts_primary: ROConstants, + ro_consts_secondary: ROConstants, + #[abomonate_with(::Repr)] + pp_digest: E1::Scalar, vk_primary: S1::VerifierKey, vk_secondary: S2::VerifierKey, _p: PhantomData<(C1, C2)>, @@ -696,45 +696,45 @@ where /// A SNARK that proves the 
knowledge of a valid `RecursiveSNARK` #[derive(Clone, Serialize, Deserialize)] #[serde(bound = "")] -pub struct CompressedSNARK +pub struct CompressedSNARK where - G1: Group::Scalar>, - G2: Group::Scalar>, - C1: StepCircuit, - C2: StepCircuit, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C1: StepCircuit, + C2: StepCircuit, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait, { - r_U_primary: RelaxedR1CSInstance, + r_U_primary: RelaxedR1CSInstance, r_W_snark_primary: S1, - r_U_secondary: RelaxedR1CSInstance, - l_u_secondary: R1CSInstance, - nifs_secondary: NIFS, + r_U_secondary: RelaxedR1CSInstance, + l_u_secondary: R1CSInstance, + nifs_secondary: NIFS, f_W_snark_secondary: S2, - zn_primary: Vec, - zn_secondary: Vec, + zn_primary: Vec, + zn_secondary: Vec, _p: PhantomData<(C1, C2)>, } -impl CompressedSNARK +impl CompressedSNARK where - G1: Group::Scalar>, - G2: Group::Scalar>, - C1: StepCircuit, - C2: StepCircuit, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C1: StepCircuit, + C2: StepCircuit, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait, { /// Creates prover and verifier keys for `CompressedSNARK` pub fn setup( - pp: &PublicParams, + pp: &PublicParams, ) -> Result< ( - ProverKey, - VerifierKey, + ProverKey, + VerifierKey, ), NovaError, > { @@ -764,15 +764,15 @@ where /// Create a new `CompressedSNARK` pub fn prove( - pp: &PublicParams, - pk: &ProverKey, - recursive_snark: &RecursiveSNARK, + pp: &PublicParams, + pk: &ProverKey, + recursive_snark: &RecursiveSNARK, ) -> Result { // fold the secondary circuit's instance with its running instance let (nifs_secondary, (f_U_secondary, f_W_secondary)) = NIFS::prove( &pp.ck_secondary, &pp.ro_consts_secondary, - &scalar_as_base::(pp.digest()), + &scalar_as_base::(pp.digest()), &pp.circuit_shape_secondary.r1cs_shape, &recursive_snark.r_U_secondary, &recursive_snark.r_W_secondary, @@ -821,11 +821,11 @@ where /// Verify the correctness of the `CompressedSNARK` pub fn verify( &self, - vk: &VerifierKey, + vk: &VerifierKey, num_steps: usize, - z0_primary: &[G1::Scalar], - z0_secondary: &[G2::Scalar], - ) -> Result<(Vec, Vec), NovaError> { + z0_primary: &[E1::Scalar], + z0_secondary: &[E2::Scalar], + ) -> Result<(Vec, Vec), NovaError> { // the number of steps cannot be zero if num_steps == 0 { return Err(NovaError::ProofVerifyError); @@ -841,12 +841,12 @@ where // check if the output hashes in R1CS instances point to the right running instances let (hash_primary, hash_secondary) = { - let mut hasher = <::RO as ROTrait>::new( + let mut hasher = ::RO::new( vk.ro_consts_secondary.clone(), NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * vk.F_arity_primary, ); hasher.absorb(vk.pp_digest); - hasher.absorb(G1::Scalar::from(num_steps as u64)); + hasher.absorb(E1::Scalar::from(num_steps as u64)); for e in z0_primary { hasher.absorb(*e); } @@ -855,12 +855,12 @@ where } self.r_U_secondary.absorb_in_ro(&mut hasher); - let mut hasher2 = <::RO as ROTrait>::new( + let mut hasher2 = ::RO::new( vk.ro_consts_primary.clone(), NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * vk.F_arity_secondary, ); - hasher2.absorb(scalar_as_base::(vk.pp_digest)); - hasher2.absorb(G2::Scalar::from(num_steps as u64)); + hasher2.absorb(scalar_as_base::(vk.pp_digest)); + hasher2.absorb(E2::Scalar::from(num_steps as u64)); for e in z0_secondary { hasher2.absorb(*e); } @@ -876,7 +876,7 @@ where }; if hash_primary != self.l_u_secondary.X[0] - || hash_secondary != 
scalar_as_base::(self.l_u_secondary.X[1]) + || hash_secondary != scalar_as_base::(self.l_u_secondary.X[1]) { return Err(NovaError::ProofVerifyError); } @@ -884,7 +884,7 @@ where // fold the secondary's running instance with the last instance to get a folded instance let f_U_secondary = self.nifs_secondary.verify( &vk.ro_consts_secondary, - &scalar_as_base::(vk.pp_digest), + &scalar_as_base::(vk.pp_digest), &self.r_U_secondary, &self.l_u_secondary, )?; @@ -916,61 +916,57 @@ where /// Note for callers: This function should be called with its performance characteristics in mind. /// It will synthesize and digest the full `circuit` given. pub fn circuit_digest< - G1: Group::Scalar>, - G2: Group::Scalar>, - C: StepCircuit, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + C: StepCircuit, >( circuit: &C, -) -> G1::Scalar { +) -> E1::Scalar { let augmented_circuit_params = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); // ro_consts_circuit are parameterized by G2 because the type alias uses G2::Base = G1::Scalar - let ro_consts_circuit: ROConstantsCircuit = ROConstantsCircuit::::default(); + let ro_consts_circuit: ROConstantsCircuit = ROConstantsCircuit::::default(); // Initialize ck for the primary - let augmented_circuit: NovaAugmentedCircuit<'_, G2, C> = + let augmented_circuit: NovaAugmentedCircuit<'_, E2, C> = NovaAugmentedCircuit::new(&augmented_circuit_params, None, circuit, ro_consts_circuit); - let mut cs: ShapeCS = ShapeCS::new(); + let mut cs: ShapeCS = ShapeCS::new(); let _ = augmented_circuit.synthesize(&mut cs); cs.r1cs_shape().digest() } -type CommitmentKey = <::CE as CommitmentEngineTrait>::CommitmentKey; -type Commitment = <::CE as CommitmentEngineTrait>::Commitment; -type CompressedCommitment = <<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait>::CompressedCommitment; -type CE = ::CE; +type CommitmentKey = <::CE as CommitmentEngineTrait>::CommitmentKey; +type Commitment = <::CE as CommitmentEngineTrait>::Commitment; +type CompressedCommitment = <<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait>::CompressedCommitment; +type CE = ::CE; #[cfg(test)] mod tests { + use super::*; use crate::{ provider::{ - bn256_grumpkin::{bn256, grumpkin}, - secp_secq::{secp256k1, secq256k1}, - GroupExt, + bn256_grumpkin::{Bn256Engine, GrumpkinEngine}, + pasta::{PallasEngine, VestaEngine}, + secp_secq::{Secp256k1Engine, Secq256k1Engine}, + DlogGroup, }, traits::{evaluation::EvaluationEngineTrait, snark::default_ck_hint}, }; - use core::fmt::Write; - - use super::*; - type EE = provider::ipa_pc::EvaluationEngine; - type S = spartan::snark::RelaxedR1CSSNARK; - type SPrime = spartan::ppsnark::RelaxedR1CSSNARK; - use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; - use core::marker::PhantomData; + use core::{fmt::Write, marker::PhantomData}; use ff::PrimeField; use traits::circuit::TrivialCircuit; + type EE = provider::ipa_pc::EvaluationEngine; + type S = spartan::snark::RelaxedR1CSSNARK; + type SPrime = spartan::ppsnark::RelaxedR1CSSNARK; + #[derive(Clone, Debug, Default)] struct CubicCircuit { _p: PhantomData, } - impl StepCircuit for CubicCircuit - where - F: PrimeField, - { + impl StepCircuit for CubicCircuit { fn arity(&self) -> usize { 1 } @@ -1007,30 +1003,30 @@ mod tests { } } - impl CubicCircuit - where - F: PrimeField, - { + impl CubicCircuit { fn output(&self, z: &[F]) -> Vec { vec![z[0] * z[0] * z[0] + z[0] + F::from(5u64)] } } - fn test_pp_digest_with(circuit1: &T1, circuit2: &T2, _expected: &str) + fn 
test_pp_digest_with(circuit1: &T1, circuit2: &T2, expected: &str) where - G1: Group::Scalar> + GroupExt, - G2: Group::Scalar> + GroupExt, - T1: StepCircuit, - T2: StepCircuit, - E1: EvaluationEngineTrait, - E2: EvaluationEngineTrait, - ::Repr: Abomonation, - ::Repr: Abomonation, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + E1::GE: DlogGroup, + E2::GE: DlogGroup, + T1: StepCircuit, + T2: StepCircuit, + EE1: EvaluationEngineTrait, + EE2: EvaluationEngineTrait, + // this is due to the reliance on Abomonation + ::Repr: Abomonation, + ::Repr: Abomonation, { // this tests public parameters with a size specifically intended for a spark-compressed SNARK - let ck_hint1 = &*SPrime::::ck_floor(); - let ck_hint2 = &*SPrime::::ck_floor(); - let pp = PublicParams::::setup(circuit1, circuit2, ck_hint1, ck_hint2); + let ck_hint1 = &*SPrime::::ck_floor(); + let ck_hint2 = &*SPrime::::ck_floor(); + let pp = PublicParams::::setup(circuit1, circuit2, ck_hint1, ck_hint2); let digest_str = pp .digest() @@ -1041,92 +1037,89 @@ mod tests { let _ = write!(output, "{b:02x}"); output }); - println!("{:?}", digest_str); - // assert_eq!(digest_str, expected); + assert_eq!(digest_str, expected); } #[test] fn test_pp_digest() { - type G1 = pasta_curves::pallas::Point; - type G2 = pasta_curves::vesta::Point; - let trivial_circuit1 = TrivialCircuit::<::Scalar>::default(); - let trivial_circuit2 = TrivialCircuit::<::Scalar>::default(); - let cubic_circuit1 = CubicCircuit::<::Scalar>::default(); + let trivial_circuit1 = TrivialCircuit::<::Scalar>::default(); + let trivial_circuit2 = TrivialCircuit::<::Scalar>::default(); + let cubic_circuit1 = CubicCircuit::<::Scalar>::default(); - test_pp_digest_with::, EE<_>>( + test_pp_digest_with::, EE<_>>( &trivial_circuit1, &trivial_circuit2, - "cb581e2d5c4b2ef2ddbe2d6849e0da810352f59bcdaca51476dcf9e16072f100", + "f4a04841515b4721519e2671b7ee11e58e2d4a30bb183ded963b71ad2ec80d00", ); - test_pp_digest_with::, EE<_>>( + test_pp_digest_with::, EE<_>>( &cubic_circuit1, &trivial_circuit2, - "3cc29bb864910463e0501bac84cdefc1d4327e9c2ef5b0fd6d45ad1741f1a401", + "dc1b7c40ab50c5c6877ad027769452870cc28f1d13f140de7ca3a00138c58502", ); - let trivial_circuit1_grumpkin = TrivialCircuit::<::Scalar>::default(); - let trivial_circuit2_grumpkin = TrivialCircuit::<::Scalar>::default(); - let cubic_circuit1_grumpkin = CubicCircuit::<::Scalar>::default(); + let trivial_circuit1_grumpkin = TrivialCircuit::<::Scalar>::default(); + let trivial_circuit2_grumpkin = TrivialCircuit::<::Scalar>::default(); + let cubic_circuit1_grumpkin = CubicCircuit::<::Scalar>::default(); // These tests should not need be different on the "asm" feature for bn256. // See https://github.com/privacy-scaling-explorations/halo2curves/issues/100 for why they are - closing the issue there // should eliminate the discrepancy here. 
#[cfg(feature = "asm")] - test_pp_digest_with::, EE<_>>( + test_pp_digest_with::, EE<_>>( &trivial_circuit1_grumpkin, &trivial_circuit2_grumpkin, "c4ecd363a6c1473de7e0d24fc1dbb660f563556e2e13fb4614acdff04cab7701", ); #[cfg(feature = "asm")] - test_pp_digest_with::, EE<_>>( + test_pp_digest_with::, EE<_>>( &cubic_circuit1_grumpkin, &trivial_circuit2_grumpkin, "4853a6463b6309f6ae76442934d0a423f51f1e10abaddd0d39bf5644ed589100", ); #[cfg(not(feature = "asm"))] - test_pp_digest_with::, EE<_>>( + test_pp_digest_with::, EE<_>>( &trivial_circuit1_grumpkin, &trivial_circuit2_grumpkin, - "c26cc841d42c19bf98bc2482e66cd30903922f2a923927b85d66f375a821f101", + "c565748bea3336f07c8ff997c542ed62385ff5662f29402c4f9747153f699e01", ); #[cfg(not(feature = "asm"))] - test_pp_digest_with::, EE<_>>( + test_pp_digest_with::, EE<_>>( &cubic_circuit1_grumpkin, &trivial_circuit2_grumpkin, - "4c484cab71e93dda69b420beb7276af969c2034a7ffb0ea8e6964e96a7e5a901", + "5aec6defcb0f6b2bb14aec70362419388916d7a5bc528c0b3fabb197ae57cb03", ); - let trivial_circuit1_secp = TrivialCircuit::<::Scalar>::default(); - let trivial_circuit2_secp = TrivialCircuit::<::Scalar>::default(); - let cubic_circuit1_secp = CubicCircuit::<::Scalar>::default(); + let trivial_circuit1_secp = TrivialCircuit::<::Scalar>::default(); + let trivial_circuit2_secp = TrivialCircuit::<::Scalar>::default(); + let cubic_circuit1_secp = CubicCircuit::<::Scalar>::default(); - test_pp_digest_with::, EE<_>>( + test_pp_digest_with::, EE<_>>( &trivial_circuit1_secp, &trivial_circuit2_secp, - "b794d655fb39891eaf530ca3be1ec2a5ac97f72a0d07c45dbb84529d8a611502", + "66c6d3618bb824bcb9253b7731247b89432853bf2014ffae45a8f6b00befe303", ); - test_pp_digest_with::, EE<_>>( + test_pp_digest_with::, EE<_>>( &cubic_circuit1_secp, &trivial_circuit2_secp, - "50e6acf363c31c2ac1c9c646b4494cb21aae6cb648c7b0d4c95015c811fba302", + "cc22c270460e11d190235fbd691bdeec51e8200219e5e65112e48bb80b610803", ); } - fn test_ivc_trivial_with() + fn test_ivc_trivial_with() where - G1: Group::Scalar>, - G2: Group::Scalar>, + E1: Engine::Scalar>, + E2: Engine::Scalar>, { - let test_circuit1 = TrivialCircuit::<::Scalar>::default(); - let test_circuit2 = TrivialCircuit::<::Scalar>::default(); + let test_circuit1 = TrivialCircuit::<::Scalar>::default(); + let test_circuit2 = TrivialCircuit::<::Scalar>::default(); // produce public parameters let pp = PublicParams::< - G1, - G2, - TrivialCircuit<::Scalar>, - TrivialCircuit<::Scalar>, + E1, + E2, + TrivialCircuit<::Scalar>, + TrivialCircuit<::Scalar>, >::setup( &test_circuit1, &test_circuit2, @@ -1140,8 +1133,8 @@ mod tests { &pp, &test_circuit1, &test_circuit2, - &[::Scalar::ZERO], - &[::Scalar::ZERO], + &[::Scalar::ZERO], + &[::Scalar::ZERO], ) .unwrap(); @@ -1153,36 +1146,33 @@ mod tests { let res = recursive_snark.verify( &pp, num_steps, - &[::Scalar::ZERO], - &[::Scalar::ZERO], + &[::Scalar::ZERO], + &[::Scalar::ZERO], ); assert!(res.is_ok()); } #[test] fn test_ivc_trivial() { - type G1 = pasta_curves::pallas::Point; - type G2 = pasta_curves::vesta::Point; - - test_ivc_trivial_with::(); - test_ivc_trivial_with::(); - test_ivc_trivial_with::(); + test_ivc_trivial_with::(); + test_ivc_trivial_with::(); + test_ivc_trivial_with::(); } - fn test_ivc_nontrivial_with() + fn test_ivc_nontrivial_with() where - G1: Group::Scalar>, - G2: Group::Scalar>, + E1: Engine::Scalar>, + E2: Engine::Scalar>, { let circuit_primary = TrivialCircuit::default(); let circuit_secondary = CubicCircuit::default(); // produce public parameters let pp = PublicParams::< - G1, - G2, - 
TrivialCircuit<::Scalar>, - CubicCircuit<::Scalar>, + E1, + E2, + TrivialCircuit<::Scalar>, + CubicCircuit<::Scalar>, >::setup( &circuit_primary, &circuit_secondary, @@ -1194,16 +1184,16 @@ mod tests { // produce a recursive SNARK let mut recursive_snark = RecursiveSNARK::< - G1, - G2, - TrivialCircuit<::Scalar>, - CubicCircuit<::Scalar>, + E1, + E2, + TrivialCircuit<::Scalar>, + CubicCircuit<::Scalar>, >::new( &pp, &circuit_primary, &circuit_secondary, - &[::Scalar::ONE], - &[::Scalar::ZERO], + &[::Scalar::ONE], + &[::Scalar::ZERO], ) .unwrap(); @@ -1215,8 +1205,8 @@ mod tests { let res = recursive_snark.verify( &pp, i + 1, - &[::Scalar::ONE], - &[::Scalar::ZERO], + &[::Scalar::ONE], + &[::Scalar::ZERO], ); assert!(res.is_ok()); } @@ -1225,52 +1215,49 @@ mod tests { let res = recursive_snark.verify( &pp, num_steps, - &[::Scalar::ONE], - &[::Scalar::ZERO], + &[::Scalar::ONE], + &[::Scalar::ZERO], ); assert!(res.is_ok()); let (zn_primary, zn_secondary) = res.unwrap(); // sanity: check the claimed output with a direct computation of the same - assert_eq!(zn_primary, vec![::Scalar::ONE]); - let mut zn_secondary_direct = vec![::Scalar::ZERO]; + assert_eq!(zn_primary, vec![::Scalar::ONE]); + let mut zn_secondary_direct = vec![::Scalar::ZERO]; for _i in 0..num_steps { zn_secondary_direct = circuit_secondary.clone().output(&zn_secondary_direct); } assert_eq!(zn_secondary, zn_secondary_direct); - assert_eq!(zn_secondary, vec![::Scalar::from(2460515u64)]); + assert_eq!(zn_secondary, vec![::Scalar::from(2460515u64)]); } #[test] fn test_ivc_nontrivial() { - type G1 = pasta_curves::pallas::Point; - type G2 = pasta_curves::vesta::Point; - - test_ivc_nontrivial_with::(); - test_ivc_nontrivial_with::(); - test_ivc_nontrivial_with::(); + test_ivc_nontrivial_with::(); + test_ivc_nontrivial_with::(); + test_ivc_nontrivial_with::(); } - fn test_ivc_nontrivial_with_compression_with() + fn test_ivc_nontrivial_with_compression_with() where - G1: Group::Scalar>, - G2: Group::Scalar>, - E1: EvaluationEngineTrait, - E2: EvaluationEngineTrait, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + EE1: EvaluationEngineTrait, + EE2: EvaluationEngineTrait, // this is due to the reliance on Abomonation - <::Scalar as PrimeField>::Repr: Abomonation, - <::Scalar as PrimeField>::Repr: Abomonation, + ::Repr: Abomonation, + ::Repr: Abomonation, { let circuit_primary = TrivialCircuit::default(); let circuit_secondary = CubicCircuit::default(); // produce public parameters let pp = PublicParams::< - G1, - G2, - TrivialCircuit<::Scalar>, - CubicCircuit<::Scalar>, + E1, + E2, + TrivialCircuit<::Scalar>, + CubicCircuit<::Scalar>, >::setup( &circuit_primary, &circuit_secondary, @@ -1282,16 +1269,16 @@ mod tests { // produce a recursive SNARK let mut recursive_snark = RecursiveSNARK::< - G1, - G2, - TrivialCircuit<::Scalar>, - CubicCircuit<::Scalar>, + E1, + E2, + TrivialCircuit<::Scalar>, + CubicCircuit<::Scalar>, >::new( &pp, &circuit_primary, &circuit_secondary, - &[::Scalar::ONE], - &[::Scalar::ZERO], + &[::Scalar::ONE], + &[::Scalar::ZERO], ) .unwrap(); @@ -1304,28 +1291,28 @@ mod tests { let res = recursive_snark.verify( &pp, num_steps, - &[::Scalar::ONE], - &[::Scalar::ZERO], + &[::Scalar::ONE], + &[::Scalar::ZERO], ); assert!(res.is_ok()); let (zn_primary, zn_secondary) = res.unwrap(); // sanity: check the claimed output with a direct computation of the same - assert_eq!(zn_primary, vec![::Scalar::ONE]); - let mut zn_secondary_direct = vec![::Scalar::ZERO]; + assert_eq!(zn_primary, vec![::Scalar::ONE]); + let mut 
zn_secondary_direct = vec![::Scalar::ZERO]; for _i in 0..num_steps { zn_secondary_direct = circuit_secondary.clone().output(&zn_secondary_direct); } assert_eq!(zn_secondary, zn_secondary_direct); - assert_eq!(zn_secondary, vec![::Scalar::from(2460515u64)]); + assert_eq!(zn_secondary, vec![::Scalar::from(2460515u64)]); // produce the prover and verifier keys for compressed snark - let (pk, vk) = CompressedSNARK::<_, _, _, _, S, S>::setup(&pp).unwrap(); + let (pk, vk) = CompressedSNARK::<_, _, _, _, S, S>::setup(&pp).unwrap(); // produce a compressed SNARK let res = - CompressedSNARK::<_, _, _, _, S, S>::prove(&pp, &pk, &recursive_snark); + CompressedSNARK::<_, _, _, _, S, S>::prove(&pp, &pk, &recursive_snark); assert!(res.is_ok()); let compressed_snark = res.unwrap(); @@ -1333,62 +1320,59 @@ mod tests { let res = compressed_snark.verify( &vk, num_steps, - &[::Scalar::ONE], - &[::Scalar::ZERO], + &[::Scalar::ONE], + &[::Scalar::ZERO], ); assert!(res.is_ok()); } #[test] fn test_ivc_nontrivial_with_compression() { - type G1 = pasta_curves::pallas::Point; - type G2 = pasta_curves::vesta::Point; - - test_ivc_nontrivial_with_compression_with::, EE<_>>(); - test_ivc_nontrivial_with_compression_with::, EE<_>>(); - test_ivc_nontrivial_with_compression_with::, EE<_>>(); + test_ivc_nontrivial_with_compression_with::, EE<_>>(); + test_ivc_nontrivial_with_compression_with::, EE<_>>(); + test_ivc_nontrivial_with_compression_with::, EE<_>>(); } - fn test_ivc_nontrivial_with_spark_compression_with() + fn test_ivc_nontrivial_with_spark_compression_with() where - G1: Group::Scalar>, - G2: Group::Scalar>, - E1: EvaluationEngineTrait, - E2: EvaluationEngineTrait, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + EE1: EvaluationEngineTrait, + EE2: EvaluationEngineTrait, // this is due to the reliance on Abomonation - <::Scalar as PrimeField>::Repr: Abomonation, - <::Scalar as PrimeField>::Repr: Abomonation, + ::Repr: Abomonation, + ::Repr: Abomonation, { let circuit_primary = TrivialCircuit::default(); let circuit_secondary = CubicCircuit::default(); // produce public parameters, which we'll use with a spark-compressed SNARK let pp = PublicParams::< - G1, - G2, - TrivialCircuit<::Scalar>, - CubicCircuit<::Scalar>, + E1, + E2, + TrivialCircuit<::Scalar>, + CubicCircuit<::Scalar>, >::setup( &circuit_primary, &circuit_secondary, - &*SPrime::<_, E1>::ck_floor(), - &*SPrime::<_, E2>::ck_floor(), + &*SPrime::::ck_floor(), + &*SPrime::::ck_floor(), ); let num_steps = 3; // produce a recursive SNARK let mut recursive_snark = RecursiveSNARK::< - G1, - G2, - TrivialCircuit<::Scalar>, - CubicCircuit<::Scalar>, + E1, + E2, + TrivialCircuit<::Scalar>, + CubicCircuit<::Scalar>, >::new( &pp, &circuit_primary, &circuit_secondary, - &[::Scalar::ONE], - &[::Scalar::ZERO], + &[::Scalar::ONE], + &[::Scalar::ZERO], ) .unwrap(); @@ -1401,29 +1385,29 @@ mod tests { let res = recursive_snark.verify( &pp, num_steps, - &[::Scalar::ONE], - &[::Scalar::ZERO], + &[::Scalar::ONE], + &[::Scalar::ZERO], ); assert!(res.is_ok()); let (zn_primary, zn_secondary) = res.unwrap(); // sanity: check the claimed output with a direct computation of the same - assert_eq!(zn_primary, vec![::Scalar::ONE]); - let mut zn_secondary_direct = vec![::Scalar::ZERO]; + assert_eq!(zn_primary, vec![::Scalar::ONE]); + let mut zn_secondary_direct = vec![::Scalar::ZERO]; for _i in 0..num_steps { zn_secondary_direct = CubicCircuit::default().output(&zn_secondary_direct); } assert_eq!(zn_secondary, zn_secondary_direct); - assert_eq!(zn_secondary, 
vec![::Scalar::from(2460515u64)]); + assert_eq!(zn_secondary, vec![::Scalar::from(2460515u64)]); // run the compressed snark with Spark compiler // produce the prover and verifier keys for compressed snark let (pk, vk) = - CompressedSNARK::<_, _, _, _, SPrime, SPrime>::setup(&pp).unwrap(); + CompressedSNARK::<_, _, _, _, SPrime, SPrime>::setup(&pp).unwrap(); // produce a compressed SNARK - let res = CompressedSNARK::<_, _, _, _, SPrime, SPrime>::prove( + let res = CompressedSNARK::<_, _, _, _, SPrime, SPrime>::prove( &pp, &pk, &recursive_snark, @@ -1435,37 +1419,29 @@ mod tests { let res = compressed_snark.verify( &vk, num_steps, - &[::Scalar::ONE], - &[::Scalar::ZERO], + &[::Scalar::ONE], + &[::Scalar::ZERO], ); assert!(res.is_ok()); } #[test] fn test_ivc_nontrivial_with_spark_compression() { - type G1 = pasta_curves::pallas::Point; - type G2 = pasta_curves::vesta::Point; - - test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); - test_ivc_nontrivial_with_spark_compression_with::, EE<_>>( + test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); + test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); + test_ivc_nontrivial_with_spark_compression_with::, EE<_>>( ); - test_ivc_nontrivial_with_spark_compression_with::< - secp256k1::Point, - secq256k1::Point, - EE<_>, - EE<_>, - >(); } - fn test_ivc_nondet_with_compression_with() + fn test_ivc_nondet_with_compression_with() where - G1: Group::Scalar>, - G2: Group::Scalar>, - E1: EvaluationEngineTrait, - E2: EvaluationEngineTrait, + E1: Engine::Scalar>, + E2: Engine::Scalar>, + EE1: EvaluationEngineTrait, + EE2: EvaluationEngineTrait, // this is due to the reliance on Abomonation - <::Scalar as PrimeField>::Repr: Abomonation, - <::Scalar as PrimeField>::Repr: Abomonation, + ::Repr: Abomonation, + ::Repr: Abomonation, { // y is a non-deterministic advice representing the fifth root of the input at a step. 
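  // A minimal out-of-circuit sketch of the relation that advice is meant to satisfy,
  // assuming the step checks y^5 == x for its current input x. Pallas scalars are used
  // purely for illustration; any field implementing ff::Field behaves the same way.
  fn is_fifth_root(x: pasta_curves::pallas::Scalar, y: pasta_curves::pallas::Scalar) -> bool {
    use ff::Field;
    // square twice to get y^4, then one more multiplication yields y^5
    let y5 = y.square().square() * y;
    y5 == x
  }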
#[derive(Clone, Debug)] @@ -1473,10 +1449,7 @@ mod tests { y: F, } - impl FifthRootCheckingCircuit - where - F: PrimeField, - { + impl FifthRootCheckingCircuit { fn new(num_steps: usize) -> (Vec, Vec) { let mut powers = Vec::new(); let rng = &mut rand::rngs::OsRng; @@ -1528,17 +1501,17 @@ mod tests { } let circuit_primary = FifthRootCheckingCircuit { - y: ::Scalar::ZERO, + y: ::Scalar::ZERO, }; let circuit_secondary = TrivialCircuit::default(); // produce public parameters let pp = PublicParams::< - G1, - G2, - FifthRootCheckingCircuit<::Scalar>, - TrivialCircuit<::Scalar>, + E1, + E2, + FifthRootCheckingCircuit<::Scalar>, + TrivialCircuit<::Scalar>, >::setup( &circuit_primary, &circuit_secondary, @@ -1550,19 +1523,19 @@ mod tests { // produce non-deterministic advice let (z0_primary, roots) = FifthRootCheckingCircuit::new(num_steps); - let z0_secondary = vec![::Scalar::ZERO]; + let z0_secondary = vec![::Scalar::ZERO]; // produce a recursive SNARK let mut recursive_snark: RecursiveSNARK< - G1, - G2, - FifthRootCheckingCircuit<::Scalar>, - TrivialCircuit<::Scalar>, + E1, + E2, + FifthRootCheckingCircuit<::Scalar>, + TrivialCircuit<::Scalar>, > = RecursiveSNARK::< - G1, - G2, - FifthRootCheckingCircuit<::Scalar>, - TrivialCircuit<::Scalar>, + E1, + E2, + FifthRootCheckingCircuit<::Scalar>, + TrivialCircuit<::Scalar>, >::new( &pp, &roots[0], @@ -1582,11 +1555,11 @@ mod tests { assert!(res.is_ok()); // produce the prover and verifier keys for compressed snark - let (pk, vk) = CompressedSNARK::<_, _, _, _, S, S>::setup(&pp).unwrap(); + let (pk, vk) = CompressedSNARK::<_, _, _, _, S, S>::setup(&pp).unwrap(); // produce a compressed SNARK let res = - CompressedSNARK::<_, _, _, _, S, S>::prove(&pp, &pk, &recursive_snark); + CompressedSNARK::<_, _, _, _, S, S>::prove(&pp, &pk, &recursive_snark); assert!(res.is_ok()); let compressed_snark = res.unwrap(); @@ -1597,28 +1570,25 @@ mod tests { #[test] fn test_ivc_nondet_with_compression() { - type G1 = pasta_curves::pallas::Point; - type G2 = pasta_curves::vesta::Point; - - test_ivc_nondet_with_compression_with::, EE<_>>(); - test_ivc_nondet_with_compression_with::, EE<_>>(); - test_ivc_nondet_with_compression_with::, EE<_>>(); + test_ivc_nondet_with_compression_with::, EE<_>>(); + test_ivc_nondet_with_compression_with::, EE<_>>(); + test_ivc_nondet_with_compression_with::, EE<_>>(); } - fn test_ivc_base_with() + fn test_ivc_base_with() where - G1: Group::Scalar>, - G2: Group::Scalar>, + E1: Engine::Scalar>, + E2: Engine::Scalar>, { - let test_circuit1 = TrivialCircuit::<::Scalar>::default(); - let test_circuit2 = CubicCircuit::<::Scalar>::default(); + let test_circuit1 = TrivialCircuit::<::Scalar>::default(); + let test_circuit2 = CubicCircuit::<::Scalar>::default(); // produce public parameters let pp = PublicParams::< - G1, - G2, - TrivialCircuit<::Scalar>, - CubicCircuit<::Scalar>, + E1, + E2, + TrivialCircuit<::Scalar>, + CubicCircuit<::Scalar>, >::setup( &test_circuit1, &test_circuit2, @@ -1630,16 +1600,16 @@ mod tests { // produce a recursive SNARK let mut recursive_snark = RecursiveSNARK::< - G1, - G2, - TrivialCircuit<::Scalar>, - CubicCircuit<::Scalar>, + E1, + E2, + TrivialCircuit<::Scalar>, + CubicCircuit<::Scalar>, >::new( &pp, &test_circuit1, &test_circuit2, - &[::Scalar::ONE], - &[::Scalar::ZERO], + &[::Scalar::ONE], + &[::Scalar::ZERO], ) .unwrap(); @@ -1652,24 +1622,21 @@ mod tests { let res = recursive_snark.verify( &pp, num_steps, - &[::Scalar::ONE], - &[::Scalar::ZERO], + &[::Scalar::ONE], + &[::Scalar::ZERO], ); 
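    // A small worked check of the constants asserted in these tests, assuming the
    // CubicCircuit step computes y = x^3 + x + 5 (which is what the expected values
    // suggest): one step from 0 gives 5, as asserted just below, and three steps give
    // 0 -> 5 -> 135 -> 2460515, the value asserted in test_ivc_nontrivial above.
    fn cubic_step(x: u64) -> u64 {
      x * x * x + x + 5
    }
    assert_eq!(cubic_step(0), 5);
    let mut z = 0u64;
    for _ in 0..3 {
      z = cubic_step(z);
    }
    assert_eq!(z, 2460515);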
assert!(res.is_ok()); let (zn_primary, zn_secondary) = res.unwrap(); - assert_eq!(zn_primary, vec![::Scalar::ONE]); - assert_eq!(zn_secondary, vec![::Scalar::from(5u64)]); + assert_eq!(zn_primary, vec![::Scalar::ONE]); + assert_eq!(zn_secondary, vec![::Scalar::from(5u64)]); } #[test] fn test_ivc_base() { - type G1 = pasta_curves::pallas::Point; - type G2 = pasta_curves::vesta::Point; - - test_ivc_base_with::(); - test_ivc_base_with::(); - test_ivc_base_with::(); + test_ivc_base_with::(); + test_ivc_base_with::(); + test_ivc_base_with::(); } } diff --git a/src/nifs.rs b/src/nifs.rs index 0913dc942..d82681c6a 100644 --- a/src/nifs.rs +++ b/src/nifs.rs @@ -6,7 +6,7 @@ use crate::{ errors::NovaError, r1cs::{R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness}, scalar_as_base, - traits::{commitment::CommitmentTrait, AbsorbInROTrait, Group, ROTrait}, + traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROTrait}, Commitment, CommitmentKey, CompressedCommitment, }; use serde::{Deserialize, Serialize}; @@ -15,14 +15,14 @@ use serde::{Deserialize, Serialize}; #[allow(clippy::upper_case_acronyms)] #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "")] -pub struct NIFS { - pub(crate) comm_T: CompressedCommitment, +pub struct NIFS { + pub(crate) comm_T: CompressedCommitment, } -type ROConstants = - <::RO as ROTrait<::Base, ::Scalar>>::Constants; +type ROConstants = + <::RO as ROTrait<::Base, ::Scalar>>::Constants; -impl NIFS { +impl NIFS { /// Takes as input a Relaxed R1CS instance-witness tuple `(U1, W1)` and /// an R1CS instance-witness tuple `(U2, W2)` with the same structure `shape` /// and defined with respect to the same `ck`, and outputs @@ -32,20 +32,20 @@ impl NIFS { #[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, level = "trace", name = "NIFS::prove")] pub fn prove( - ck: &CommitmentKey, - ro_consts: &ROConstants, - pp_digest: &G::Scalar, - S: &R1CSShape, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) -> Result<(NIFS, (RelaxedR1CSInstance, RelaxedR1CSWitness)), NovaError> { + ck: &CommitmentKey, + ro_consts: &ROConstants, + pp_digest: &E::Scalar, + S: &R1CSShape, + U1: &RelaxedR1CSInstance, + W1: &RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + ) -> Result<(NIFS, (RelaxedR1CSInstance, RelaxedR1CSWitness)), NovaError> { // initialize a new RO - let mut ro = G::RO::new(ro_consts.clone(), NUM_FE_FOR_RO); + let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_FOR_RO); // append the digest of pp to the transcript - ro.absorb(scalar_as_base::(*pp_digest)); + ro.absorb(scalar_as_base::(*pp_digest)); // append U1 and U2 to transcript U1.absorb_in_ro(&mut ro); @@ -82,23 +82,23 @@ impl NIFS { /// if and only if `U1` and `U2` are satisfiable. 
pub fn verify( &self, - ro_consts: &ROConstants, - pp_digest: &G::Scalar, - U1: &RelaxedR1CSInstance, - U2: &R1CSInstance, - ) -> Result, NovaError> { + ro_consts: &ROConstants, + pp_digest: &E::Scalar, + U1: &RelaxedR1CSInstance, + U2: &R1CSInstance, + ) -> Result, NovaError> { // initialize a new RO - let mut ro = G::RO::new(ro_consts.clone(), NUM_FE_FOR_RO); + let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_FOR_RO); // append the digest of pp to the transcript - ro.absorb(scalar_as_base::(*pp_digest)); + ro.absorb(scalar_as_base::(*pp_digest)); // append U1 and U2 to transcript U1.absorb_in_ro(&mut ro); U2.absorb_in_ro(&mut ro); // append `comm_T` to the transcript and obtain a challenge - let comm_T = Commitment::::decompress(&self.comm_T)?; + let comm_T = Commitment::::decompress(&self.comm_T)?; comm_T.absorb_in_ro(&mut ro); // compute a challenge from the RO @@ -116,16 +116,19 @@ impl NIFS { mod tests { use super::*; use crate::{ - r1cs::{commitment_key, SparseMatrix}, - traits::snark::default_ck_hint, - traits::{commitment::CommitmentEngineTrait, Group}, + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + solver::SatisfyingAssignment, + test_shape_cs::TestShapeCS, + }, + provider::{bn256_grumpkin::Bn256Engine, pasta::PallasEngine, secp_secq::Secp256k1Engine}, + r1cs::{SparseMatrix, commitment_key}, + traits::{snark::default_ck_hint, Engine}, }; use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; use ff::{Field, PrimeField}; use rand::rngs::OsRng; - type G = pasta_curves::pallas::Point; - fn synthesize_tiny_r1cs_bellpepper>( cs: &mut CS, x_val: Option, @@ -159,34 +162,25 @@ mod tests { Ok(()) } - fn test_tiny_r1cs_bellpepper_with() - where - G: Group, - { - use crate::bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - solver::SatisfyingAssignment, - test_shape_cs::TestShapeCS, - }; - + fn test_tiny_r1cs_bellpepper_with() { // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); + let mut cs: TestShapeCS = TestShapeCS::new(); let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, None); let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); let ro_consts = - <::RO as ROTrait<::Base, ::Scalar>>::Constants::default(); + <::RO as ROTrait<::Base, ::Scalar>>::Constants::default(); // Now get the instance and assignment for one instance - let mut cs = SatisfyingAssignment::::new(); - let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(G::Scalar::from(5))); + let mut cs = SatisfyingAssignment::::new(); + let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(E::Scalar::from(5))); let (U1, W1) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); // Make sure that the first instance is satisfiable assert!(shape.is_sat(&ck, &U1, &W1).is_ok()); // Now get the instance and assignment for second instance - let mut cs = SatisfyingAssignment::::new(); - let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(G::Scalar::from(135))); + let mut cs = SatisfyingAssignment::::new(); + let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(E::Scalar::from(135))); let (U2, W2) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); // Make sure that the second instance is satisfiable @@ -196,7 +190,7 @@ mod tests { execute_sequence( &ck, &ro_consts, - &::Scalar::ZERO, + &::Scalar::ZERO, &shape, &U1, &W1, @@ -207,23 +201,21 @@ mod tests { #[test] fn test_tiny_r1cs_bellpepper() { - test_tiny_r1cs_bellpepper_with::(); - - test_tiny_r1cs_bellpepper_with::(); + test_tiny_r1cs_bellpepper_with::(); + test_tiny_r1cs_bellpepper_with::(); + 
test_tiny_r1cs_bellpepper_with::(); } - fn execute_sequence( - ck: &CommitmentKey, - ro_consts: &<::RO as ROTrait<::Base, ::Scalar>>::Constants, - pp_digest: &::Scalar, - shape: &R1CSShape, - U1: &R1CSInstance, - W1: &R1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) where - G: Group, - { + fn execute_sequence( + ck: &CommitmentKey, + ro_consts: &<::RO as ROTrait<::Base, ::Scalar>>::Constants, + pp_digest: &::Scalar, + shape: &R1CSShape, + U1: &R1CSInstance, + W1: &R1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + ) { // produce a default running instance let mut r_W = RelaxedR1CSWitness::default(shape); let mut r_U = RelaxedR1CSInstance::default(ck, shape); @@ -264,8 +256,8 @@ mod tests { assert!(shape.is_sat_relaxed(ck, &r_U, &r_W).is_ok()); } - fn test_tiny_r1cs_with() { - let one = ::ONE; + fn test_tiny_r1cs_with() { + let one = ::ONE; let (num_cons, num_vars, num_io, A, B, C) = { let num_cons = 4; let num_vars = 3; @@ -282,9 +274,9 @@ mod tests { // constraint and a column for every entry in z = (vars, u, inputs) // An R1CS instance is satisfiable iff: // Az \circ Bz = u \cdot Cz + E, where z = (vars, 1, inputs) - let mut A: Vec<(usize, usize, G::Scalar)> = Vec::new(); - let mut B: Vec<(usize, usize, G::Scalar)> = Vec::new(); - let mut C: Vec<(usize, usize, G::Scalar)> = Vec::new(); + let mut A: Vec<(usize, usize, E::Scalar)> = Vec::new(); + let mut B: Vec<(usize, usize, E::Scalar)> = Vec::new(); + let mut C: Vec<(usize, usize, E::Scalar)> = Vec::new(); // constraint 0 entries in (A,B,C) // `I0 * I0 - Z0 = 0` @@ -333,13 +325,12 @@ mod tests { }; // generate generators and ro constants - let ck: <::CE as CommitmentEngineTrait>::CommitmentKey = - commitment_key(&S, &*default_ck_hint()); + let ck = commitment_key(&S, &*default_ck_hint()); let ro_consts = - <::RO as ROTrait<::Base, ::Scalar>>::Constants::default(); + <::RO as ROTrait<::Base, ::Scalar>>::Constants::default(); let rand_inst_witness_generator = - |ck: &CommitmentKey, I: &G::Scalar| -> (G::Scalar, R1CSInstance, R1CSWitness) { + |ck: &CommitmentKey, I: &E::Scalar| -> (E::Scalar, R1CSInstance, R1CSWitness) { let i0 = *I; // compute a satisfying (vars, X) tuple @@ -374,7 +365,7 @@ mod tests { }; let mut csprng: OsRng = OsRng; - let I = G::Scalar::random(&mut csprng); // the first input is picked randomly for the first instance + let I = E::Scalar::random(&mut csprng); // the first input is picked randomly for the first instance let (O, U1, W1) = rand_inst_witness_generator(&ck, &I); let (_O, U2, W2) = rand_inst_witness_generator(&ck, &O); @@ -382,7 +373,7 @@ mod tests { execute_sequence( &ck, &ro_consts, - &::Scalar::ZERO, + &::Scalar::ZERO, &S, &U1, &W1, @@ -393,8 +384,8 @@ mod tests { #[test] fn test_tiny_r1cs() { - test_tiny_r1cs_with::(); - test_tiny_r1cs_with::(); - test_tiny_r1cs_with::(); + test_tiny_r1cs_with::(); + test_tiny_r1cs_with::(); + test_tiny_r1cs_with::(); } } diff --git a/src/provider/bn256_grumpkin.rs b/src/provider/bn256_grumpkin.rs index fc1eddfe4..d1831bb8d 100644 --- a/src/provider/bn256_grumpkin.rs +++ b/src/provider/bn256_grumpkin.rs @@ -6,9 +6,9 @@ use crate::{ keccak::Keccak256Transcript, pedersen::CommitmentEngine, poseidon::{PoseidonRO, PoseidonROCircuit}, - CompressedGroup, GroupExt, + CompressedGroup, DlogGroup, }, - traits::{Group, PrimeFieldExt, TranscriptReprTrait}, + traits::{Engine, Group, PrimeFieldExt, TranscriptReprTrait}, }; use digest::{ExtendableOutput, Update}; use ff::{FromUniformBytes, PrimeField}; @@ -42,7 +42,16 @@ pub mod grumpkin { }; } +/// An implementation of 
the Nova `Engine` trait with BN254 curve and Pedersen commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Bn256Engine; + +/// An implementation of the Nova `Engine` trait with Grumpkin curve and Pedersen commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct GrumpkinEngine; + impl_traits!( + Bn256Engine, bn256, Bn256Compressed, Bn256Point, @@ -52,6 +61,7 @@ impl_traits!( ); impl_traits!( + GrumpkinEngine, grumpkin, GrumpkinCompressed, GrumpkinPoint, @@ -85,7 +95,7 @@ mod tests { for n in [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1021, ] { - let ck_par = ::from_label(label, n); + let ck_par = ::from_label(label, n); let ck_ser = from_label_serial(label, n); assert_eq!(ck_par.len(), n); assert_eq!(ck_ser.len(), n); diff --git a/src/provider/ipa_pc.rs b/src/provider/ipa_pc.rs index 3025e162b..07e5eea16 100644 --- a/src/provider/ipa_pc.rs +++ b/src/provider/ipa_pc.rs @@ -1,12 +1,12 @@ //! This module implements `EvaluationEngine` using an IPA-based polynomial commitment scheme use crate::{ errors::NovaError, - provider::{pedersen::CommitmentKeyExtTrait, GroupExt}, + provider::{pedersen::CommitmentKeyExtTrait, DlogGroup}, spartan::polys::eq::EqPolynomial, traits::{ commitment::{CommitmentEngineTrait, CommitmentTrait}, evaluation::EvaluationEngineTrait, - Group, TranscriptEngineTrait, TranscriptReprTrait, + Engine, TranscriptEngineTrait, TranscriptReprTrait, }, Commitment, CommitmentKey, CompressedCommitment, CE, }; @@ -21,38 +21,39 @@ use std::marker::PhantomData; #[derive(Clone, Debug, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] #[abomonation_omit_bounds] -pub struct ProverKey { - ck_s: CommitmentKey, +pub struct ProverKey { + ck_s: CommitmentKey, } /// Provides an implementation of the verifier key #[derive(Clone, Debug, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] #[abomonation_omit_bounds] -pub struct VerifierKey { - ck_v: CommitmentKey, - ck_s: CommitmentKey, +pub struct VerifierKey { + ck_v: CommitmentKey, + ck_s: CommitmentKey, } /// Provides an implementation of a polynomial evaluation engine using IPA #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct EvaluationEngine { - _p: PhantomData, +pub struct EvaluationEngine { + _p: PhantomData, } -impl EvaluationEngineTrait for EvaluationEngine +impl EvaluationEngineTrait for EvaluationEngine where - G: GroupExt, - CommitmentKey: CommitmentKeyExtTrait, + E: Engine, + E::GE: DlogGroup, + CommitmentKey: CommitmentKeyExtTrait, { - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - type EvaluationArgument = InnerProductArgument; + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + type EvaluationArgument = InnerProductArgument; fn setup( - ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, + ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, ) -> (Self::ProverKey, Self::VerifierKey) { - let ck_c = G::CE::setup(b"ipa", 1); + let ck_c = E::CE::setup(b"ipa", 1); let pk = ProverKey { ck_s: ck_c.clone() }; let vk = VerifierKey { @@ -64,13 +65,13 @@ where } fn prove( - ck: &CommitmentKey, + ck: &CommitmentKey, pk: &Self::ProverKey, - transcript: &mut G::TE, - comm: &Commitment, - poly: &[G::Scalar], - point: &[G::Scalar], - eval: &G::Scalar, + transcript: &mut E::TE, + comm: &Commitment, + poly: &[E::Scalar], + point: &[E::Scalar], + eval: &E::Scalar, ) -> Result { let u = InnerProductInstance::new(comm, &EqPolynomial::new(point.to_vec()).evals(), eval); let w = InnerProductWitness::new(poly); @@ -81,10 +82,10 
@@ where /// A method to verify purported evaluations of a batch of polynomials fn verify( vk: &Self::VerifierKey, - transcript: &mut G::TE, - comm: &Commitment, - point: &[G::Scalar], - eval: &G::Scalar, + transcript: &mut E::TE, + comm: &Commitment, + point: &[E::Scalar], + eval: &E::Scalar, arg: &Self::EvaluationArgument, ) -> Result<(), NovaError> { let u = InnerProductInstance::new(comm, &EqPolynomial::new(point.to_vec()).evals(), eval); @@ -101,10 +102,7 @@ where } } -fn inner_product(a: &[T], b: &[T]) -> T -where - T: Field + Send + Sync, -{ +fn inner_product(a: &[T], b: &[T]) -> T { assert_eq!(a.len(), b.len()); (0..a.len()) .into_par_iter() @@ -114,14 +112,18 @@ where /// An inner product instance consists of a commitment to a vector `a` and another vector `b` /// and the claim that c = . -pub struct InnerProductInstance { - comm_a_vec: Commitment, - b_vec: Vec, - c: G::Scalar, +pub struct InnerProductInstance { + comm_a_vec: Commitment, + b_vec: Vec, + c: E::Scalar, } -impl InnerProductInstance { - fn new(comm_a_vec: &Commitment, b_vec: &[G::Scalar], c: &G::Scalar) -> Self { +impl InnerProductInstance +where + E: Engine, + E::GE: DlogGroup, +{ + fn new(comm_a_vec: &Commitment, b_vec: &[E::Scalar], c: &E::Scalar) -> Self { InnerProductInstance { comm_a_vec: *comm_a_vec, b_vec: b_vec.to_vec(), @@ -130,7 +132,7 @@ impl InnerProductInstance { } } -impl TranscriptReprTrait for InnerProductInstance { +impl TranscriptReprTrait for InnerProductInstance { fn to_transcript_bytes(&self) -> Vec { // we do not need to include self.b_vec as in our context it is produced from the transcript [ @@ -141,12 +143,12 @@ impl TranscriptReprTrait for InnerProductInstance { } } -struct InnerProductWitness { - a_vec: Vec, +struct InnerProductWitness { + a_vec: Vec, } -impl InnerProductWitness { - fn new(a_vec: &[G::Scalar]) -> Self { +impl InnerProductWitness { + fn new(a_vec: &[E::Scalar]) -> Self { InnerProductWitness { a_vec: a_vec.to_vec(), } @@ -156,27 +158,28 @@ impl InnerProductWitness { /// An inner product argument #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "")] -pub struct InnerProductArgument { - L_vec: Vec>, - R_vec: Vec>, - a_hat: G::Scalar, +pub struct InnerProductArgument { + L_vec: Vec>, + R_vec: Vec>, + a_hat: E::Scalar, } -impl InnerProductArgument +impl InnerProductArgument where - G: GroupExt, - CommitmentKey: CommitmentKeyExtTrait, + E: Engine, + E::GE: DlogGroup, + CommitmentKey: CommitmentKeyExtTrait, { const fn protocol_name() -> &'static [u8] { b"IPA" } fn prove( - ck: &CommitmentKey, - ck_c: &CommitmentKey, - U: &InnerProductInstance, - W: &InnerProductWitness, - transcript: &mut G::TE, + ck: &CommitmentKey, + ck_c: &CommitmentKey, + U: &InnerProductInstance, + W: &InnerProductWitness, + transcript: &mut E::TE, ) -> Result { transcript.dom_sep(Self::protocol_name()); @@ -194,17 +197,17 @@ where let ck_c = ck_c.scale(&r); // a closure that executes a step of the recursive inner product argument - let prove_inner = |a_vec: &[G::Scalar], - b_vec: &[G::Scalar], - ck: &CommitmentKey, - transcript: &mut G::TE| + let prove_inner = |a_vec: &[E::Scalar], + b_vec: &[E::Scalar], + ck: &CommitmentKey, + transcript: &mut E::TE| -> Result< ( - CompressedCommitment, - CompressedCommitment, - Vec, - Vec, - CommitmentKey, + CompressedCommitment, + CompressedCommitment, + Vec, + Vec, + CommitmentKey, ), NovaError, > { @@ -214,22 +217,22 @@ where let c_L = inner_product(&a_vec[0..n / 2], &b_vec[n / 2..n]); let c_R = inner_product(&a_vec[n / 2..n], &b_vec[0..n / 2]); - let L = 
CE::::commit( + let L = CE::::commit( &ck_R.combine(&ck_c), &a_vec[0..n / 2] .iter() .chain(iter::once(&c_L)) .copied() - .collect::>(), + .collect::>(), ) .compress(); - let R = CE::::commit( + let R = CE::::commit( &ck_L.combine(&ck_c), &a_vec[n / 2..n] .iter() .chain(iter::once(&c_R)) .copied() - .collect::>(), + .collect::>(), ) .compress(); @@ -244,13 +247,13 @@ where .par_iter() .zip(a_vec[n / 2..n].par_iter()) .map(|(a_L, a_R)| *a_L * r + r_inverse * *a_R) - .collect::>(); + .collect::>(); let b_vec_folded = b_vec[0..n / 2] .par_iter() .zip(b_vec[n / 2..n].par_iter()) .map(|(b_L, b_R)| *b_L * r_inverse + r * *b_R) - .collect::>(); + .collect::>(); let ck_folded = ck.fold(&r_inverse, &r); @@ -258,8 +261,8 @@ where }; // two vectors to hold the logarithmic number of group elements - let mut L_vec: Vec> = Vec::new(); - let mut R_vec: Vec> = Vec::new(); + let mut L_vec: Vec> = Vec::new(); + let mut R_vec: Vec> = Vec::new(); // we create mutable copies of vectors and generators let mut a_vec = W.a_vec.to_vec(); @@ -285,11 +288,11 @@ where fn verify( &self, - ck: &CommitmentKey, - ck_c: &CommitmentKey, + ck: &CommitmentKey, + ck_c: &CommitmentKey, n: usize, - U: &InnerProductInstance, - transcript: &mut G::TE, + U: &InnerProductInstance, + transcript: &mut E::TE, ) -> Result<(), NovaError> { let (ck, _) = ck.split_at(U.b_vec.len()); @@ -309,11 +312,11 @@ where let r = transcript.squeeze(b"r")?; let ck_c = ck_c.scale(&r); - let P = U.comm_a_vec + CE::::commit(&ck_c, &[U.c]); + let P = U.comm_a_vec + CE::::commit(&ck_c, &[U.c]); - let batch_invert = |v: &[G::Scalar]| -> Result, NovaError> { - let mut products = vec![G::Scalar::ZERO; v.len()]; - let mut acc = G::Scalar::ONE; + let batch_invert = |v: &[E::Scalar]| -> Result, NovaError> { + let mut products = vec![E::Scalar::ZERO; v.len()]; + let mut acc = E::Scalar::ONE; for i in 0..v.len() { products[i] = acc; @@ -327,7 +330,7 @@ where }; // compute the inverse once for all entries - let mut inv = vec![G::Scalar::ZERO; v.len()]; + let mut inv = vec![E::Scalar::ZERO; v.len()]; for i in (0..v.len()).rev() { let tmp = acc * v[i]; inv[i] = products[i] * acc; @@ -344,24 +347,24 @@ where transcript.absorb(b"R", &self.R_vec[i]); transcript.squeeze(b"r") }) - .collect::, NovaError>>()?; + .collect::, NovaError>>()?; // precompute scalars necessary for verification - let r_square: Vec = (0..self.L_vec.len()) + let r_square: Vec = (0..self.L_vec.len()) .into_par_iter() .map(|i| r[i] * r[i]) .collect(); let r_inverse = batch_invert(&r)?; - let r_inverse_square: Vec = (0..self.L_vec.len()) + let r_inverse_square: Vec = (0..self.L_vec.len()) .into_par_iter() .map(|i| r_inverse[i] * r_inverse[i]) .collect(); // compute the vector with the tensor structure let s = { - let mut s = vec![G::Scalar::ZERO; n]; + let mut s = vec![E::Scalar::ZERO; n]; s[0] = { - let mut v = G::Scalar::ONE; + let mut v = E::Scalar::ONE; for r_inverse_i in r_inverse { v *= r_inverse_i; } @@ -375,32 +378,32 @@ where }; let ck_hat = { - let c = CE::::commit(&ck, &s).compress(); - CommitmentKey::::reinterpret_commitments_as_ck(&[c])? + let c = CE::::commit(&ck, &s).compress(); + CommitmentKey::::reinterpret_commitments_as_ck(&[c])? 
}; let b_hat = inner_product(&U.b_vec, &s); let P_hat = { let ck_folded = { - let ck_L = CommitmentKey::::reinterpret_commitments_as_ck(&self.L_vec)?; - let ck_R = CommitmentKey::::reinterpret_commitments_as_ck(&self.R_vec)?; - let ck_P = CommitmentKey::::reinterpret_commitments_as_ck(&[P.compress()])?; + let ck_L = CommitmentKey::::reinterpret_commitments_as_ck(&self.L_vec)?; + let ck_R = CommitmentKey::::reinterpret_commitments_as_ck(&self.R_vec)?; + let ck_P = CommitmentKey::::reinterpret_commitments_as_ck(&[P.compress()])?; ck_L.combine(&ck_R).combine(&ck_P) }; - CE::::commit( + CE::::commit( &ck_folded, &r_square .iter() .chain(r_inverse_square.iter()) - .chain(iter::once(&G::Scalar::ONE)) + .chain(iter::once(&E::Scalar::ONE)) .copied() - .collect::>(), + .collect::>(), ) }; - if P_hat == CE::::commit(&ck_hat.combine(&ck_c), &[self.a_hat, self.a_hat * b_hat]) { + if P_hat == CE::::commit(&ck_hat.combine(&ck_c), &[self.a_hat, self.a_hat * b_hat]) { Ok(()) } else { Err(NovaError::InvalidIPA) diff --git a/src/provider/keccak.rs b/src/provider/keccak.rs index 26139eb91..0cc06217c 100644 --- a/src/provider/keccak.rs +++ b/src/provider/keccak.rs @@ -2,7 +2,7 @@ use crate::traits::PrimeFieldExt; use crate::{ errors::NovaError, - traits::{Group, TranscriptEngineTrait, TranscriptReprTrait}, + traits::{Engine, TranscriptEngineTrait, TranscriptReprTrait}, }; use core::marker::PhantomData; use sha3::{Digest, Keccak256}; @@ -15,11 +15,11 @@ const KECCAK256_PREFIX_CHALLENGE_HI: u8 = 1; /// Provides an implementation of `TranscriptEngine` #[derive(Debug, Clone)] -pub struct Keccak256Transcript { +pub struct Keccak256Transcript { round: u16, state: [u8; KECCAK256_STATE_SIZE], transcript: Keccak256, - _p: PhantomData, + _p: PhantomData, } fn compute_updated_state(keccak_instance: Keccak256, input: &[u8]) -> [u8; KECCAK256_STATE_SIZE] { @@ -45,7 +45,7 @@ fn compute_updated_state(keccak_instance: Keccak256, input: &[u8]) -> [u8; KECCA .unwrap() } -impl TranscriptEngineTrait for Keccak256Transcript { +impl TranscriptEngineTrait for Keccak256Transcript { fn new(label: &'static [u8]) -> Self { let keccak_instance = Keccak256::new(); let input = [PERSONA_TAG, label].concat(); @@ -59,7 +59,7 @@ impl TranscriptEngineTrait for Keccak256Transcript { } } - fn squeeze(&mut self, label: &'static [u8]) -> Result { + fn squeeze(&mut self, label: &'static [u8]) -> Result { // we gather the full input from the round, preceded by the current state of the transcript let input = [ DOM_SEP_TAG, @@ -82,10 +82,10 @@ impl TranscriptEngineTrait for Keccak256Transcript { self.transcript = Keccak256::new(); // squeeze out a challenge - Ok(G::Scalar::from_uniform(&output)) + Ok(E::Scalar::from_uniform(&output)) } - fn absorb>(&mut self, label: &'static [u8], o: &T) { + fn absorb>(&mut self, label: &'static [u8], o: &T) { self.transcript.update(label); self.transcript.update(&o.to_transcript_bytes()); } @@ -99,53 +99,57 @@ impl TranscriptEngineTrait for Keccak256Transcript { #[cfg(test)] mod tests { use crate::{ - provider::bn256_grumpkin::bn256, - provider::{self, keccak::Keccak256Transcript, secp_secq}, - traits::{Group, PrimeFieldExt, TranscriptEngineTrait, TranscriptReprTrait}, + provider::keccak::Keccak256Transcript, + provider::{ + bn256_grumpkin::{Bn256Engine, GrumpkinEngine}, + pasta::{PallasEngine, VestaEngine}, + secp_secq::{Secp256k1Engine, Secq256k1Engine}, + }, + traits::{Engine, PrimeFieldExt, TranscriptEngineTrait, TranscriptReprTrait}, }; use ff::PrimeField; use rand::Rng; use sha3::{Digest, Keccak256}; - fn 
test_keccak_transcript_with(expected_h1: &'static str, expected_h2: &'static str) { - let mut transcript: Keccak256Transcript = Keccak256Transcript::new(b"test"); + fn test_keccak_transcript_with(expected_h1: &'static str, expected_h2: &'static str) { + let mut transcript: Keccak256Transcript = Keccak256Transcript::new(b"test"); // two scalars - let s1 = ::Scalar::from(2u64); - let s2 = ::Scalar::from(5u64); + let s1 = ::Scalar::from(2u64); + let s2 = ::Scalar::from(5u64); // add the scalars to the transcript transcript.absorb(b"s1", &s1); transcript.absorb(b"s2", &s2); // make a challenge - let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); + let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); assert_eq!(hex::encode(c1.to_repr().as_ref()), expected_h1); // a scalar - let s3 = ::Scalar::from(128u64); + let s3 = ::Scalar::from(128u64); // add the scalar to the transcript transcript.absorb(b"s3", &s3); // make a challenge - let c2: ::Scalar = transcript.squeeze(b"c2").unwrap(); + let c2: ::Scalar = transcript.squeeze(b"c2").unwrap(); assert_eq!(hex::encode(c2.to_repr().as_ref()), expected_h2); } #[test] fn test_keccak_transcript() { - test_keccak_transcript_with::( + test_keccak_transcript_with::( "5ddffa8dc091862132788b8976af88b9a2c70594727e611c7217ba4c30c8c70a", "4d4bf42c065870395749fa1c4fb641df1e0d53f05309b03d5b1db7f0be3aa13d", ); - test_keccak_transcript_with::( + test_keccak_transcript_with::( "9fb71e3b74bfd0b60d97349849b895595779a240b92a6fae86bd2812692b6b0e", "bfd4c50b7d6317e9267d5d65c985eb455a3561129c0b3beef79bfc8461a84f18", ); - test_keccak_transcript_with::( + test_keccak_transcript_with::( "9723aafb69ec8f0e9c7de756df0993247d98cf2b2f72fa353e3de654a177e310", "a6a90fcb6e1b1a2a2f84c950ef1510d369aea8e42085f5c629bfa66d00255f25", ); @@ -206,13 +210,13 @@ mod tests { // This test is meant to ensure compatibility between the incremental way of computing the transcript above, and // the former, which materialized the entirety of the input vector before calling Keccak256 on it. 
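  // Before that comparison, a sketch of how the 64 uniform bytes themselves can be
  // obtained from Keccak-256, whose digest is only 32 bytes: hash the accumulated input
  // once with a low-half prefix byte and once with a high-half prefix byte (cf.
  // KECCAK256_PREFIX_CHALLENGE_HI above), then concatenate the two digests before the
  // call to from_uniform. The exact prefix values and their placement here are an
  // illustrative assumption consistent with those constants, not a normative spec.
  #[allow(dead_code)]
  fn squeeze_64_bytes_sketch(input: &[u8]) -> [u8; 64] {
    let mut hasher_lo = Keccak256::new();
    hasher_lo.update(input);
    hasher_lo.update([0u8]); // low-half challenge prefix

    let mut hasher_hi = Keccak256::new();
    hasher_hi.update(input);
    hasher_hi.update([1u8]); // high-half challenge prefix (KECCAK256_PREFIX_CHALLENGE_HI)

    let mut out = [0u8; 64];
    out[..32].copy_from_slice(hasher_lo.finalize().as_slice());
    out[32..].copy_from_slice(hasher_hi.finalize().as_slice());
    out
  }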
- fn test_keccak_transcript_incremental_vs_explicit_with() { + fn test_keccak_transcript_incremental_vs_explicit_with() { let test_label = b"test"; - let mut transcript: Keccak256Transcript = Keccak256Transcript::new(test_label); + let mut transcript: Keccak256Transcript = Keccak256Transcript::new(test_label); let mut rng = rand::thread_rng(); // ten scalars - let scalars = std::iter::from_fn(|| Some(::Scalar::from(rng.gen::()))) + let scalars = std::iter::from_fn(|| Some(::Scalar::from(rng.gen::()))) .take(10) .collect::>(); @@ -233,18 +237,20 @@ mod tests { let initial_state = compute_updated_state_for_testing(&input); // make a challenge - let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); + let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); let c1_bytes = squeeze_for_testing(&manual_transcript[..], 0u16, initial_state, b"c1"); - let to_hex = |g: G::Scalar| hex::encode(g.to_repr().as_ref()); - assert_eq!(to_hex(c1), to_hex(G::Scalar::from_uniform(&c1_bytes))); + let to_hex = |g: E::Scalar| hex::encode(g.to_repr().as_ref()); + assert_eq!(to_hex(c1), to_hex(E::Scalar::from_uniform(&c1_bytes))); } #[test] fn test_keccak_transcript_incremental_vs_explicit() { - test_keccak_transcript_incremental_vs_explicit_with::(); - test_keccak_transcript_incremental_vs_explicit_with::(); - test_keccak_transcript_incremental_vs_explicit_with::(); - test_keccak_transcript_incremental_vs_explicit_with::(); + test_keccak_transcript_incremental_vs_explicit_with::(); + test_keccak_transcript_incremental_vs_explicit_with::(); + test_keccak_transcript_incremental_vs_explicit_with::(); + test_keccak_transcript_incremental_vs_explicit_with::(); + test_keccak_transcript_incremental_vs_explicit_with::(); + test_keccak_transcript_incremental_vs_explicit_with::(); } } diff --git a/src/provider/mod.rs b/src/provider/mod.rs index b27e35deb..fa4b9e07b 100644 --- a/src/provider/mod.rs +++ b/src/provider/mod.rs @@ -1,6 +1,6 @@ //! This module implements Nova's traits using the following configuration: //! `CommitmentEngine` with Pedersen's commitments -//! `Group` with pasta curves and BN256/Grumpkin +//! `Engine` with pasta curves and BN256/Grumpkin //! `RO` traits with Poseidon //! `EvaluationEngine` with an IPA-based polynomial evaluation argument use crate::traits::{commitment::ScalarMul, Group, TranscriptReprTrait}; @@ -24,7 +24,7 @@ pub trait CompressedGroup: + 'static { /// A type that holds the decompressed version of the compressed group element - type GroupElement: Group; + type GroupElement: DlogGroup; /// Decompresses the compressed group element fn decompress(&self) -> Option; @@ -50,7 +50,7 @@ pub trait ScalarMulOwned: for<'r> ScalarMul<&'r Rhs, Output> impl ScalarMulOwned for T where T: for<'r> ScalarMul<&'r Rhs, Output> {} /// A trait that defines extensions to the Group trait -pub trait GroupExt: +pub trait DlogGroup: Group + Serialize + for<'de> Deserialize<'de> @@ -91,7 +91,7 @@ pub trait GroupExt: fn zero() -> Self; /// Returns the affine coordinates (x, y, infinty) for the point - fn to_coordinates(&self) -> (Self::Base, Self::Base, bool); + fn to_coordinates(&self) -> (::Base, ::Base, bool); } pub mod bn256_grumpkin; @@ -221,6 +221,7 @@ pub(crate) fn cpu_best_multiexp(coeffs: &[C::Scalar], bases: &[C #[macro_export] macro_rules! impl_traits { ( + $engine:ident, $name:ident, $name_compressed:ident, $name_curve:ident, @@ -228,15 +229,21 @@ macro_rules! 
impl_traits { $order_str:literal, $base_str:literal ) => { - impl Group for $name::Point { + impl Engine for $engine { type Base = $name::Base; type Scalar = $name::Scalar; + type GE = $name::Point; type RO = PoseidonRO; type ROCircuit = PoseidonROCircuit; type TE = Keccak256Transcript; type CE = CommitmentEngine; + } + + impl Group for $name::Point { + type Base = $name::Base; + type Scalar = $name::Scalar; - fn get_curve_params() -> (Self::Base, Self::Base, BigInt, BigInt) { + fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt) { let A = $name::Point::a(); let B = $name::Point::b(); let order = BigInt::from_str_radix($order_str, 16).unwrap(); @@ -246,7 +253,7 @@ macro_rules! impl_traits { } } - impl GroupExt for $name::Point { + impl DlogGroup for $name::Point { type CompressedGroupElement = $name_compressed; type PreprocessedGroupElement = $name::Affine; @@ -335,7 +342,7 @@ macro_rules! impl_traits { } } - impl TranscriptReprTrait for $name_compressed { + impl TranscriptReprTrait for $name_compressed { fn to_transcript_bytes(&self) -> Vec { self.as_ref().to_vec() } diff --git a/src/provider/pasta.rs b/src/provider/pasta.rs index dc1663f59..0cb0cb5ae 100644 --- a/src/provider/pasta.rs +++ b/src/provider/pasta.rs @@ -5,9 +5,9 @@ use crate::{ keccak::Keccak256Transcript, pedersen::CommitmentEngine, poseidon::{PoseidonRO, PoseidonROCircuit}, - CompressedGroup, GroupExt, + CompressedGroup, DlogGroup, }, - traits::{Group, PrimeFieldExt, TranscriptReprTrait}, + traits::{Engine, Group, PrimeFieldExt, TranscriptReprTrait}, }; use digest::{ExtendableOutput, Update}; use ff::{FromUniformBytes, PrimeField}; @@ -50,8 +50,17 @@ impl VestaCompressedElementWrapper { } } +/// An implementation of the Nova `Engine` trait with Pallas curve and Pedersen commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct PallasEngine; + +/// An implementation of the Nova `Engine` trait with Vesta curve and Pedersen commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct VestaEngine; + macro_rules! impl_traits { ( + $engine:ident, $name:ident, $name_compressed:ident, $name_curve:ident, @@ -59,15 +68,21 @@ macro_rules! impl_traits { $order_str:literal, $base_str:literal ) => { - impl Group for $name::Point { + impl Engine for $engine { type Base = $name::Base; type Scalar = $name::Scalar; + type GE = $name::Point; type RO = PoseidonRO; type ROCircuit = PoseidonROCircuit; type TE = Keccak256Transcript; type CE = CommitmentEngine; + } + + impl Group for $name::Point { + type Base = $name::Base; + type Scalar = $name::Scalar; - fn get_curve_params() -> (Self::Base, Self::Base, BigInt, BigInt) { + fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt) { let A = $name::Point::a(); let B = $name::Point::b(); let order = BigInt::from_str_radix($order_str, 16).unwrap(); @@ -77,7 +92,7 @@ macro_rules! impl_traits { } } - impl GroupExt for $name::Point { + impl DlogGroup for $name::Point { type CompressedGroupElement = $name_compressed; type PreprocessedGroupElement = $name::Affine; @@ -181,7 +196,7 @@ macro_rules! impl_traits { } } - impl TranscriptReprTrait for $name_compressed { + impl TranscriptReprTrait for $name_compressed { fn to_transcript_bytes(&self) -> Vec { self.repr.to_vec() } @@ -204,6 +219,7 @@ macro_rules! 
impl_traits { } impl_traits!( + PallasEngine, pallas, PallasCompressedElementWrapper, Ep, @@ -213,6 +229,7 @@ impl_traits!( ); impl_traits!( + VestaEngine, vesta, VestaCompressedElementWrapper, Eq, @@ -224,7 +241,7 @@ impl_traits!( #[cfg(test)] mod tests { use super::*; - type G = pasta_curves::pallas::Point; + type G = ::GE; fn from_label_serial(label: &'static [u8], n: usize) -> Vec { let mut shake = Shake256::default(); @@ -246,7 +263,7 @@ mod tests { for n in [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1021, ] { - let ck_par = ::from_label(label, n); + let ck_par = ::from_label(label, n); let ck_ser = from_label_serial(label, n); assert_eq!(ck_par.len(), n); assert_eq!(ck_ser.len(), n); diff --git a/src/provider/pedersen.rs b/src/provider/pedersen.rs index d22f50609..acf3928d5 100644 --- a/src/provider/pedersen.rs +++ b/src/provider/pedersen.rs @@ -1,17 +1,17 @@ //! This module provides an implementation of a commitment engine use crate::{ errors::NovaError, - provider::{CompressedGroup, GroupExt}, + provider::{CompressedGroup, DlogGroup}, traits::{ commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, - AbsorbInROTrait, Group, ROTrait, TranscriptReprTrait, + AbsorbInROTrait, Engine, ROTrait, TranscriptReprTrait, }, }; use abomonation_derive::Abomonation; use core::{ fmt::Debug, marker::PhantomData, - ops::{Add, AddAssign, Mul, MulAssign}, + ops::{Add, Mul, MulAssign}, }; use ff::Field; use rayon::prelude::*; @@ -20,13 +20,21 @@ use serde::{Deserialize, Serialize}; /// A type that holds commitment generators #[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Abomonation)] #[abomonation_omit_bounds] -pub struct CommitmentKey { +pub struct CommitmentKey +where + E: Engine, + E::GE: DlogGroup, +{ #[abomonate_with(Vec<[u64; 8]>)] // this is a hack; we just assume the size of the element. - ck: Vec, + ck: Vec<::PreprocessedGroupElement>, } /// [CommitmentKey]s are often large, and this helps with cloning bottlenecks -impl Clone for CommitmentKey { +impl Clone for CommitmentKey +where + E: Engine, + E::GE: DlogGroup, +{ fn clone(&self) -> Self { Self { ck: self.ck.par_iter().cloned().collect(), @@ -34,7 +42,11 @@ impl Clone for CommitmentKey { } } -impl Len for CommitmentKey { +impl Len for CommitmentKey +where + E: Engine, + E::GE: DlogGroup, +{ fn length(&self) -> usize { self.ck.len() } @@ -44,20 +56,28 @@ impl Len for CommitmentKey { #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] #[abomonation_omit_bounds] -pub struct Commitment { +pub struct Commitment { #[abomonate_with(Vec<[u64; 12]>)] // this is a hack; we just assume the size of the element. 
- pub(crate) comm: G, + pub(crate) comm: E::GE, } /// A type that holds a compressed commitment #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] -pub struct CompressedCommitment { - comm: G::CompressedGroupElement, +pub struct CompressedCommitment +where + E: Engine, + E::GE: DlogGroup, +{ + comm: ::CompressedGroupElement, } -impl CommitmentTrait for Commitment { - type CompressedCommitment = CompressedCommitment; +impl CommitmentTrait for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type CompressedCommitment = CompressedCommitment; fn compress(&self) -> Self::CompressedCommitment { CompressedCommitment { @@ -65,12 +85,12 @@ impl CommitmentTrait for Commitment { } } - fn to_coordinates(&self) -> (G::Base, G::Base, bool) { + fn to_coordinates(&self) -> (E::Base, E::Base, bool) { self.comm.to_coordinates() } fn decompress(c: &Self::CompressedCommitment) -> Result { - let comm = ::CompressedGroupElement::decompress(&c.comm); + let comm = <::GE as DlogGroup>::CompressedGroupElement::decompress(&c.comm); if comm.is_none() { return Err(NovaError::DecompressionError); } @@ -80,13 +100,23 @@ impl CommitmentTrait for Commitment { } } -impl Default for Commitment { +impl Default for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ fn default() -> Self { - Commitment { comm: G::zero() } + Commitment { + comm: E::GE::zero(), + } } } -impl TranscriptReprTrait for Commitment { +impl TranscriptReprTrait for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ fn to_transcript_bytes(&self) -> Vec { let (x, y, is_infinity) = self.comm.to_coordinates(); let is_infinity_byte = (!is_infinity).into(); @@ -99,131 +129,120 @@ impl TranscriptReprTrait for Commitment { } } -impl AbsorbInROTrait for Commitment { - fn absorb_in_ro(&self, ro: &mut G::RO) { +impl AbsorbInROTrait for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn absorb_in_ro(&self, ro: &mut E::RO) { let (x, y, is_infinity) = self.comm.to_coordinates(); ro.absorb(x); ro.absorb(y); ro.absorb(if is_infinity { - G::Base::ONE + E::Base::ONE } else { - G::Base::ZERO + E::Base::ZERO }); } } -impl TranscriptReprTrait for CompressedCommitment { +impl TranscriptReprTrait for CompressedCommitment +where + E: Engine, + E::GE: DlogGroup, +{ fn to_transcript_bytes(&self) -> Vec { self.comm.to_transcript_bytes() } } -impl MulAssign for Commitment { - fn mul_assign(&mut self, scalar: G::Scalar) { - let result = (self as &Commitment).comm * scalar; - *self = Commitment { comm: result }; +impl MulAssign for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn mul_assign(&mut self, scalar: E::Scalar) { + *self = Commitment { + comm: self.comm * scalar, + }; } } -impl<'a, 'b, G: GroupExt> Mul<&'b G::Scalar> for &'a Commitment { - type Output = Commitment; - fn mul(self, scalar: &'b G::Scalar) -> Commitment { +impl<'a, 'b, E> Mul<&'b E::Scalar> for &'a Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type Output = Commitment; + fn mul(self, scalar: &'b E::Scalar) -> Commitment { Commitment { comm: self.comm * scalar, } } } -impl Mul for Commitment { - type Output = Commitment; +impl Mul for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type Output = Commitment; - fn mul(self, scalar: G::Scalar) -> Commitment { + fn mul(self, scalar: E::Scalar) -> Commitment { Commitment { comm: self.comm * scalar, } } } -impl<'b, G: GroupExt> AddAssign<&'b Commitment> for Commitment { - fn add_assign(&mut self, other: &'b Commitment) { - let result = (self as &Commitment).comm + other.comm; - 
*self = Commitment { comm: result }; - } -} +impl Add for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type Output = Commitment; -impl<'a, 'b, G: GroupExt> Add<&'b Commitment> for &'a Commitment { - type Output = Commitment; - fn add(self, other: &'b Commitment) -> Commitment { + fn add(self, other: Commitment) -> Commitment { Commitment { comm: self.comm + other.comm, } } } -macro_rules! define_add_variants { - (G = $g:path, LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => { - impl<'b, G: $g> Add<&'b $rhs> for $lhs { - type Output = $out; - fn add(self, rhs: &'b $rhs) -> $out { - &self + rhs - } - } - - impl<'a, G: $g> Add<$rhs> for &'a $lhs { - type Output = $out; - fn add(self, rhs: $rhs) -> $out { - self + &rhs - } - } - - impl Add<$rhs> for $lhs { - type Output = $out; - fn add(self, rhs: $rhs) -> $out { - &self + &rhs - } - } - }; -} - -macro_rules! define_add_assign_variants { - (G = $g:path, LHS = $lhs:ty, RHS = $rhs:ty) => { - impl AddAssign<$rhs> for $lhs { - fn add_assign(&mut self, rhs: $rhs) { - *self += &rhs; - } - } - }; -} - -define_add_assign_variants!(G = GroupExt, LHS = Commitment, RHS = Commitment); -define_add_variants!(G = GroupExt, LHS = Commitment, RHS = Commitment, Output = Commitment); - /// Provides a commitment engine #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct CommitmentEngine { - _p: PhantomData, +pub struct CommitmentEngine { + _p: PhantomData, } -impl CommitmentEngineTrait for CommitmentEngine { - type CommitmentKey = CommitmentKey; - type Commitment = Commitment; +impl CommitmentEngineTrait for CommitmentEngine +where + E: Engine, + E::GE: DlogGroup, +{ + type CommitmentKey = CommitmentKey; + type Commitment = Commitment; fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey { Self::CommitmentKey { - ck: G::from_label(label, n.next_power_of_two()), + ck: E::GE::from_label(label, n.next_power_of_two()), } } - fn commit(ck: &Self::CommitmentKey, v: &[G::Scalar]) -> Self::Commitment { + fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment { assert!(ck.ck.len() >= v.len()); Commitment { - comm: G::vartime_multiscalar_mul(v, &ck.ck[..v.len()]), + comm: E::GE::vartime_multiscalar_mul(v, &ck.ck[..v.len()]), } } } /// A trait listing properties of a commitment key that can be managed in a divide-and-conquer fashion -pub trait CommitmentKeyExtTrait { +pub trait CommitmentKeyExtTrait +where + E: Engine, + E::GE: DlogGroup, +{ /// Splits the commitment key into two pieces at a specified point fn split_at(&self, n: usize) -> (Self, Self) where @@ -233,21 +252,25 @@ pub trait CommitmentKeyExtTrait { fn combine(&self, other: &Self) -> Self; /// Folds the two commitment keys into one using the provided weights - fn fold(&self, w1: &G::Scalar, w2: &G::Scalar) -> Self; + fn fold(&self, w1: &E::Scalar, w2: &E::Scalar) -> Self; /// Scales the commitment key using the provided scalar - fn scale(&self, r: &G::Scalar) -> Self; + fn scale(&self, r: &E::Scalar) -> Self; /// Reinterprets commitments as commitment keys fn reinterpret_commitments_as_ck( - c: &[<<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait>::CompressedCommitment], + c: &[<<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait>::CompressedCommitment], ) -> Result where Self: Sized; } -impl> + GroupExt> CommitmentKeyExtTrait for CommitmentKey { - fn split_at(&self, n: usize) -> (CommitmentKey, CommitmentKey) { +impl CommitmentKeyExtTrait for CommitmentKey +where + E: Engine>, + E::GE: DlogGroup, +{ + fn split_at(&self, n: 
usize) -> (CommitmentKey, CommitmentKey) { ( CommitmentKey { ck: self.ck[0..n].to_vec(), @@ -258,7 +281,7 @@ impl> + GroupExt> CommitmentKeyExtTrait for ) } - fn combine(&self, other: &CommitmentKey) -> CommitmentKey { + fn combine(&self, other: &CommitmentKey) -> CommitmentKey { let ck = { let mut c = self.ck.clone(); c.extend(other.ck.clone()); @@ -268,7 +291,7 @@ impl> + GroupExt> CommitmentKeyExtTrait for } // combines the left and right halves of `self` using `w1` and `w2` as the weights - fn fold(&self, w1: &G::Scalar, w2: &G::Scalar) -> CommitmentKey { + fn fold(&self, w1: &E::Scalar, w2: &E::Scalar) -> CommitmentKey { let w = vec![*w1, *w2]; let (L, R) = self.split_at(self.ck.len() / 2); @@ -276,7 +299,7 @@ impl> + GroupExt> CommitmentKeyExtTrait for .into_par_iter() .map(|i| { let bases = [L.ck[i].clone(), R.ck[i].clone()].to_vec(); - G::vartime_multiscalar_mul(&w, &bases).preprocessed() + E::GE::vartime_multiscalar_mul(&w, &bases).preprocessed() }) .collect(); @@ -284,23 +307,23 @@ impl> + GroupExt> CommitmentKeyExtTrait for } /// Scales each element in `self` by `r` - fn scale(&self, r: &G::Scalar) -> Self { + fn scale(&self, r: &E::Scalar) -> Self { let ck_scaled = self .ck .clone() .into_par_iter() - .map(|g| G::vartime_multiscalar_mul(&[*r], &[g]).preprocessed()) + .map(|g| E::GE::vartime_multiscalar_mul(&[*r], &[g]).preprocessed()) .collect(); CommitmentKey { ck: ck_scaled } } /// reinterprets a vector of commitments as a set of generators - fn reinterpret_commitments_as_ck(c: &[CompressedCommitment]) -> Result { + fn reinterpret_commitments_as_ck(c: &[CompressedCommitment]) -> Result { let d = (0..c.len()) .into_par_iter() - .map(|i| Commitment::::decompress(&c[i])) - .collect::>, NovaError>>()?; + .map(|i| Commitment::::decompress(&c[i])) + .collect::>, NovaError>>()?; let ck = (0..d.len()) .into_par_iter() .map(|i| d[i].comm.preprocessed()) diff --git a/src/provider/poseidon.rs b/src/provider/poseidon.rs index 070fa2b3b..cae48d350 100644 --- a/src/provider/poseidon.rs +++ b/src/provider/poseidon.rs @@ -116,10 +116,7 @@ where /// A Poseidon-based RO gadget to use inside the verifier circuit. 
#[derive(Serialize, Deserialize)] -pub struct PoseidonROCircuit -where - Scalar: PrimeField, -{ +pub struct PoseidonROCircuit { // Internal state state: Vec>, constants: PoseidonConstantsCircuit, @@ -152,14 +149,11 @@ where } /// Compute a challenge by hashing the current state - fn squeeze( + fn squeeze>( &mut self, mut cs: CS, num_bits: usize, - ) -> Result, SynthesisError> - where - CS: ConstraintSystem, - { + ) -> Result, SynthesisError> { // check if we have squeezed already assert!(!self.squeezed, "Cannot squeeze again after squeezing"); self.squeezed = true; @@ -209,32 +203,37 @@ where #[cfg(test)] mod tests { use super::*; - use crate::provider::{bn256_grumpkin::bn256, secp_secq}; + use crate::provider::{ + bn256_grumpkin::{Bn256Engine, GrumpkinEngine}, + pasta::{PallasEngine, VestaEngine}, + secp_secq::{Secp256k1Engine, Secq256k1Engine}, + }; use crate::{ bellpepper::solver::SatisfyingAssignment, constants::NUM_CHALLENGE_BITS, - gadgets::utils::le_bits_to_num, traits::Group, + gadgets::utils::le_bits_to_num, traits::Engine, }; use ff::Field; use rand::rngs::OsRng; - fn test_poseidon_ro_with() + fn test_poseidon_ro_with() where - // we can print the field elements we get from G's Base & Scalar fields, + // we can print the field elements we get from E's Base & Scalar fields, // and compare their byte representations - <::Base as PrimeField>::Repr: std::fmt::Debug + Abomonation, - <::Scalar as PrimeField>::Repr: std::fmt::Debug + Abomonation, - <::Base as PrimeField>::Repr: PartialEq<<::Scalar as PrimeField>::Repr>, + <::Base as PrimeField>::Repr: std::fmt::Debug + Abomonation, + <::Scalar as PrimeField>::Repr: std::fmt::Debug + Abomonation, + <::Base as PrimeField>::Repr: + PartialEq<<::Scalar as PrimeField>::Repr>, { // Check that the number computed inside the circuit is equal to the number computed outside the circuit let mut csprng: OsRng = OsRng; - let constants = PoseidonConstantsCircuit::::default(); + let constants = PoseidonConstantsCircuit::::default(); let num_absorbs = 32; - let mut ro: PoseidonRO = PoseidonRO::new(constants.clone(), num_absorbs); - let mut ro_gadget: PoseidonROCircuit = + let mut ro: PoseidonRO = PoseidonRO::new(constants.clone(), num_absorbs); + let mut ro_gadget: PoseidonROCircuit = PoseidonROCircuit::new(constants, num_absorbs); - let mut cs = SatisfyingAssignment::::new(); + let mut cs = SatisfyingAssignment::::new(); for i in 0..num_absorbs { - let num = G::Scalar::random(&mut csprng); + let num = E::Scalar::random(&mut csprng); ro.absorb(num); let num_gadget = AllocatedNum::alloc_infallible(cs.namespace(|| format!("data {i}")), || num); num_gadget @@ -250,9 +249,11 @@ mod tests { #[test] fn test_poseidon_ro() { - test_poseidon_ro_with::(); - test_poseidon_ro_with::(); - test_poseidon_ro_with::(); - test_poseidon_ro_with::(); + test_poseidon_ro_with::(); + test_poseidon_ro_with::(); + test_poseidon_ro_with::(); + test_poseidon_ro_with::(); + test_poseidon_ro_with::(); + test_poseidon_ro_with::(); } } diff --git a/src/provider/secp_secq.rs b/src/provider/secp_secq.rs index 13f2c1cf8..ef6251859 100644 --- a/src/provider/secp_secq.rs +++ b/src/provider/secp_secq.rs @@ -6,9 +6,9 @@ use crate::{ keccak::Keccak256Transcript, pedersen::CommitmentEngine, poseidon::{PoseidonRO, PoseidonROCircuit}, - CompressedGroup, GroupExt, + CompressedGroup, DlogGroup, }, - traits::{Group, PrimeFieldExt, TranscriptReprTrait}, + traits::{Engine, Group, PrimeFieldExt, TranscriptReprTrait}, }; use digest::{ExtendableOutput, Update}; use ff::{FromUniformBytes, PrimeField}; 
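The hunks that follow in src/provider/secp_secq.rs introduce Secp256k1Engine and Secq256k1Engine as unit structs and hand them to impl_traits! alongside the existing curve types. As a rough orientation for readers (a sketch, not the literal macro expansion; the name of the in-circuit RO associated type is an assumption, while the other associated types are the ones this patch uses elsewhere as E::GE, E::RO, E::TE, and E::CE), the macro invocation is expected to produce an Engine implementation of approximately this shape inside the module:

// Sketch only: approximate shape of the Engine impl generated by
// impl_traits!(Secp256k1Engine, secp256k1, ...) in this module.
impl Engine for Secp256k1Engine {
  type Base = secp256k1::Base;      // base field of the secp256k1 curve
  type Scalar = secp256k1::Scalar;  // scalar field of the secp256k1 curve
  type GE = secp256k1::Point;       // the discrete-log group (DlogGroup)
  type RO = PoseidonRO<Self::Base, Self::Scalar>; // native random oracle
  type ROCircuit = PoseidonROCircuit<Self::Base>; // in-circuit random oracle (name assumed)
  type TE = Keccak256Transcript<Self>;            // Fiat-Shamir transcript
  type CE = CommitmentEngine<Self>;               // Pedersen commitment engine
}

Downstream code then picks a concrete engine once (for example, type E1 = Secp256k1Engine;) and derives everything else (E1::Scalar, E1::CE, and so on) from that single type parameter, which is what allows defining multiple engines, with different commitment schemes or hash choices, over the same curve cycle.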
@@ -39,7 +39,16 @@ pub mod secq256k1 { }; } +/// An implementation of the Nova `Engine` trait with Secp256k1 curve and Pedersen commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Secp256k1Engine; + +/// An implementation of the Nova `Engine` trait with Secp256k1 curve and Pedersen commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Secq256k1Engine; + impl_traits!( + Secp256k1Engine, secp256k1, Secp256k1Compressed, Secp256k1, @@ -49,6 +58,7 @@ impl_traits!( ); impl_traits!( + Secq256k1Engine, secq256k1, Secq256k1Compressed, Secq256k1, @@ -82,7 +92,7 @@ mod tests { for n in [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1021, ] { - let ck_par = ::from_label(label, n); + let ck_par = ::from_label(label, n); let ck_ser = from_label_serial(label, n); assert_eq!(ck_par.len(), n); assert_eq!(ck_ser.len(), n); diff --git a/src/r1cs/mod.rs b/src/r1cs/mod.rs index 6a261a400..afb074de6 100644 --- a/src/r1cs/mod.rs +++ b/src/r1cs/mod.rs @@ -12,14 +12,14 @@ use crate::{ utils::scalar_as_base, }, traits::{ - commitment::CommitmentEngineTrait, AbsorbInROTrait, Group, ROTrait, TranscriptReprTrait, + commitment::CommitmentEngineTrait, AbsorbInROTrait, Engine, ROTrait, TranscriptReprTrait, }, Commitment, CommitmentKey, CE, }; use abomonation::Abomonation; use abomonation_derive::Abomonation; use core::cmp::max; -use ff::Field; +use ff::{Field, PrimeField}; use once_cell::sync::OnceCell; use rayon::prelude::*; @@ -29,54 +29,54 @@ pub(crate) use self::sparse::SparseMatrix; /// A type that holds the shape of the R1CS matrices #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Abomonation)] -#[abomonation_bounds(where ::Repr: Abomonation)] -pub struct R1CSShape { +#[abomonation_bounds(where ::Repr: Abomonation)] +pub struct R1CSShape { pub(crate) num_cons: usize, pub(crate) num_vars: usize, pub(crate) num_io: usize, - pub(crate) A: SparseMatrix, - pub(crate) B: SparseMatrix, - pub(crate) C: SparseMatrix, - #[abomonation_skip] + pub(crate) A: SparseMatrix, + pub(crate) B: SparseMatrix, + pub(crate) C: SparseMatrix, #[serde(skip, default = "OnceCell::new")] - pub(crate) digest: OnceCell, + #[abomonate_with(::Repr)] + pub(crate) digest: OnceCell, } -impl SimpleDigestible for R1CSShape {} +impl SimpleDigestible for R1CSShape {} /// A type that holds a witness for a given R1CS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct R1CSWitness { - W: Vec, +pub struct R1CSWitness { + W: Vec, } /// A type that holds an R1CS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] -pub struct R1CSInstance { - pub(crate) comm_W: Commitment, - pub(crate) X: Vec, +pub struct R1CSInstance { + pub(crate) comm_W: Commitment, + pub(crate) X: Vec, } /// A type that holds a witness for a given Relaxed R1CS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct RelaxedR1CSWitness { - pub(crate) W: Vec, - pub(crate) E: Vec, +pub struct RelaxedR1CSWitness { + pub(crate) W: Vec, + pub(crate) E: Vec, } /// A type that holds a Relaxed R1CS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] -pub struct RelaxedR1CSInstance { - pub(crate) comm_W: Commitment, - pub(crate) comm_E: Commitment, - pub(crate) X: Vec, - pub(crate) u: G::Scalar, +pub struct RelaxedR1CSInstance { + pub(crate) comm_W: Commitment, + pub(crate) comm_E: Commitment, + pub(crate) X: Vec, + pub(crate) u: E::Scalar, } /// A type for functions that hints 
commitment key sizing by returning the floor of the number of required generators. -pub type CommitmentKeyHint = dyn Fn(&R1CSShape) -> usize; +pub type CommitmentKeyHint = dyn Fn(&R1CSShape) -> usize; /// Generates public parameters for a Rank-1 Constraint System (R1CS). /// @@ -89,36 +89,36 @@ pub type CommitmentKeyHint = dyn Fn(&R1CSShape) -> usize; /// * `ck_floor`: A function that provides a floor for the number of generators. A good function to /// provide is the `commitment_key_floor` field in the trait `RelaxedR1CSSNARKTrait`. /// -pub fn commitment_key( - S: &R1CSShape, - ck_floor: &CommitmentKeyHint, -) -> CommitmentKey { +pub fn commitment_key( + S: &R1CSShape, + ck_floor: &CommitmentKeyHint, +) -> CommitmentKey { let size = commitment_key_size(S, ck_floor); - G::CE::setup(b"ck", size) + E::CE::setup(b"ck", size) } /// Computes the number of generators required for the commitment key corresponding to shape `S`. -pub fn commitment_key_size(S: &R1CSShape, ck_floor: &CommitmentKeyHint) -> usize { +pub fn commitment_key_size(S: &R1CSShape, ck_floor: &CommitmentKeyHint) -> usize { let num_cons = S.num_cons; let num_vars = S.num_vars; let ck_hint = ck_floor(S); max(max(num_cons, num_vars), ck_hint) } -impl R1CSShape { +impl R1CSShape { /// Create an object of type `R1CSShape` from the explicitly specified R1CS matrices pub fn new( num_cons: usize, num_vars: usize, num_io: usize, - A: SparseMatrix, - B: SparseMatrix, - C: SparseMatrix, - ) -> Result, NovaError> { + A: SparseMatrix, + B: SparseMatrix, + C: SparseMatrix, + ) -> Result, NovaError> { let is_valid = |num_cons: usize, num_vars: usize, num_io: usize, - M: &SparseMatrix| + M: &SparseMatrix| -> Result, NovaError> { M.iter() .map(|(row, col, _val)| { @@ -151,8 +151,8 @@ impl R1CSShape { }) } - /// returnd the digest of the `R1CSShape` - pub fn digest(&self) -> G::Scalar { + /// returned the digest of the `R1CSShape` + pub fn digest(&self) -> E::Scalar { self .digest .get_or_try_init(|| DigestComputer::new(self).digest()) @@ -173,8 +173,8 @@ impl R1CSShape { pub(crate) fn multiply_vec( &self, - z: &[G::Scalar], - ) -> Result<(Vec, Vec, Vec), NovaError> { + z: &[E::Scalar], + ) -> Result<(Vec, Vec, Vec), NovaError> { if z.len() != self.num_io + self.num_vars + 1 { return Err(NovaError::InvalidWitnessLength); } @@ -190,9 +190,9 @@ impl R1CSShape { /// Checks if the Relaxed R1CS instance is satisfiable given a witness and its shape pub fn is_sat_relaxed( &self, - ck: &CommitmentKey, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, + ck: &CommitmentKey, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, ) -> Result<(), NovaError> { assert_eq!(W.W.len(), self.num_vars); assert_eq!(W.E.len(), self.num_cons); @@ -220,7 +220,7 @@ impl R1CSShape { // verify if comm_E and comm_W are commitments to E and W let res_comm = { let (comm_W, comm_E) = - rayon::join(|| CE::::commit(ck, &W.W), || CE::::commit(ck, &W.E)); + rayon::join(|| CE::::commit(ck, &W.W), || CE::::commit(ck, &W.E)); U.comm_W == comm_W && U.comm_E == comm_E }; @@ -233,16 +233,16 @@ impl R1CSShape { /// Checks if the R1CS instance is satisfiable given a witness and its shape pub fn is_sat( &self, - ck: &CommitmentKey, - U: &R1CSInstance, - W: &R1CSWitness, + ck: &CommitmentKey, + U: &R1CSInstance, + W: &R1CSWitness, ) -> Result<(), NovaError> { assert_eq!(W.W.len(), self.num_vars); assert_eq!(U.X.len(), self.num_io); // verify if Az * Bz = u*Cz let res_eq = { - let z = [W.W.clone(), vec![G::Scalar::ONE], U.X.clone()].concat(); + let z = [W.W.clone(), vec![E::Scalar::ONE], 
U.X.clone()].concat(); let (Az, Bz, Cz) = self.multiply_vec(&z)?; assert_eq!(Az.len(), self.num_cons); assert_eq!(Bz.len(), self.num_cons); @@ -260,7 +260,7 @@ impl R1CSShape { res_eq?; // verify if comm_W is a commitment to W - if U.comm_W != CE::::commit(ck, &W.W) { + if U.comm_W != CE::::commit(ck, &W.W) { return Err(NovaError::UnSat); } Ok(()) @@ -270,19 +270,19 @@ impl R1CSShape { /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair pub fn commit_T( &self, - ck: &CommitmentKey, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) -> Result<(Vec, Commitment), NovaError> { + ck: &CommitmentKey, + U1: &RelaxedR1CSInstance, + W1: &RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + ) -> Result<(Vec, Commitment), NovaError> { let (AZ_1, BZ_1, CZ_1) = tracing::trace_span!("AZ_1, BZ_1, CZ_1").in_scope(|| { let Z1 = [W1.W.clone(), vec![U1.u], U1.X.clone()].concat(); self.multiply_vec(&Z1) })?; let (AZ_2, BZ_2, CZ_2) = tracing::trace_span!("AZ_2, BZ_2, CZ_2").in_scope(|| { - let Z2 = [W2.W.clone(), vec![G::Scalar::ONE], U2.X.clone()].concat(); + let Z2 = [W2.W.clone(), vec![E::Scalar::ONE], U2.X.clone()].concat(); self.multiply_vec(&Z2) })?; @@ -291,19 +291,19 @@ impl R1CSShape { let AZ_1_circ_BZ_2 = (0..AZ_1.len()) .into_par_iter() .map(|i| AZ_1[i] * BZ_2[i]) - .collect::>(); + .collect::>(); let AZ_2_circ_BZ_1 = (0..AZ_2.len()) .into_par_iter() .map(|i| AZ_2[i] * BZ_1[i]) - .collect::>(); + .collect::>(); let u_1_cdot_CZ_2 = (0..CZ_2.len()) .into_par_iter() .map(|i| U1.u * CZ_2[i]) - .collect::>(); + .collect::>(); let u_2_cdot_CZ_1 = (0..CZ_1.len()) .into_par_iter() .map(|i| CZ_1[i]) - .collect::>(); + .collect::>(); (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) }); @@ -314,10 +314,10 @@ impl R1CSShape { .zip(&u_1_cdot_CZ_2) .zip(&u_2_cdot_CZ_1) .map(|(((a, b), c), d)| *a + *b - *c - *d) - .collect::>() + .collect::>() }); - let comm_T = CE::::commit(ck, &T); + let comm_T = CE::::commit(ck, &T); Ok((T, comm_T)) } @@ -351,7 +351,7 @@ impl R1CSShape { let num_vars_padded = m; let num_cons_padded = m; - let apply_pad = |mut M: SparseMatrix| -> SparseMatrix { + let apply_pad = |mut M: SparseMatrix| -> SparseMatrix { M.indices.par_iter_mut().for_each(|c| { if *c >= self.num_vars { *c += num_vars_padded - self.num_vars @@ -384,9 +384,9 @@ impl R1CSShape { } } -impl R1CSWitness { +impl R1CSWitness { /// A method to create a witness object using a vector of scalars - pub fn new(S: &R1CSShape, W: &[G::Scalar]) -> Result, NovaError> { + pub fn new(S: &R1CSShape, W: &[E::Scalar]) -> Result, NovaError> { if S.num_vars != W.len() { Err(NovaError::InvalidWitnessLength) } else { @@ -395,18 +395,18 @@ impl R1CSWitness { } /// Commits to the witness using the supplied generators - pub fn commit(&self, ck: &CommitmentKey) -> Commitment { - CE::::commit(ck, &self.W) + pub fn commit(&self, ck: &CommitmentKey) -> Commitment { + CE::::commit(ck, &self.W) } } -impl R1CSInstance { +impl R1CSInstance { /// A method to create an instance object using consitituent elements pub fn new( - S: &R1CSShape, - comm_W: &Commitment, - X: &[G::Scalar], - ) -> Result, NovaError> { + S: &R1CSShape, + comm_W: &Commitment, + X: &[E::Scalar], + ) -> Result, NovaError> { if S.num_io != X.len() { Err(NovaError::InvalidInputLength) } else { @@ -418,44 +418,44 @@ impl R1CSInstance { } } -impl AbsorbInROTrait for R1CSInstance { - fn absorb_in_ro(&self, ro: &mut G::RO) { +impl AbsorbInROTrait for R1CSInstance { + fn absorb_in_ro(&self, ro: &mut 
E::RO) { self.comm_W.absorb_in_ro(ro); for x in &self.X { - ro.absorb(scalar_as_base::(*x)); + ro.absorb(scalar_as_base::(*x)); } } } -impl RelaxedR1CSWitness { +impl RelaxedR1CSWitness { /// Produces a default `RelaxedR1CSWitness` given an `R1CSShape` - pub fn default(S: &R1CSShape) -> RelaxedR1CSWitness { + pub fn default(S: &R1CSShape) -> RelaxedR1CSWitness { RelaxedR1CSWitness { - W: vec![G::Scalar::ZERO; S.num_vars], - E: vec![G::Scalar::ZERO; S.num_cons], + W: vec![E::Scalar::ZERO; S.num_vars], + E: vec![E::Scalar::ZERO; S.num_cons], } } /// Initializes a new `RelaxedR1CSWitness` from an `R1CSWitness` - pub fn from_r1cs_witness(S: &R1CSShape, witness: &R1CSWitness) -> RelaxedR1CSWitness { + pub fn from_r1cs_witness(S: &R1CSShape, witness: &R1CSWitness) -> RelaxedR1CSWitness { RelaxedR1CSWitness { W: witness.W.clone(), - E: vec![G::Scalar::ZERO; S.num_cons], + E: vec![E::Scalar::ZERO; S.num_cons], } } /// Commits to the witness using the supplied generators - pub fn commit(&self, ck: &CommitmentKey) -> (Commitment, Commitment) { - (CE::::commit(ck, &self.W), CE::::commit(ck, &self.E)) + pub fn commit(&self, ck: &CommitmentKey) -> (Commitment, Commitment) { + (CE::::commit(ck, &self.W), CE::::commit(ck, &self.E)) } /// Folds an incoming `R1CSWitness` into the current one pub fn fold( &self, - W2: &R1CSWitness, - T: &[G::Scalar], - r: &G::Scalar, - ) -> Result, NovaError> { + W2: &R1CSWitness, + T: &[E::Scalar], + r: &E::Scalar, + ) -> Result, NovaError> { let (W1, E1) = (&self.W, &self.E); let W2 = &W2.W; @@ -467,61 +467,61 @@ impl RelaxedR1CSWitness { .par_iter() .zip(W2) .map(|(a, b)| *a + *r * *b) - .collect::>(); + .collect::>(); let E = E1 .par_iter() .zip(T) .map(|(a, b)| *a + *r * *b) - .collect::>(); + .collect::>(); Ok(RelaxedR1CSWitness { W, E }) } /// Pads the provided witness to the correct length - pub fn pad(&self, S: &R1CSShape) -> RelaxedR1CSWitness { + pub fn pad(&self, S: &R1CSShape) -> RelaxedR1CSWitness { let mut W = self.W.clone(); - W.extend(vec![G::Scalar::ZERO; S.num_vars - W.len()]); + W.extend(vec![E::Scalar::ZERO; S.num_vars - W.len()]); let mut E = self.E.clone(); - E.extend(vec![G::Scalar::ZERO; S.num_cons - E.len()]); + E.extend(vec![E::Scalar::ZERO; S.num_cons - E.len()]); Self { W, E } } } -impl RelaxedR1CSInstance { +impl RelaxedR1CSInstance { /// Produces a default `RelaxedR1CSInstance` given `R1CSGens` and `R1CSShape` - pub fn default(_ck: &CommitmentKey, S: &R1CSShape) -> RelaxedR1CSInstance { - let (comm_W, comm_E) = (Commitment::::default(), Commitment::::default()); + pub fn default(_ck: &CommitmentKey, S: &R1CSShape) -> RelaxedR1CSInstance { + let (comm_W, comm_E) = (Commitment::::default(), Commitment::::default()); RelaxedR1CSInstance { comm_W, comm_E, - u: G::Scalar::ZERO, - X: vec![G::Scalar::ZERO; S.num_io], + u: E::Scalar::ZERO, + X: vec![E::Scalar::ZERO; S.num_io], } } /// Initializes a new `RelaxedR1CSInstance` from an `R1CSInstance` pub fn from_r1cs_instance( - ck: &CommitmentKey, - S: &R1CSShape, - instance: &R1CSInstance, - ) -> RelaxedR1CSInstance { + ck: &CommitmentKey, + S: &R1CSShape, + instance: &R1CSInstance, + ) -> RelaxedR1CSInstance { let mut r_instance = RelaxedR1CSInstance::default(ck, S); r_instance.comm_W = instance.comm_W; - r_instance.u = G::Scalar::ONE; + r_instance.u = E::Scalar::ONE; r_instance.X = instance.X.clone(); r_instance } /// Initializes a new `RelaxedR1CSInstance` from an `R1CSInstance` pub fn from_r1cs_instance_unchecked( - comm_W: &Commitment, - X: &[G::Scalar], - ) -> RelaxedR1CSInstance { + comm_W: 
&Commitment, + X: &[E::Scalar], + ) -> RelaxedR1CSInstance { RelaxedR1CSInstance { comm_W: *comm_W, - comm_E: Commitment::::default(), - u: G::Scalar::ONE, + comm_E: Commitment::::default(), + u: E::Scalar::ONE, X: X.to_vec(), } } @@ -529,10 +529,10 @@ impl RelaxedR1CSInstance { /// Folds an incoming `RelaxedR1CSInstance` into the current one pub fn fold( &self, - U2: &R1CSInstance, - comm_T: &Commitment, - r: &G::Scalar, - ) -> RelaxedR1CSInstance { + U2: &R1CSInstance, + comm_T: &Commitment, + r: &E::Scalar, + ) -> RelaxedR1CSInstance { let (X1, u1, comm_W_1, comm_E_1) = (&self.X, &self.u, &self.comm_W.clone(), &self.comm_E.clone()); let (X2, comm_W_2) = (&U2.X, &U2.comm_W); @@ -542,7 +542,7 @@ impl RelaxedR1CSInstance { .par_iter() .zip(X2) .map(|(a, b)| *a + *r * *b) - .collect::>(); + .collect::>(); let comm_W = *comm_W_1 + *comm_W_2 * *r; let comm_E = *comm_E_1 + *comm_T * *r; let u = *u1 + *r; @@ -556,7 +556,7 @@ impl RelaxedR1CSInstance { } } -impl TranscriptReprTrait for RelaxedR1CSInstance { +impl TranscriptReprTrait for RelaxedR1CSInstance { fn to_transcript_bytes(&self) -> Vec { [ self.comm_W.to_transcript_bytes(), @@ -568,17 +568,17 @@ impl TranscriptReprTrait for RelaxedR1CSInstance { } } -impl AbsorbInROTrait for RelaxedR1CSInstance { - fn absorb_in_ro(&self, ro: &mut G::RO) { +impl AbsorbInROTrait for RelaxedR1CSInstance { + fn absorb_in_ro(&self, ro: &mut E::RO) { self.comm_W.absorb_in_ro(ro); self.comm_E.absorb_in_ro(ro); - ro.absorb(scalar_as_base::(self.u)); + ro.absorb(scalar_as_base::(self.u)); // absorb each element of self.X in bignum format for x in &self.X { - let limbs: Vec = nat_to_limbs(&f_to_nat(x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); + let limbs: Vec = nat_to_limbs(&f_to_nat(x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); for limb in limbs { - ro.absorb(scalar_as_base::(limb)); + ro.absorb(scalar_as_base::(limb)); } } } @@ -589,10 +589,14 @@ mod tests { use ff::Field; use super::*; - use crate::{r1cs::sparse::SparseMatrix, traits::Group}; - - fn tiny_r1cs(num_vars: usize) -> R1CSShape { - let one = ::ONE; + use crate::{ + provider::{bn256_grumpkin::Bn256Engine, pasta::PallasEngine, secp_secq::Secp256k1Engine}, + r1cs::sparse::SparseMatrix, + traits::Engine, + }; + + fn tiny_r1cs(num_vars: usize) -> R1CSShape { + let one = ::ONE; let (num_cons, num_vars, num_io, A, B, C) = { let num_cons = 4; let num_io = 2; @@ -608,9 +612,9 @@ mod tests { // constraint and a column for every entry in z = (vars, u, inputs) // An R1CS instance is satisfiable iff: // Az \circ Bz = u \cdot Cz + E, where z = (vars, 1, inputs) - let mut A: Vec<(usize, usize, G::Scalar)> = Vec::new(); - let mut B: Vec<(usize, usize, G::Scalar)> = Vec::new(); - let mut C: Vec<(usize, usize, G::Scalar)> = Vec::new(); + let mut A: Vec<(usize, usize, E::Scalar)> = Vec::new(); + let mut B: Vec<(usize, usize, E::Scalar)> = Vec::new(); + let mut C: Vec<(usize, usize, E::Scalar)> = Vec::new(); // constraint 0 entries in (A,B,C) // `I0 * I0 - Z0 = 0` @@ -657,19 +661,19 @@ mod tests { res.unwrap() } - fn test_pad_tiny_r1cs_with() { - let padded_r1cs = tiny_r1cs::(3).pad(); + fn test_pad_tiny_r1cs_with() { + let padded_r1cs = tiny_r1cs::(3).pad(); assert!(padded_r1cs.is_regular_shape()); - let expected_r1cs = tiny_r1cs::(4); + let expected_r1cs = tiny_r1cs::(4); assert_eq!(padded_r1cs, expected_r1cs); } #[test] fn test_pad_tiny_r1cs() { - test_pad_tiny_r1cs_with::(); - test_pad_tiny_r1cs_with::(); - test_pad_tiny_r1cs_with::(); + test_pad_tiny_r1cs_with::(); + test_pad_tiny_r1cs_with::(); + 
test_pad_tiny_r1cs_with::(); } } diff --git a/src/r1cs/sparse.rs b/src/r1cs/sparse.rs index 6c22b67ca..530378df5 100644 --- a/src/r1cs/sparse.rs +++ b/src/r1cs/sparse.rs @@ -184,15 +184,18 @@ impl<'a, F: PrimeField> Iterator for Iter<'a, F> { #[cfg(test)] mod tests { - use crate::{r1cs::util::FWrap, traits::Group}; - use super::SparseMatrix; - use pasta_curves::pallas::Point as G; + use crate::{ + provider::pasta::PallasEngine, + r1cs::util::FWrap, + traits::{Engine, Group}, + }; use proptest::{ prelude::*, strategy::{BoxedStrategy, Just, Strategy}, }; + type G = ::GE; type Fr = ::Scalar; #[test] diff --git a/src/spartan/direct.rs b/src/spartan/direct.rs index 0b19a78fa..b57d68121 100644 --- a/src/spartan/direct.rs +++ b/src/spartan/direct.rs @@ -12,7 +12,7 @@ use crate::{ traits::{ circuit::StepCircuit, snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait}, - Group, + Engine, }, Commitment, CommitmentKey, }; @@ -21,25 +21,25 @@ use core::marker::PhantomData; use ff::Field; use serde::{Deserialize, Serialize}; -struct DirectCircuit> { - z_i: Option>, // inputs to the circuit +struct DirectCircuit> { + z_i: Option>, // inputs to the circuit sc: SC, // step circuit to be executed } -impl> Circuit for DirectCircuit { - fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { +impl> Circuit for DirectCircuit { + fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { // obtain the arity information let arity = self.sc.arity(); // Allocate zi. If inputs.zi is not provided, allocate default value 0 - let zero = vec![G::Scalar::ZERO; arity]; + let zero = vec![E::Scalar::ZERO; arity]; let z_i = (0..arity) .map(|i| { AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { Ok(self.z_i.as_ref().unwrap_or(&zero)[i]) }) }) - .collect::>, _>>()?; + .collect::>, _>>()?; let z_i_plus_one = self.sc.synthesize(&mut cs.namespace(|| "F"), &z_i)?; @@ -58,30 +58,30 @@ impl> Circuit for DirectCircuit< /// A type that holds the prover key #[derive(Clone, Serialize, Deserialize)] #[serde(bound = "")] -pub struct ProverKey +pub struct ProverKey where - G: Group, - S: RelaxedR1CSSNARKTrait, + E: Engine, + S: RelaxedR1CSSNARKTrait, { - S: R1CSShape, - ck: CommitmentKey, + S: R1CSShape, + ck: CommitmentKey, pk: S::ProverKey, } /// A type that holds the verifier key #[derive(Clone, Serialize, Deserialize)] #[serde(bound = "")] -pub struct VerifierKey +pub struct VerifierKey where - G: Group, - S: RelaxedR1CSSNARKTrait, + E: Engine, + S: RelaxedR1CSSNARKTrait, { vk: S::VerifierKey, } -impl> VerifierKey { +impl> VerifierKey { /// Returns the digest of the verifier's key - pub fn digest(&self) -> G::Scalar { + pub fn digest(&self) -> E::Scalar { self.vk.digest() } } @@ -89,24 +89,24 @@ impl> VerifierKey { /// A direct SNARK proving a step circuit #[derive(Clone, Serialize, Deserialize)] #[serde(bound = "")] -pub struct DirectSNARK +pub struct DirectSNARK where - G: Group, - S: RelaxedR1CSSNARKTrait, - C: StepCircuit, + E: Engine, + S: RelaxedR1CSSNARKTrait, + C: StepCircuit, { - comm_W: Commitment, // commitment to the witness + comm_W: Commitment, // commitment to the witness snark: S, // snark proving the witness is satisfying _p: PhantomData, } -impl, C: StepCircuit> DirectSNARK { +impl, C: StepCircuit> DirectSNARK { /// Produces prover and verifier keys for the direct SNARK - pub fn setup(sc: C) -> Result<(ProverKey, VerifierKey), NovaError> { + pub fn setup(sc: C) -> Result<(ProverKey, VerifierKey), NovaError> { // construct a circuit that can be synthesized - let circuit: DirectCircuit = 
DirectCircuit { z_i: None, sc }; + let circuit: DirectCircuit = DirectCircuit { z_i: None, sc }; - let mut cs: ShapeCS = ShapeCS::new(); + let mut cs: ShapeCS = ShapeCS::new(); let _ = circuit.synthesize(&mut cs); let (shape, ck) = cs.r1cs_shape_and_key(&*S::ck_floor()); @@ -120,10 +120,10 @@ impl, C: StepCircuit> DirectSNA } /// Produces a proof of satisfiability of the provided circuit - pub fn prove(pk: &ProverKey, sc: C, z_i: &[G::Scalar]) -> Result { - let mut cs = SatisfyingAssignment::::new(); + pub fn prove(pk: &ProverKey, sc: C, z_i: &[E::Scalar]) -> Result { + let mut cs = SatisfyingAssignment::::new(); - let circuit: DirectCircuit = DirectCircuit { + let circuit: DirectCircuit = DirectCircuit { z_i: Some(z_i.to_vec()), sc, }; @@ -150,7 +150,7 @@ impl, C: StepCircuit> DirectSNA } /// Verifies a proof of satisfiability - pub fn verify(&self, vk: &VerifierKey, io: &[G::Scalar]) -> Result<(), NovaError> { + pub fn verify(&self, vk: &VerifierKey, io: &[E::Scalar]) -> Result<(), NovaError> { // construct an instance using the provided commitment to the witness and z_i and z_{i+1} let u_relaxed = RelaxedR1CSInstance::from_r1cs_instance_unchecked(&self.comm_W, io); @@ -164,7 +164,9 @@ impl, C: StepCircuit> DirectSNA #[cfg(test)] mod tests { use super::*; - use crate::provider::{bn256_grumpkin::bn256, secp_secq::secp256k1}; + use crate::provider::{ + bn256_grumpkin::Bn256Engine, pasta::PallasEngine, secp_secq::Secp256k1Engine, + }; use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; use core::marker::PhantomData; use ff::PrimeField; @@ -174,10 +176,7 @@ mod tests { _p: PhantomData, } - impl StepCircuit for CubicCircuit - where - F: PrimeField, - { + impl StepCircuit for CubicCircuit { fn arity(&self) -> usize { 1 } @@ -222,42 +221,42 @@ mod tests { #[test] fn test_direct_snark() { - type G = pasta_curves::pallas::Point; - type EE = crate::provider::ipa_pc::EvaluationEngine; - type S = crate::spartan::snark::RelaxedR1CSSNARK; - test_direct_snark_with::(); + type E = PallasEngine; + type EE = crate::provider::ipa_pc::EvaluationEngine; + type S = crate::spartan::snark::RelaxedR1CSSNARK; + test_direct_snark_with::(); - type Spp = crate::spartan::ppsnark::RelaxedR1CSSNARK; - test_direct_snark_with::(); + type Spp = crate::spartan::ppsnark::RelaxedR1CSSNARK; + test_direct_snark_with::(); - type G2 = bn256::Point; - type EE2 = crate::provider::ipa_pc::EvaluationEngine; - type S2 = crate::spartan::snark::RelaxedR1CSSNARK; - test_direct_snark_with::(); + type E2 = Bn256Engine; + type EE2 = crate::provider::ipa_pc::EvaluationEngine; + type S2 = crate::spartan::snark::RelaxedR1CSSNARK; + test_direct_snark_with::(); - type S2pp = crate::spartan::ppsnark::RelaxedR1CSSNARK; - test_direct_snark_with::(); + type S2pp = crate::spartan::ppsnark::RelaxedR1CSSNARK; + test_direct_snark_with::(); - type G3 = secp256k1::Point; - type EE3 = crate::provider::ipa_pc::EvaluationEngine; - type S3 = crate::spartan::snark::RelaxedR1CSSNARK; - test_direct_snark_with::(); + type E3 = Secp256k1Engine; + type EE3 = crate::provider::ipa_pc::EvaluationEngine; + type S3 = crate::spartan::snark::RelaxedR1CSSNARK; + test_direct_snark_with::(); - type S3pp = crate::spartan::ppsnark::RelaxedR1CSSNARK; - test_direct_snark_with::(); + type S3pp = crate::spartan::ppsnark::RelaxedR1CSSNARK; + test_direct_snark_with::(); } - fn test_direct_snark_with>() { + fn test_direct_snark_with>() { let circuit = CubicCircuit::default(); // produce keys let (pk, vk) = - 
DirectSNARK::::Scalar>>::setup(circuit.clone()).unwrap(); + DirectSNARK::::Scalar>>::setup(circuit.clone()).unwrap(); let num_steps = 3; // setup inputs - let z0 = vec![::Scalar::ZERO]; + let z0 = vec![::Scalar::ZERO]; let mut z_i = z0; for _i in 0..num_steps { @@ -283,6 +282,6 @@ mod tests { } // sanity: check the claimed output with a direct computation of the same - assert_eq!(z_i, vec![::Scalar::from(2460515u64)]); + assert_eq!(z_i, vec![::Scalar::from(2460515u64)]); } } diff --git a/src/spartan/mod.rs b/src/spartan/mod.rs index 4578e0770..3e7c514e8 100644 --- a/src/spartan/mod.rs +++ b/src/spartan/mod.rs @@ -12,15 +12,15 @@ pub mod ppsnark; pub mod snark; mod sumcheck; -use crate::{traits::Group, Commitment}; +use crate::{traits::Engine, Commitment}; use ff::Field; use polys::multilinear::SparsePolynomial; use rayon::{iter::IntoParallelRefIterator, prelude::*}; -fn powers(s: &G::Scalar, n: usize) -> Vec { +fn powers(s: &E::Scalar, n: usize) -> Vec { assert!(n >= 1); let mut powers = Vec::new(); - powers.push(G::Scalar::ONE); + powers.push(E::Scalar::ONE); for i in 1..n { powers.push(powers[i - 1] * s); } @@ -28,16 +28,16 @@ fn powers(s: &G::Scalar, n: usize) -> Vec { } /// A type that holds a witness to a polynomial evaluation instance -pub struct PolyEvalWitness { - p: Vec, // polynomial +pub struct PolyEvalWitness { + p: Vec, // polynomial } -impl PolyEvalWitness { - fn pad(mut W: Vec>) -> Vec> { +impl PolyEvalWitness { + fn pad(mut W: Vec>) -> Vec> { // determine the maximum size if let Some(n) = W.iter().map(|w| w.p.len()).max() { W.iter_mut().for_each(|w| { - w.p.resize(n, G::Scalar::ZERO); + w.p.resize(n, E::Scalar::ZERO); }); W } else { @@ -45,9 +45,9 @@ impl PolyEvalWitness { } } - fn weighted_sum(W: &[PolyEvalWitness], s: &[G::Scalar]) -> PolyEvalWitness { + fn weighted_sum(W: &[PolyEvalWitness], s: &[E::Scalar]) -> PolyEvalWitness { assert_eq!(W.len(), s.len()); - let mut p = vec![G::Scalar::ZERO; W[0].p.len()]; + let mut p = vec![E::Scalar::ZERO; W[0].p.len()]; for i in 0..W.len() { for j in 0..W[i].p.len() { p[j] += W[i].p[j] * s[i] @@ -57,22 +57,22 @@ impl PolyEvalWitness { } // This method panics unless all vectors in p_vec are of the same length - fn batch(p_vec: &[&Vec], s: &G::Scalar) -> PolyEvalWitness { + fn batch(p_vec: &[&Vec], s: &E::Scalar) -> PolyEvalWitness { p_vec .iter() .for_each(|p| assert_eq!(p.len(), p_vec[0].len())); - let powers_of_s = powers::(s, p_vec.len()); + let powers_of_s = powers::(s, p_vec.len()); let p = p_vec .par_iter() .zip(powers_of_s.par_iter()) .map(|(v, &weight)| { // compute the weighted sum for each vector - v.iter().map(|&x| x * weight).collect::>() + v.iter().map(|&x| x * weight).collect::>() }) .reduce( - || vec![G::Scalar::ZERO; p_vec[0].len()], + || vec![E::Scalar::ZERO; p_vec[0].len()], |acc, v| { // perform vector addition to combine the weighted vectors acc.into_iter().zip(v).map(|(x, y)| x + y).collect() @@ -84,19 +84,19 @@ impl PolyEvalWitness { } /// A type that holds a polynomial evaluation instance -pub struct PolyEvalInstance { - c: Commitment, // commitment to the polynomial - x: Vec, // evaluation point - e: G::Scalar, // claimed evaluation +pub struct PolyEvalInstance { + c: Commitment, // commitment to the polynomial + x: Vec, // evaluation point + e: E::Scalar, // claimed evaluation } -impl PolyEvalInstance { - fn pad(U: Vec>) -> Vec> { +impl PolyEvalInstance { + fn pad(U: Vec>) -> Vec> { // determine the maximum size if let Some(ell) = U.iter().map(|u| u.x.len()).max() { U.into_iter() .map(|mut u| { - let mut x = 
vec![G::Scalar::ZERO; ell - u.x.len()]; + let mut x = vec![E::Scalar::ZERO; ell - u.x.len()]; x.append(&mut u.x); PolyEvalInstance { x, ..u } }) @@ -107,12 +107,12 @@ impl PolyEvalInstance { } fn batch( - c_vec: &[Commitment], - x: &[G::Scalar], - e_vec: &[G::Scalar], - s: &G::Scalar, - ) -> PolyEvalInstance { - let powers_of_s = powers::(s, c_vec.len()); + c_vec: &[Commitment], + x: &[E::Scalar], + e_vec: &[E::Scalar], + s: &E::Scalar, + ) -> PolyEvalInstance { + let powers_of_s = powers::(s, c_vec.len()); let e = e_vec .par_iter() .zip(powers_of_s.par_iter()) @@ -122,7 +122,7 @@ impl PolyEvalInstance { .par_iter() .zip(powers_of_s.par_iter()) .map(|(c, p)| *c * *p) - .reduce(Commitment::::default, |acc, item| acc + item); + .reduce(Commitment::::default, |acc, item| acc + item); PolyEvalInstance { c, diff --git a/src/spartan/polys/univariate.rs b/src/spartan/polys/univariate.rs index afc500877..bfe983e5b 100644 --- a/src/spartan/polys/univariate.rs +++ b/src/spartan/polys/univariate.rs @@ -109,7 +109,10 @@ impl CompressedUniPoly { impl TranscriptReprTrait for UniPoly { fn to_transcript_bytes(&self) -> Vec { let coeffs = self.compress().coeffs_except_linear_term; - coeffs.as_slice().to_transcript_bytes() + coeffs + .iter() + .flat_map(|&t| t.to_repr().as_ref().to_vec()) + .collect::>() } } #[cfg(test)] diff --git a/src/spartan/ppsnark.rs b/src/spartan/ppsnark.rs index b3a83e092..1fc017d84 100644 --- a/src/spartan/ppsnark.rs +++ b/src/spartan/ppsnark.rs @@ -24,7 +24,7 @@ use crate::{ commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, evaluation::EvaluationEngineTrait, snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait}, - Group, TranscriptEngineTrait, TranscriptReprTrait, + Engine, TranscriptEngineTrait, TranscriptReprTrait, }, Commitment, CommitmentKey, CompressedCommitment, }; @@ -36,7 +36,7 @@ use once_cell::sync::OnceCell; use rayon::prelude::*; use serde::{Deserialize, Serialize}; -fn padded(v: &[G::Scalar], n: usize, e: &G::Scalar) -> Vec { +fn padded(v: &[E::Scalar], n: usize, e: &E::Scalar) -> Vec { let mut v_padded = vec![*e; n]; for (i, v_i) in v.iter().enumerate() { v_padded[i] = *v_i; @@ -47,49 +47,49 @@ fn padded(v: &[G::Scalar], n: usize, e: &G::Scalar) -> Vec /// A type that holds `R1CSShape` in a form amenable to memory checking #[derive(Clone, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] -#[abomonation_bounds(where ::Repr: Abomonation)] -pub struct R1CSShapeSparkRepr { +#[abomonation_bounds(where ::Repr: Abomonation)] +pub struct R1CSShapeSparkRepr { N: usize, // size of the vectors // dense representation - #[abomonate_with(Vec<::Repr>)] - row: Vec, - #[abomonate_with(Vec<::Repr>)] - col: Vec, - #[abomonate_with(Vec<::Repr>)] - val_A: Vec, - #[abomonate_with(Vec<::Repr>)] - val_B: Vec, - #[abomonate_with(Vec<::Repr>)] - val_C: Vec, + #[abomonate_with(Vec<::Repr>)] + row: Vec, + #[abomonate_with(Vec<::Repr>)] + col: Vec, + #[abomonate_with(Vec<::Repr>)] + val_A: Vec, + #[abomonate_with(Vec<::Repr>)] + val_B: Vec, + #[abomonate_with(Vec<::Repr>)] + val_C: Vec, // timestamp polynomials - #[abomonate_with(Vec<::Repr>)] - ts_row: Vec, - #[abomonate_with(Vec<::Repr>)] - ts_col: Vec, + #[abomonate_with(Vec<::Repr>)] + ts_row: Vec, + #[abomonate_with(Vec<::Repr>)] + ts_col: Vec, } /// A type that holds a commitment to a sparse polynomial #[derive(Clone, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] -#[abomonation_bounds(where ::Repr: Abomonation)] -pub struct R1CSShapeSparkCommitment { +#[abomonation_bounds(where ::Repr: Abomonation)] +pub struct 
R1CSShapeSparkCommitment { N: usize, // size of each vector // commitments to the dense representation - comm_row: Commitment, - comm_col: Commitment, - comm_val_A: Commitment, - comm_val_B: Commitment, - comm_val_C: Commitment, + comm_row: Commitment, + comm_col: Commitment, + comm_val_A: Commitment, + comm_val_B: Commitment, + comm_val_C: Commitment, // commitments to the timestamp polynomials - comm_ts_row: Commitment, - comm_ts_col: Commitment, + comm_ts_row: Commitment, + comm_ts_col: Commitment, } -impl TranscriptReprTrait for R1CSShapeSparkCommitment { +impl TranscriptReprTrait for R1CSShapeSparkCommitment { fn to_transcript_bytes(&self) -> Vec { [ self.comm_row, @@ -105,9 +105,9 @@ impl TranscriptReprTrait for R1CSShapeSparkCommitment { } } -impl R1CSShapeSparkRepr { +impl R1CSShapeSparkRepr { /// represents `R1CSShape` in a Spark-friendly format amenable to memory checking - pub fn new(S: &R1CSShape) -> R1CSShapeSparkRepr { + pub fn new(S: &R1CSShape) -> R1CSShapeSparkRepr { let N = { let total_nz = S.A.len() + S.B.len() + S.C.len(); max(total_nz, max(2 * S.num_vars, S.num_cons)).next_power_of_two() @@ -120,7 +120,7 @@ impl R1CSShapeSparkRepr { col[i] = c; } let val_A = { - let mut val = vec![G::Scalar::ZERO; N]; + let mut val = vec![E::Scalar::ZERO; N]; for (i, (_, _, v)) in S.A.iter().enumerate() { val[i] = v; } @@ -128,7 +128,7 @@ impl R1CSShapeSparkRepr { }; let val_B = { - let mut val = vec![G::Scalar::ZERO; N]; + let mut val = vec![E::Scalar::ZERO; N]; for (i, (_, _, v)) in S.B.iter().enumerate() { val[S.A.len() + i] = v; } @@ -136,7 +136,7 @@ impl R1CSShapeSparkRepr { }; let val_C = { - let mut val = vec![G::Scalar::ZERO; N]; + let mut val = vec![E::Scalar::ZERO; N]; for (i, (_, _, v)) in S.C.iter().enumerate() { val[S.A.len() + S.B.len() + i] = v; } @@ -160,10 +160,10 @@ impl R1CSShapeSparkRepr { rayon::join(|| timestamp_calc(N, N, &row), || timestamp_calc(N, N, &col)); // a routine to turn a vector of usize into a vector scalars - let to_vec_scalar = |v: &[usize]| -> Vec { + let to_vec_scalar = |v: &[usize]| -> Vec { (0..v.len()) - .map(|i| G::Scalar::from(v[i] as u64)) - .collect::>() + .map(|i| E::Scalar::from(v[i] as u64)) + .collect::>() }; R1CSShapeSparkRepr { @@ -182,8 +182,8 @@ impl R1CSShapeSparkRepr { } } - fn commit(&self, ck: &CommitmentKey) -> R1CSShapeSparkCommitment { - let comm_vec: Vec> = [ + fn commit(&self, ck: &CommitmentKey) -> R1CSShapeSparkCommitment { + let comm_vec: Vec> = [ &self.row, &self.col, &self.val_A, @@ -193,7 +193,7 @@ impl R1CSShapeSparkRepr { &self.ts_col, ] .par_iter() - .map(|v| G::CE::commit(ck, v)) + .map(|v| E::CE::commit(ck, v)) .collect(); R1CSShapeSparkCommitment { @@ -211,17 +211,17 @@ impl R1CSShapeSparkRepr { // computes evaluation oracles fn evaluation_oracles( &self, - S: &R1CSShape, - r_x: &G::Scalar, - z: &[G::Scalar], + S: &R1CSShape, + r_x: &E::Scalar, + z: &[E::Scalar], ) -> ( - Vec, - Vec, - Vec, - Vec, + Vec, + Vec, + Vec, + Vec, ) { let mem_row = PowPolynomial::new(r_x, self.N.log_2()).evals(); - let mem_col = padded::(z, self.N, &G::Scalar::ZERO); + let mem_col = padded::(z, self.N, &E::Scalar::ZERO); let (L_row, L_col) = { let mut L_row = vec![mem_row[0]; self.N]; // we place mem_row[0] since resized row is appended with 0s @@ -246,9 +246,9 @@ impl R1CSShapeSparkRepr { } /// Defines a trait for implementing sum-check in a generic manner -pub trait SumcheckEngine: Send + Sync { +pub trait SumcheckEngine: Send + Sync { /// returns the initial claims - fn initial_claims(&self) -> Vec; + fn initial_claims(&self) -> 
Vec; /// degree of the sum-check polynomial fn degree(&self) -> usize; @@ -257,52 +257,52 @@ pub trait SumcheckEngine: Send + Sync { fn size(&self) -> usize; /// returns evaluation points at 0, 2, d-1 (where d is the degree of the sum-check polynomial) - fn evaluation_points(&self) -> Vec>; + fn evaluation_points(&self) -> Vec>; /// bounds a variable in the constituent polynomials - fn bound(&mut self, r: &G::Scalar); + fn bound(&mut self, r: &E::Scalar); /// returns the final claims - fn final_claims(&self) -> Vec>; + fn final_claims(&self) -> Vec>; } -struct MemorySumcheckInstance { +struct MemorySumcheckInstance { // row - w_plus_r_row: MultilinearPolynomial, - t_plus_r_row: MultilinearPolynomial, - t_plus_r_inv_row: MultilinearPolynomial, - w_plus_r_inv_row: MultilinearPolynomial, - ts_row: MultilinearPolynomial, + w_plus_r_row: MultilinearPolynomial, + t_plus_r_row: MultilinearPolynomial, + t_plus_r_inv_row: MultilinearPolynomial, + w_plus_r_inv_row: MultilinearPolynomial, + ts_row: MultilinearPolynomial, // col - w_plus_r_col: MultilinearPolynomial, - t_plus_r_col: MultilinearPolynomial, - t_plus_r_inv_col: MultilinearPolynomial, - w_plus_r_inv_col: MultilinearPolynomial, - ts_col: MultilinearPolynomial, + w_plus_r_col: MultilinearPolynomial, + t_plus_r_col: MultilinearPolynomial, + t_plus_r_inv_col: MultilinearPolynomial, + w_plus_r_inv_col: MultilinearPolynomial, + ts_col: MultilinearPolynomial, // eq - poly_eq: MultilinearPolynomial, + poly_eq: MultilinearPolynomial, // zero polynomial - poly_zero: MultilinearPolynomial, + poly_zero: MultilinearPolynomial, } -impl MemorySumcheckInstance { +impl MemorySumcheckInstance { pub fn new( - ck: &CommitmentKey, - r: &G::Scalar, - T_row: &[G::Scalar], - W_row: &[G::Scalar], - ts_row: Vec, - T_col: &[G::Scalar], - W_col: &[G::Scalar], - ts_col: Vec, - transcript: &mut G::TE, - ) -> Result<(Self, [Commitment; 4], [Vec; 4]), NovaError> { - let batch_invert = |v: &[G::Scalar]| -> Result, NovaError> { - let mut products = vec![G::Scalar::ZERO; v.len()]; - let mut acc = G::Scalar::ONE; + ck: &CommitmentKey, + r: & E::Scalar, + T_row: &[E::Scalar], + W_row: &[E::Scalar], + ts_row: Vec, + T_col: &[E::Scalar], + W_col: &[E::Scalar], + ts_col: Vec, + transcript: &mut E::TE, + ) -> Result<(Self, [Commitment; 4], [Vec; 4]), NovaError> { + let batch_invert = |v: &[E::Scalar]| -> Result, NovaError> { + let mut products = vec![E::Scalar::ZERO; v.len()]; + let mut acc = E::Scalar::ONE; for i in 0..v.len() { products[i] = acc; @@ -310,14 +310,14 @@ impl MemorySumcheckInstance { } // we can compute an inversion only if acc is non-zero - if acc == G::Scalar::ZERO { + if acc == E::Scalar::ZERO { return Err(NovaError::InternalError); } // compute the inverse once for all entries acc = acc.invert().unwrap(); - let mut inv = vec![G::Scalar::ZERO; v.len()]; + let mut inv = vec![E::Scalar::ZERO; v.len()]; for i in 0..v.len() { let tmp = acc * v[v.len() - 1 - i]; inv[v.len() - 1 - i] = products[v.len() - 1 - i] * acc; @@ -328,25 +328,25 @@ impl MemorySumcheckInstance { }; // compute vectors TS[i]/(T[i] + r) and 1/(W[i] + r) - let helper = |T: &[G::Scalar], - W: &[G::Scalar], - TS: &[G::Scalar], - r: &G::Scalar| + let helper = |T: &[E::Scalar], + W: &[E::Scalar], + TS: &[E::Scalar], + r: &E::Scalar| -> ( ( - Result, NovaError>, - Result, NovaError>, + Result, NovaError>, + Result, NovaError>, ), ( - Result, NovaError>, - Result, NovaError>, + Result, NovaError>, + Result, NovaError>, ), ) { rayon::join( || { rayon::join( || { - let inv = 
batch_invert(&T.par_iter().map(|e| *e + *r).collect::>())?; + let inv = batch_invert(&T.par_iter().map(|e| *e + *r).collect::>())?; // compute inv[i] * TS[i] in parallel Ok( @@ -357,13 +357,13 @@ impl MemorySumcheckInstance { .collect::>(), ) }, - || batch_invert(&W.par_iter().map(|e| *e + *r).collect::>()), + || batch_invert(&W.par_iter().map(|e| *e + *r).collect::>()), ) }, || { rayon::join( - || Ok(T.par_iter().map(|e| *e + *r).collect::>()), - || Ok(W.par_iter().map(|e| *e + *r).collect::>()), + || Ok(T.par_iter().map(|e| *e + *r).collect::>()), + || Ok(W.par_iter().map(|e| *e + *r).collect::>()), ) }, ) @@ -388,14 +388,14 @@ impl MemorySumcheckInstance { ) = rayon::join( || { rayon::join( - || G::CE::commit(ck, &t_plus_r_inv_row), - || G::CE::commit(ck, &w_plus_r_inv_row), + || E::CE::commit(ck, &t_plus_r_inv_row), + || E::CE::commit(ck, &w_plus_r_inv_row), ) }, || { rayon::join( - || G::CE::commit(ck, &t_plus_r_inv_col), - || G::CE::commit(ck, &w_plus_r_inv_col), + || E::CE::commit(ck, &t_plus_r_inv_col), + || E::CE::commit(ck, &w_plus_r_inv_col), ) }, ); @@ -429,7 +429,7 @@ impl MemorySumcheckInstance { w_plus_r_inv_col.clone(), ]; - let zero = vec![G::Scalar::ZERO; t_plus_r_inv_row.len()]; + let zero = vec![E::Scalar::ZERO; t_plus_r_inv_row.len()]; Ok(( Self { @@ -452,9 +452,9 @@ impl MemorySumcheckInstance { } } -impl SumcheckEngine for MemorySumcheckInstance { - fn initial_claims(&self) -> Vec { - vec![G::Scalar::ZERO; 6] +impl SumcheckEngine for MemorySumcheckInstance { + fn initial_claims(&self) -> Vec { + vec![E::Scalar::ZERO; 6] } fn degree(&self) -> usize { @@ -472,29 +472,29 @@ impl SumcheckEngine for MemorySumcheckInstance { self.w_plus_r_row.len() } - fn evaluation_points(&self) -> Vec> { - let comb_func = |poly_A_comp: &G::Scalar, - poly_B_comp: &G::Scalar, - _poly_C_comp: &G::Scalar| - -> G::Scalar { *poly_A_comp - *poly_B_comp }; + fn evaluation_points(&self) -> Vec> { + let comb_func = |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + _poly_C_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp - *poly_B_comp }; let comb_func2 = - |poly_A_comp: &G::Scalar, - poly_B_comp: &G::Scalar, - poly_C_comp: &G::Scalar, - _poly_D_comp: &G::Scalar| - -> G::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - G::Scalar::ONE) }; + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + _poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - E::Scalar::ONE) }; let comb_func3 = - |poly_A_comp: &G::Scalar, - poly_B_comp: &G::Scalar, - poly_C_comp: &G::Scalar, - poly_D_comp: &G::Scalar| - -> G::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; // inv related evaluation points let (eval_inv_0_row, eval_inv_2_row, eval_inv_3_row) = - SumcheckProof::::compute_eval_points_cubic( + SumcheckProof::::compute_eval_points_cubic( &self.t_plus_r_inv_row, &self.w_plus_r_inv_row, &self.poly_zero, @@ -502,7 +502,7 @@ impl SumcheckEngine for MemorySumcheckInstance { ); let (eval_inv_0_col, eval_inv_2_col, eval_inv_3_col) = - SumcheckProof::::compute_eval_points_cubic( + SumcheckProof::::compute_eval_points_cubic( &self.t_plus_r_inv_col, &self.w_plus_r_inv_col, &self.poly_zero, @@ -511,7 +511,7 @@ impl SumcheckEngine for MemorySumcheckInstance { // row related evaluation points let (eval_T_0_row, eval_T_2_row, eval_T_3_row) = - 
SumcheckProof::::compute_eval_points_cubic_with_additive_term( + SumcheckProof::::compute_eval_points_cubic_with_additive_term( &self.poly_eq, &self.t_plus_r_inv_row, &self.t_plus_r_row, @@ -519,7 +519,7 @@ impl SumcheckEngine for MemorySumcheckInstance { &comb_func3, ); let (eval_W_0_row, eval_W_2_row, eval_W_3_row) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( + SumcheckProof::::compute_eval_points_cubic_with_additive_term( &self.poly_eq, &self.w_plus_r_inv_row, &self.w_plus_r_row, @@ -529,7 +529,7 @@ impl SumcheckEngine for MemorySumcheckInstance { // column related evaluation points let (eval_T_0_col, eval_T_2_col, eval_T_3_col) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( + SumcheckProof::::compute_eval_points_cubic_with_additive_term( &self.poly_eq, &self.t_plus_r_inv_col, &self.t_plus_r_col, @@ -537,7 +537,7 @@ impl SumcheckEngine for MemorySumcheckInstance { &comb_func3, ); let (eval_W_0_col, eval_W_2_col, eval_W_3_col) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( + SumcheckProof::::compute_eval_points_cubic_with_additive_term( &self.poly_eq, &self.w_plus_r_inv_col, &self.w_plus_r_col, @@ -555,7 +555,7 @@ impl SumcheckEngine for MemorySumcheckInstance { ] } - fn bound(&mut self, r: &G::Scalar) { + fn bound(&mut self, r: &E::Scalar) { [ &mut self.t_plus_r_row, &mut self.t_plus_r_inv_row, @@ -573,7 +573,7 @@ impl SumcheckEngine for MemorySumcheckInstance { .for_each(|poly| poly.bind_poly_var_top(r)); } - fn final_claims(&self) -> Vec> { + fn final_claims(&self) -> Vec> { let poly_row_final = vec![ self.t_plus_r_inv_row[0], self.w_plus_r_inv_row[0], @@ -590,28 +590,28 @@ impl SumcheckEngine for MemorySumcheckInstance { } } -struct OuterSumcheckInstance { - poly_tau: MultilinearPolynomial, - poly_Az: MultilinearPolynomial, - poly_Bz: MultilinearPolynomial, - poly_uCz_E: MultilinearPolynomial, +struct OuterSumcheckInstance { + poly_tau: MultilinearPolynomial, + poly_Az: MultilinearPolynomial, + poly_Bz: MultilinearPolynomial, + poly_uCz_E: MultilinearPolynomial, - poly_Mz: MultilinearPolynomial, - eval_Mz_at_tau: G::Scalar, + poly_Mz: MultilinearPolynomial, + eval_Mz_at_tau: E::Scalar, - poly_zero: MultilinearPolynomial, + poly_zero: MultilinearPolynomial, } -impl OuterSumcheckInstance { +impl OuterSumcheckInstance { pub fn new( - tau: Vec, - Az: Vec, - Bz: Vec, - uCz_E: Vec, - Mz: Vec, - eval_Mz_at_tau: &G::Scalar, + tau: Vec, + Az: Vec, + Bz: Vec, + uCz_E: Vec, + Mz: Vec, + eval_Mz_at_tau: &E::Scalar, ) -> Self { - let zero = vec![G::Scalar::ZERO; tau.len()]; + let zero = vec![E::Scalar::ZERO; tau.len()]; Self { poly_tau: MultilinearPolynomial::new(tau), poly_Az: MultilinearPolynomial::new(Az), @@ -624,9 +624,9 @@ impl OuterSumcheckInstance { } } -impl SumcheckEngine for OuterSumcheckInstance { - fn initial_claims(&self) -> Vec { - vec![G::Scalar::ZERO, self.eval_Mz_at_tau] +impl SumcheckEngine for OuterSumcheckInstance { + fn initial_claims(&self) -> Vec { + vec![E::Scalar::ZERO, self.eval_Mz_at_tau] } fn degree(&self) -> usize { @@ -641,16 +641,16 @@ impl SumcheckEngine for OuterSumcheckInstance { self.poly_tau.len() } - fn evaluation_points(&self) -> Vec> { + fn evaluation_points(&self) -> Vec> { let comb_func = - |poly_A_comp: &G::Scalar, - poly_B_comp: &G::Scalar, - poly_C_comp: &G::Scalar, - poly_D_comp: &G::Scalar| - -> G::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + poly_D_comp: &E::Scalar| + -> 
E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; let (eval_point_h_0, eval_point_h_2, eval_point_h_3) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( + SumcheckProof::::compute_eval_points_cubic_with_additive_term( &self.poly_tau, &self.poly_Az, &self.poly_Bz, @@ -658,13 +658,13 @@ impl SumcheckEngine for OuterSumcheckInstance { &comb_func, ); - let comb_func2 = |poly_A_comp: &G::Scalar, - poly_B_comp: &G::Scalar, - _poly_C_comp: &G::Scalar| - -> G::Scalar { *poly_A_comp * *poly_B_comp }; + let comb_func2 = |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + _poly_C_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * *poly_B_comp }; let (eval_point_e_0, eval_point_e_2, eval_point_e_3) = - SumcheckProof::::compute_eval_points_cubic( + SumcheckProof::::compute_eval_points_cubic( &self.poly_tau, &self.poly_Mz, &self.poly_zero, @@ -677,7 +677,7 @@ impl SumcheckEngine for OuterSumcheckInstance { ] } - fn bound(&mut self, r: &G::Scalar) { + fn bound(&mut self, r: &E::Scalar) { [ &mut self.poly_tau, &mut self.poly_Az, @@ -689,20 +689,20 @@ impl SumcheckEngine for OuterSumcheckInstance { .for_each(|poly| poly.bind_poly_var_top(r)); } - fn final_claims(&self) -> Vec> { + fn final_claims(&self) -> Vec> { vec![vec![self.poly_Az[0], self.poly_Bz[0]]] } } -struct InnerSumcheckInstance { - claim: G::Scalar, - poly_L_row: MultilinearPolynomial, - poly_L_col: MultilinearPolynomial, - poly_val: MultilinearPolynomial, +struct InnerSumcheckInstance { + claim: E::Scalar, + poly_L_row: MultilinearPolynomial, + poly_L_col: MultilinearPolynomial, + poly_val: MultilinearPolynomial, } -impl SumcheckEngine for InnerSumcheckInstance { - fn initial_claims(&self) -> Vec { +impl SumcheckEngine for InnerSumcheckInstance { + fn initial_claims(&self) -> Vec { vec![self.claim] } @@ -716,20 +716,20 @@ impl SumcheckEngine for InnerSumcheckInstance { self.poly_L_row.len() } - fn evaluation_points(&self) -> Vec> { + fn evaluation_points(&self) -> Vec> { let (poly_A, poly_B, poly_C) = (&self.poly_L_row, &self.poly_L_col, &self.poly_val); - let comb_func = |poly_A_comp: &G::Scalar, - poly_B_comp: &G::Scalar, - poly_C_comp: &G::Scalar| - -> G::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp }; + let comb_func = |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp }; let (eval_point_0, eval_point_2, eval_point_3) = - SumcheckProof::::compute_eval_points_cubic(poly_A, poly_B, poly_C, &comb_func); + SumcheckProof::::compute_eval_points_cubic(poly_A, poly_B, poly_C, &comb_func); vec![vec![eval_point_0, eval_point_2, eval_point_3]] } - fn bound(&mut self, r: &G::Scalar) { + fn bound(&mut self, r: &E::Scalar) { [ &mut self.poly_L_row, &mut self.poly_L_col, @@ -739,7 +739,7 @@ impl SumcheckEngine for InnerSumcheckInstance { .for_each(|poly| poly.bind_poly_var_top(r)); } - fn final_claims(&self) -> Vec> { + fn final_claims(&self) -> Vec> { vec![vec![self.poly_L_row[0], self.poly_L_col[0]]] } } @@ -747,109 +747,109 @@ impl SumcheckEngine for InnerSumcheckInstance { /// A type that represents the prover's key #[derive(Clone, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] -#[abomonation_bounds(where ::Repr: Abomonation)] -pub struct ProverKey> { +#[abomonation_bounds(where ::Repr: Abomonation)] +pub struct ProverKey> { pk_ee: EE::ProverKey, - S_repr: R1CSShapeSparkRepr, - S_comm: R1CSShapeSparkCommitment, - #[abomonate_with(::Repr)] - vk_digest: G::Scalar, // digest of verifier's key + S_repr: 
R1CSShapeSparkRepr, + S_comm: R1CSShapeSparkCommitment, + #[abomonate_with(::Repr)] + vk_digest: E::Scalar, // digest of verifier's key } /// A type that represents the verifier's key #[derive(Clone, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] -#[abomonation_bounds(where ::Repr: Abomonation)] -pub struct VerifierKey> { +#[abomonation_bounds(where ::Repr: Abomonation)] +pub struct VerifierKey> { num_cons: usize, num_vars: usize, vk_ee: EE::VerifierKey, - S_comm: R1CSShapeSparkCommitment, + S_comm: R1CSShapeSparkCommitment, #[abomonation_skip] #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, + digest: OnceCell, } -impl> SimpleDigestible for VerifierKey {} +impl> SimpleDigestible for VerifierKey {} /// A succinct proof of knowledge of a witness to a relaxed R1CS instance /// The proof is produced using Spartan's combination of the sum-check and /// the commitment to a vector viewed as a polynomial commitment #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "")] -pub struct RelaxedR1CSSNARK> { +pub struct RelaxedR1CSSNARK> { // commitment to oracles: the first three are for Az, Bz, Cz, // and the last two are for memory reads - comm_Az: CompressedCommitment, - comm_Bz: CompressedCommitment, - comm_Cz: CompressedCommitment, - comm_L_row: CompressedCommitment, - comm_L_col: CompressedCommitment, + comm_Az: CompressedCommitment, + comm_Bz: CompressedCommitment, + comm_Cz: CompressedCommitment, + comm_L_row: CompressedCommitment, + comm_L_col: CompressedCommitment, // commitments to aid the memory checks - comm_t_plus_r_inv_row: CompressedCommitment, - comm_w_plus_r_inv_row: CompressedCommitment, - comm_t_plus_r_inv_col: CompressedCommitment, - comm_w_plus_r_inv_col: CompressedCommitment, + comm_t_plus_r_inv_row: CompressedCommitment, + comm_w_plus_r_inv_row: CompressedCommitment, + comm_t_plus_r_inv_col: CompressedCommitment, + comm_w_plus_r_inv_col: CompressedCommitment, // claims about Az, Bz, and Cz polynomials - eval_Az_at_tau: G::Scalar, - eval_Bz_at_tau: G::Scalar, - eval_Cz_at_tau: G::Scalar, + eval_Az_at_tau: E::Scalar, + eval_Bz_at_tau: E::Scalar, + eval_Cz_at_tau: E::Scalar, // sum-check - sc: SumcheckProof, + sc: SumcheckProof, // claims from the end of sum-check - eval_Az: G::Scalar, - eval_Bz: G::Scalar, - eval_Cz: G::Scalar, - eval_E: G::Scalar, - eval_L_row: G::Scalar, - eval_L_col: G::Scalar, - eval_val_A: G::Scalar, - eval_val_B: G::Scalar, - eval_val_C: G::Scalar, - - eval_W: G::Scalar, - - eval_t_plus_r_inv_row: G::Scalar, - eval_row: G::Scalar, // address - eval_w_plus_r_inv_row: G::Scalar, - eval_ts_row: G::Scalar, - - eval_t_plus_r_inv_col: G::Scalar, - eval_col: G::Scalar, // address - eval_w_plus_r_inv_col: G::Scalar, - eval_ts_col: G::Scalar, + eval_Az: E::Scalar, + eval_Bz: E::Scalar, + eval_Cz: E::Scalar, + eval_E: E::Scalar, + eval_L_row: E::Scalar, + eval_L_col: E::Scalar, + eval_val_A: E::Scalar, + eval_val_B: E::Scalar, + eval_val_C: E::Scalar, + + eval_W: E::Scalar, + + eval_t_plus_r_inv_row: E::Scalar, + eval_row: E::Scalar, // address + eval_w_plus_r_inv_row: E::Scalar, + eval_ts_row: E::Scalar, + + eval_t_plus_r_inv_col: E::Scalar, + eval_col: E::Scalar, // address + eval_w_plus_r_inv_col: E::Scalar, + eval_ts_col: E::Scalar, // a PCS evaluation argument eval_arg: EE::EvaluationArgument, } -impl> RelaxedR1CSSNARK +impl> RelaxedR1CSSNARK where - ::Repr: Abomonation, + ::Repr: Abomonation, { fn prove_helper( mem: &mut T1, outer: &mut T2, inner: &mut T3, - transcript: &mut G::TE, + transcript: &mut E::TE, ) -> Result< ( - 
SumcheckProof, - Vec, - Vec>, - Vec>, - Vec>, + SumcheckProof, + Vec, + Vec>, + Vec>, + Vec>, ), NovaError, > where - T1: SumcheckEngine, - T2: SumcheckEngine, - T3: SumcheckEngine, + T1: SumcheckEngine, + T2: SumcheckEngine, + T3: SumcheckEngine, { // sanity checks assert_eq!(mem.size(), outer.size()); @@ -863,10 +863,10 @@ where .into_iter() .chain(outer.initial_claims()) .chain(inner.initial_claims()) - .collect::>(); + .collect::>(); let s = transcript.squeeze(b"r")?; - let coeffs = powers::(&s, claims.len()); + let coeffs = powers::(&s, claims.len()); // compute the joint claim let claim = claims @@ -876,8 +876,8 @@ where .sum(); let mut e = claim; - let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec> = Vec::new(); + let mut r: Vec = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); let num_rounds = mem.size().log_2(); for _ in 0..num_rounds { let (evals_mem, (evals_outer, evals_inner)) = rayon::join( @@ -885,11 +885,11 @@ where || rayon::join(|| outer.evaluation_points(), || inner.evaluation_points()), ); - let evals: Vec> = evals_mem + let evals: Vec> = evals_mem .into_iter() .chain(evals_outer.into_iter()) .chain(evals_inner.into_iter()) - .collect::>>(); + .collect::>>(); assert_eq!(evals.len(), claims.len()); let evals_combined_0 = (0..evals.len()).map(|i| evals[i][0] * coeffs[i]).sum(); @@ -934,11 +934,11 @@ where } } -impl> VerifierKey { +impl> VerifierKey { fn new( num_cons: usize, num_vars: usize, - S_comm: R1CSShapeSparkCommitment, + S_comm: R1CSShapeSparkCommitment, vk_ee: EE::VerifierKey, ) -> Self { VerifierKey { @@ -950,9 +950,9 @@ impl> VerifierKey { } } } -impl> DigestHelperTrait for VerifierKey { +impl> DigestHelperTrait for VerifierKey { /// Returns the digest of the verifier's key - fn digest(&self) -> G::Scalar { + fn digest(&self) -> E::Scalar { self .digest .get_or_try_init(|| { @@ -964,23 +964,23 @@ impl> DigestHelperTrait for VerifierKe } } -impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK +impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK where - ::Repr: Abomonation, + ::Repr: Abomonation, { - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - Box::new(|shape: &R1CSShape| -> usize { + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { + Box::new(|shape: &R1CSShape| -> usize { // the commitment key should be large enough to commit to the R1CS matrices shape.A.len() + shape.B.len() + shape.C.len() }) } fn setup( - ck: &CommitmentKey, - S: &R1CSShape, + ck: &CommitmentKey, + S: &R1CSShape, ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { // check the provided commitment key meets minimal requirements if ck.length() < Self::ck_floor()(S) { @@ -1009,11 +1009,11 @@ where /// produces a succinct proof of satisfiability of a `RelaxedR1CS` instance #[tracing::instrument(skip_all, name = "PPSNARK::prove")] fn prove( - ck: &CommitmentKey, + ck: &CommitmentKey, pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, ) -> Result { // pad the R1CSShape let S = S.pad(); @@ -1021,7 +1021,7 @@ where assert!(S.is_regular_shape()); let W = W.pad(&S); // pad the witness - let mut transcript = G::TE::new(b"RelaxedR1CSSNARK"); + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); // append the verifier key (which includes commitment to R1CS matrices) and the RelaxedR1CSInstance to the transcript transcript.absorb(b"vk", 
&pk.vk_digest); @@ -1035,8 +1035,8 @@ where // commit to Az, Bz, Cz let (comm_Az, (comm_Bz, comm_Cz)) = rayon::join( - || G::CE::commit(ck, &Az), - || rayon::join(|| G::CE::commit(ck, &Bz), || G::CE::commit(ck, &Cz)), + || E::CE::commit(ck, &Az), + || rayon::join(|| E::CE::commit(ck, &Bz), || E::CE::commit(ck, &Cz)), ); transcript.absorb(b"c", &[comm_Az, comm_Bz, comm_Cz].as_slice()); @@ -1048,11 +1048,11 @@ where // (1) send commitments to Az, Bz, and Cz along with their evaluations at tau let (Az, Bz, Cz, W, E) = { - Az.resize(pk.S_repr.N, G::Scalar::ZERO); - Bz.resize(pk.S_repr.N, G::Scalar::ZERO); - Cz.resize(pk.S_repr.N, G::Scalar::ZERO); - let E = padded::(&W.E, pk.S_repr.N, &G::Scalar::ZERO); - let W = padded::(&W.W, pk.S_repr.N, &G::Scalar::ZERO); + Az.resize(pk.S_repr.N, E::Scalar::ZERO); + Bz.resize(pk.S_repr.N, E::Scalar::ZERO); + Cz.resize(pk.S_repr.N, E::Scalar::ZERO); + let E = padded::(&W.E, pk.S_repr.N, &E::Scalar::ZERO); + let W = padded::(&W.W, pk.S_repr.N, &E::Scalar::ZERO); (Az, Bz, Cz, W, E) }; @@ -1060,7 +1060,7 @@ where let evals_at_tau = [&Az, &Bz, &Cz] .into_par_iter() .map(|p| MultilinearPolynomial::evaluate_with(p, &tau_coords)) - .collect::>(); + .collect::>(); (evals_at_tau[0], evals_at_tau[1], evals_at_tau[2]) }; @@ -1069,7 +1069,7 @@ where // L_col(i) = z(col(i)) for all i let (mem_row, mem_col, L_row, L_col) = pk.S_repr.evaluation_oracles(&S, &tau, &z); let (comm_L_row, comm_L_col) = - rayon::join(|| G::CE::commit(ck, &L_row), || G::CE::commit(ck, &L_col)); + rayon::join(|| E::CE::commit(ck, &L_row), || E::CE::commit(ck, &L_col)); // absorb the claimed evaluations into the transcript transcript.absorb( @@ -1086,8 +1086,8 @@ where let poly_vec = vec![&Az, &Bz, &Cz]; transcript.absorb(b"e", &eval_vec.as_slice()); // c_vec is already in the transcript let c = transcript.squeeze(b"c")?; - let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); - let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, &tau_coords, &eval_vec, &c); + let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); + let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, &tau_coords, &eval_vec, &c); // we now need to prove three claims // (1) 0 = \sum_x poly_tau(x) * (poly_Az(x) * poly_Bz(x) - poly_uCz_E(x)), and eval_Az_at_tau + r * eval_Bz_at_tau + r^2 * eval_Cz_at_tau = (Az+r*Bz+r^2*Cz)(tau) @@ -1105,7 +1105,7 @@ where Bz.clone(), (0..Cz.len()) .map(|i| U.u * Cz[i] + E[i]) - .collect::>(), + .collect::>(), w.p.clone(), // Mz = Az + r * Bz + r^2 * Cz &u.e, // eval_Az_at_tau + r * eval_Az_at_tau + r^2 * eval_Cz_at_tau ); @@ -1118,7 +1118,7 @@ where .zip(pk.S_repr.val_B.par_iter()) .zip(pk.S_repr.val_C.par_iter()) .map(|((v_a, v_b), v_c)| *v_a + c * *v_b + c * c * *v_c) - .collect::>(); + .collect::>(); let inner_sc_inst = InnerSumcheckInstance { claim: eval_Az_at_tau + c * eval_Bz_at_tau + c * c * eval_Cz_at_tau, poly_L_row: MultilinearPolynomial::new(L_row.clone()), @@ -1133,22 +1133,22 @@ where // we now need to prove that L_row and L_col are well-formed // hash the tuples of (addr,val) memory contents and read responses into a single field element using `hash_func` - let hash_func_vec = |mem: &[G::Scalar], - addr: &[G::Scalar], - lookups: &[G::Scalar]| - -> (Vec, Vec) { - let hash_func = |addr: &G::Scalar, val: &G::Scalar| -> G::Scalar { *val * gamma + *addr }; + let hash_func_vec = |mem: &[E::Scalar], + addr: &[E::Scalar], + lookups: &[E::Scalar]| + -> (Vec, Vec) { + let hash_func = |addr: &E::Scalar, val: &E::Scalar| -> E::Scalar { *val * gamma + *addr }; 
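      // `hash_func` packs an (addr, val) pair into a single field element as val * gamma + addr;
      // the two closures below apply it to the memory contents (keyed by their position i) and
      // to the (addr, lookup) read responses, and the resulting vectors feed the t_plus_r_inv
      // and w_plus_r_inv terms of the memory-checking argument.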
assert_eq!(addr.len(), lookups.len()); rayon::join( || { (0..mem.len()) - .map(|i| hash_func(&G::Scalar::from(i as u64), &mem[i])) - .collect::>() + .map(|i| hash_func(&E::Scalar::from(i as u64), &mem[i])) + .collect::>() }, || { (0..addr.len()) .map(|i| hash_func(&addr[i], &lookups[i])) - .collect::>() + .collect::>() }, ) }; @@ -1210,7 +1210,7 @@ where ] .into_par_iter() .map(|p| MultilinearPolynomial::evaluate_with(p, &rand_sc)) - .collect::>(); + .collect::>(); (e[0], e[1], e[2], e[3], e[4], e[5], e[6], e[7]) }; @@ -1236,7 +1236,7 @@ where eval_ts_col, ] .into_iter() - .collect::>(); + .collect::>(); let comm_vec = [ U.comm_W, @@ -1280,8 +1280,8 @@ where ]; transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript let c = transcript.squeeze(b"c")?; - let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); - let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, &rand_sc, &eval_vec, &c); + let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); + let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, &rand_sc, &eval_vec, &c); let eval_arg = EE::prove(ck, &pk.pk_ee, &mut transcript, &u.c, &w.p, &rand_sc, &u.e)?; @@ -1330,22 +1330,22 @@ where } /// verifies a proof of satisfiability of a `RelaxedR1CS` instance - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { - let mut transcript = G::TE::new(b"RelaxedR1CSSNARK"); + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); // append the verifier key (including commitment to R1CS matrices) and the RelaxedR1CSInstance to the transcript transcript.absorb(b"vk", &vk.digest()); transcript.absorb(b"U", U); - let comm_Az = Commitment::::decompress(&self.comm_Az)?; - let comm_Bz = Commitment::::decompress(&self.comm_Bz)?; - let comm_Cz = Commitment::::decompress(&self.comm_Cz)?; - let comm_L_row = Commitment::::decompress(&self.comm_L_row)?; - let comm_L_col = Commitment::::decompress(&self.comm_L_col)?; - let comm_t_plus_r_inv_row = Commitment::::decompress(&self.comm_t_plus_r_inv_row)?; - let comm_w_plus_r_inv_row = Commitment::::decompress(&self.comm_w_plus_r_inv_row)?; - let comm_t_plus_r_inv_col = Commitment::::decompress(&self.comm_t_plus_r_inv_col)?; - let comm_w_plus_r_inv_col = Commitment::::decompress(&self.comm_w_plus_r_inv_col)?; + let comm_Az = Commitment::::decompress(&self.comm_Az)?; + let comm_Bz = Commitment::::decompress(&self.comm_Bz)?; + let comm_Cz = Commitment::::decompress(&self.comm_Cz)?; + let comm_L_row = Commitment::::decompress(&self.comm_L_row)?; + let comm_L_col = Commitment::::decompress(&self.comm_L_col)?; + let comm_t_plus_r_inv_row = Commitment::::decompress(&self.comm_t_plus_r_inv_row)?; + let comm_w_plus_r_inv_row = Commitment::::decompress(&self.comm_w_plus_r_inv_row)?; + let comm_t_plus_r_inv_col = Commitment::::decompress(&self.comm_t_plus_r_inv_col)?; + let comm_w_plus_r_inv_col = Commitment::::decompress(&self.comm_w_plus_r_inv_col)?; transcript.absorb(b"c", &[comm_Az, comm_Bz, comm_Cz].as_slice()); @@ -1376,7 +1376,7 @@ where let comm_vec = vec![comm_Az, comm_Bz, comm_Cz]; transcript.absorb(b"e", &eval_vec.as_slice()); // c_vec is already in the transcript let c = transcript.squeeze(b"c")?; - let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, &tau_coords, &eval_vec, &c); + let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, &tau_coords, &eval_vec, &c); let claim = u.e; let gamma = 
transcript.squeeze(b"g")?; @@ -1398,7 +1398,7 @@ where let num_claims = 9; let s = transcript.squeeze(b"r")?; - let coeffs = powers::(&s, num_claims); + let coeffs = powers::(&s, num_claims); let claim = (coeffs[7] + coeffs[8]) * claim; // rest are zeros // verify sc @@ -1435,9 +1435,9 @@ where let (factor, rand_sc_unpad) = { let l = vk.S_comm.N.log_2() - (2 * vk.num_vars).log_2(); - let mut factor = G::Scalar::ONE; + let mut factor = E::Scalar::ONE; for r_p in rand_sc.iter().take(l) { - factor *= G::Scalar::ONE - r_p + factor *= E::Scalar::ONE - r_p } let rand_sc_unpad = { @@ -1455,7 +1455,7 @@ where poly_X.extend( (0..U.X.len()) .map(|i| (i + 1, U.X[i])) - .collect::>(), + .collect::>(), ); SparsePolynomial::new(vk.num_vars.log_2(), poly_X).evaluate(&rand_sc_unpad[1..]) }; @@ -1473,7 +1473,7 @@ where eval_w + r }; - let claim_mem_final_expected: G::Scalar = coeffs[0] + let claim_mem_final_expected: E::Scalar = coeffs[0] * (self.eval_t_plus_r_inv_row - self.eval_w_plus_r_inv_row) + coeffs[1] * (self.eval_t_plus_r_inv_col - self.eval_w_plus_r_inv_col) + coeffs[2] @@ -1481,13 +1481,13 @@ where * (self.eval_t_plus_r_inv_row * eval_t_plus_r_row - self.eval_ts_row)) + coeffs[3] * (rand_eq_bound_rand_sc - * (self.eval_w_plus_r_inv_row * eval_w_plus_r_row - G::Scalar::ONE)) + * (self.eval_w_plus_r_inv_row * eval_w_plus_r_row - E::Scalar::ONE)) + coeffs[4] * (rand_eq_bound_rand_sc * (self.eval_t_plus_r_inv_col * eval_t_plus_r_col - self.eval_ts_col)) + coeffs[5] * (rand_eq_bound_rand_sc - * (self.eval_w_plus_r_inv_col * eval_w_plus_r_col - G::Scalar::ONE)); + * (self.eval_w_plus_r_inv_col * eval_w_plus_r_col - E::Scalar::ONE)); let claim_outer_final_expected = coeffs[6] * taus_bound_rand_sc @@ -1526,7 +1526,7 @@ where self.eval_ts_col, ] .into_iter() - .collect::>(); + .collect::>(); let comm_vec = [ U.comm_W, comm_Az, @@ -1549,7 +1549,7 @@ where ]; transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript let c = transcript.squeeze(b"c")?; - let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, &rand_sc, &eval_vec, &c); + let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, &rand_sc, &eval_vec, &c); // verify EE::verify( diff --git a/src/spartan/snark.rs b/src/spartan/snark.rs index 2f3ae02bf..f9f2d52c8 100644 --- a/src/spartan/snark.rs +++ b/src/spartan/snark.rs @@ -17,7 +17,7 @@ use crate::{ traits::{ evaluation::EvaluationEngineTrait, snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait}, - Group, TranscriptEngineTrait, + Engine, TranscriptEngineTrait, }, Commitment, CommitmentKey, }; @@ -33,29 +33,29 @@ use serde::{Deserialize, Serialize}; /// A type that represents the prover's key #[derive(Clone, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] -#[abomonation_bounds(where ::Repr: Abomonation)] -pub struct ProverKey> { +#[abomonation_bounds(where ::Repr: Abomonation)] +pub struct ProverKey> { pk_ee: EE::ProverKey, - #[abomonate_with(::Repr)] - vk_digest: G::Scalar, // digest of the verifier's key + #[abomonate_with(::Repr)] + vk_digest: E::Scalar, // digest of the verifier's key } /// A type that represents the verifier's key #[derive(Clone, Serialize, Deserialize, Abomonation)] #[serde(bound = "")] -#[abomonation_bounds(where ::Repr: Abomonation)] -pub struct VerifierKey> { +#[abomonation_bounds(where ::Repr: Abomonation)] +pub struct VerifierKey> { vk_ee: EE::VerifierKey, - S: R1CSShape, + S: R1CSShape, #[abomonation_skip] #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, + digest: OnceCell, } -impl> SimpleDigestible for 
VerifierKey {} +impl> SimpleDigestible for VerifierKey {} -impl> VerifierKey { - fn new(shape: R1CSShape, vk_ee: EE::VerifierKey) -> Self { +impl> VerifierKey { + fn new(shape: R1CSShape, vk_ee: EE::VerifierKey) -> Self { VerifierKey { vk_ee, S: shape, @@ -64,13 +64,13 @@ impl> VerifierKey { } } -impl> DigestHelperTrait for VerifierKey { +impl> DigestHelperTrait for VerifierKey { /// Returns the digest of the verifier's key. - fn digest(&self) -> G::Scalar { + fn digest(&self) -> E::Scalar { self .digest .get_or_try_init(|| { - let dc = DigestComputer::::new(self); + let dc = DigestComputer::::new(self); dc.digest() }) .cloned() @@ -83,33 +83,33 @@ impl> DigestHelperTrait for VerifierKe /// the commitment to a vector viewed as a polynomial commitment #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "")] -pub struct RelaxedR1CSSNARK> { - sc_proof_outer: SumcheckProof, - claims_outer: (G::Scalar, G::Scalar, G::Scalar), - eval_E: G::Scalar, - sc_proof_inner: SumcheckProof, - eval_W: G::Scalar, - sc_proof_batch: SumcheckProof, - evals_batch: Vec, +pub struct RelaxedR1CSSNARK> { + sc_proof_outer: SumcheckProof, + claims_outer: (E::Scalar, E::Scalar, E::Scalar), + eval_E: E::Scalar, + sc_proof_inner: SumcheckProof, + eval_W: E::Scalar, + sc_proof_batch: SumcheckProof, + evals_batch: Vec, eval_arg: EE::EvaluationArgument, } -impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK +impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK where - ::Repr: Abomonation, + ::Repr: Abomonation, { - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; fn setup( - ck: &CommitmentKey, - S: &R1CSShape, + ck: &CommitmentKey, + S: &R1CSShape, ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { let (pk_ee, vk_ee) = EE::setup(ck); let S = S.pad(); - let vk: VerifierKey = VerifierKey::new(S, vk_ee); + let vk: VerifierKey = VerifierKey::new(S, vk_ee); let pk = ProverKey { pk_ee, @@ -122,11 +122,11 @@ where /// produces a succinct proof of satisfiability of a `RelaxedR1CS` instance #[tracing::instrument(skip_all, name = "SNARK::prove")] fn prove( - ck: &CommitmentKey, + ck: &CommitmentKey, pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, ) -> Result { // pad the R1CSShape let S = S.pad(); @@ -134,7 +134,7 @@ where assert!(S.is_regular_shape()); let W = W.pad(&S); // pad the witness - let mut transcript = G::TE::new(b"RelaxedR1CSSNARK"); + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); // append the digest of vk (which includes R1CS matrices) and the RelaxedR1CSInstance to the transcript transcript.absorb(b"vk", &pk.vk_digest); @@ -151,14 +151,14 @@ where // outer sum-check let tau = (0..num_rounds_x) .map(|_i| transcript.squeeze(b"t")) - .collect::, NovaError>>()?; + .collect::, NovaError>>()?; let mut poly_tau = MultilinearPolynomial::new(EqPolynomial::new(tau).evals()); let (mut poly_Az, mut poly_Bz, poly_Cz, mut poly_uCz_E) = { let (poly_Az, poly_Bz, poly_Cz) = S.multiply_vec(&z)?; let poly_uCz_E = (0..S.num_cons) .map(|i| U.u * poly_Cz[i] + W.E[i]) - .collect::>(); + .collect::>(); ( MultilinearPolynomial::new(poly_Az), MultilinearPolynomial::new(poly_Bz), @@ -168,13 +168,13 @@ where }; let comb_func_outer = - |poly_A_comp: &G::Scalar, - poly_B_comp: &G::Scalar, - poly_C_comp: &G::Scalar, - poly_D_comp: &G::Scalar| - -> G::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + |poly_A_comp: 
&E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; let (sc_proof_outer, r_x, claims_outer) = SumcheckProof::prove_cubic_with_additive_term( - &G::Scalar::ZERO, // claim is zero + &E::Scalar::ZERO, // claim is zero num_rounds_x, &mut poly_tau, &mut poly_Az, @@ -185,7 +185,7 @@ where )?; // claims from the end of sum-check - let (claim_Az, claim_Bz): (G::Scalar, G::Scalar) = (claims_outer[1], claims_outer[2]); + let (claim_Az, claim_Bz): (E::Scalar, E::Scalar) = (claims_outer[1], claims_outer[2]); let claim_Cz = poly_Cz.evaluate(&r_x); let eval_E = MultilinearPolynomial::new(W.E.clone()).evaluate(&r_x); transcript.absorb( @@ -203,10 +203,10 @@ where // Bounds "row" variables of (A, B, C) matrices viewed as 2d multilinear polynomials let compute_eval_table_sparse = - |S: &R1CSShape, rx: &[G::Scalar]| -> (Vec, Vec, Vec) { + |S: &R1CSShape, rx: &[E::Scalar]| -> (Vec, Vec, Vec) { assert_eq!(rx.len(), S.num_cons); - let inner = |M: &SparseMatrix, M_evals: &mut Vec| { + let inner = |M: &SparseMatrix, M_evals: &mut Vec| { for (row_idx, ptrs) in M.indptr.windows(2).enumerate() { for (val, col_idx) in M.get_row_unchecked(ptrs.try_into().unwrap()) { M_evals[*col_idx] += rx[row_idx] * val; @@ -216,19 +216,19 @@ where let (A_evals, (B_evals, C_evals)) = rayon::join( || { - let mut A_evals: Vec = vec![G::Scalar::ZERO; 2 * S.num_vars]; + let mut A_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; inner(&S.A, &mut A_evals); A_evals }, || { rayon::join( || { - let mut B_evals: Vec = vec![G::Scalar::ZERO; 2 * S.num_vars]; + let mut B_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; inner(&S.B, &mut B_evals); B_evals }, || { - let mut C_evals: Vec = vec![G::Scalar::ZERO; 2 * S.num_vars]; + let mut C_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; inner(&S.C, &mut C_evals); C_evals }, @@ -246,15 +246,15 @@ where (0..evals_A.len()) .into_par_iter() .map(|i| evals_A[i] + r * evals_B[i] + r * r * evals_C[i]) - .collect::>() + .collect::>() }; let poly_z = { - z.resize(S.num_vars * 2, G::Scalar::ZERO); + z.resize(S.num_vars * 2, E::Scalar::ZERO); z }; - let comb_func = |poly_A_comp: &G::Scalar, poly_B_comp: &G::Scalar| -> G::Scalar { + let comb_func = |poly_A_comp: &E::Scalar, poly_B_comp: &E::Scalar| -> E::Scalar { *poly_A_comp * *poly_B_comp }; let (sc_proof_inner, r_y, _claims_inner) = SumcheckProof::prove_quad( @@ -296,7 +296,7 @@ where // to the batched polynomial. 
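    // Each entry of `w_u_vec` pairs a polynomial (the witness) with its claimed evaluation
    // (the instance); `batch_eval_prove` below folds all of these, using powers of a verifier
    // challenge inside a quadratic sum-check, into a single evaluation claim that the
    // evaluation engine `EE` then proves once.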
assert!(w_u_vec.len() >= 2); - let (w_vec, u_vec): (Vec>, Vec>) = + let (w_vec, u_vec): (Vec>, Vec>) = w_u_vec.into_iter().unzip(); let (batched_u, batched_w, sc_proof_batch, claims_batch_left) = @@ -325,8 +325,8 @@ where } /// verifies a proof of satisfiability of a `RelaxedR1CS` instance - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { - let mut transcript = G::TE::new(b"RelaxedR1CSSNARK"); + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); // append the digest of R1CS matrices and the RelaxedR1CSInstance to the transcript transcript.absorb(b"vk", &vk.digest()); @@ -340,12 +340,12 @@ where // outer sum-check let tau = (0..num_rounds_x) .map(|_i| transcript.squeeze(b"t")) - .collect::, NovaError>>()?; + .collect::, NovaError>>()?; let (claim_outer_final, r_x) = self .sc_proof_outer - .verify(G::Scalar::ZERO, num_rounds_x, 3, &mut transcript)?; + .verify(E::Scalar::ZERO, num_rounds_x, 3, &mut transcript)?; // verify claim_outer_final let (claim_Az, claim_Bz, claim_Cz) = self.claims_outer; @@ -386,28 +386,28 @@ where poly_X.extend( (0..U.X.len()) .map(|i| (i + 1, U.X[i])) - .collect::>(), + .collect::>(), ); SparsePolynomial::new(usize::try_from(vk.S.num_vars.ilog2()).unwrap(), poly_X) .evaluate(&r_y[1..]) }; - (G::Scalar::ONE - r_y[0]) * self.eval_W + r_y[0] * eval_X + (E::Scalar::ONE - r_y[0]) * self.eval_W + r_y[0] * eval_X }; // compute evaluations of R1CS matrices - let multi_evaluate = |M_vec: &[&SparseMatrix], - r_x: &[G::Scalar], - r_y: &[G::Scalar]| - -> Vec { + let multi_evaluate = |M_vec: &[&SparseMatrix], + r_x: &[E::Scalar], + r_y: &[E::Scalar]| + -> Vec { let evaluate_with_table = - |M: &SparseMatrix, T_x: &[G::Scalar], T_y: &[G::Scalar]| -> G::Scalar { + |M: &SparseMatrix, T_x: &[E::Scalar], T_y: &[E::Scalar]| -> E::Scalar { M.indptr .par_windows(2) .enumerate() .map(|(row_idx, ptrs)| { M.get_row_unchecked(ptrs.try_into().unwrap()) .map(|(val, col_idx)| T_x[row_idx] * T_y[*col_idx] * val) - .sum::() + .sum::() }) .sum() }; @@ -431,7 +431,7 @@ where } // add claims about W and E polynomials - let u_vec: Vec> = vec![ + let u_vec: Vec> = vec![ PolyEvalInstance { c: U.comm_W, x: r_y[1..].to_vec(), @@ -467,16 +467,16 @@ where /// Proves a batch of polynomial evaluation claims using Sumcheck /// reducing them to a single claim at the same point. 
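// Protocol sketch: the claims (c_i, x_i, e_i) are first combined with powers of a challenge rho
// into the joint claim \sum_i rho^i * e_i; a quadratic sum-check over
// \sum_i rho^i * w_i(y) * eq(x_i, y) reduces them to evaluations of each w_i at a common point
// r_z, and a second challenge gamma folds those evaluations (and the commitments c_i) into a
// single (commitment, point, evaluation) triple.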
-fn batch_eval_prove( - u_vec: Vec>, - w_vec: Vec>, - transcript: &mut G::TE, +fn batch_eval_prove( + u_vec: Vec>, + w_vec: Vec>, + transcript: &mut E::TE, ) -> Result< ( - PolyEvalInstance, - PolyEvalWitness, - SumcheckProof, - Vec, + PolyEvalInstance, + PolyEvalWitness, + SumcheckProof, + Vec, ), NovaError, > { @@ -488,25 +488,25 @@ fn batch_eval_prove( // generate a challenge let rho = transcript.squeeze(b"r")?; let num_claims = w_vec_padded.len(); - let powers_of_rho = powers::(&rho, num_claims); + let powers_of_rho = powers::(&rho, num_claims); let claim_batch_joint = u_vec_padded .iter() .zip(powers_of_rho.iter()) .map(|(u, p)| u.e * p) .sum(); - let mut polys_left: Vec> = w_vec_padded + let mut polys_left: Vec> = w_vec_padded .iter() .map(|w| MultilinearPolynomial::new(w.p.clone())) .collect(); - let mut polys_right: Vec> = u_vec_padded + let mut polys_right: Vec> = u_vec_padded .iter() .map(|u| MultilinearPolynomial::new(EqPolynomial::new(u.x.clone()).evals())) .collect(); let num_rounds_z = u_vec_padded[0].x.len(); let comb_func = - |poly_A_comp: &G::Scalar, poly_B_comp: &G::Scalar| -> G::Scalar { *poly_A_comp * *poly_B_comp }; + |poly_A_comp: &E::Scalar, poly_B_comp: &E::Scalar| -> E::Scalar { *poly_A_comp * *poly_B_comp }; let (sc_proof_batch, r_z, claims_batch) = SumcheckProof::prove_quad_batch( &claim_batch_joint, num_rounds_z, @@ -517,18 +517,18 @@ fn batch_eval_prove( transcript, )?; - let (claims_batch_left, _): (Vec, Vec) = claims_batch; + let (claims_batch_left, _): (Vec, Vec) = claims_batch; transcript.absorb(b"l", &claims_batch_left.as_slice()); // we now combine evaluation claims at the same point rz into one let gamma = transcript.squeeze(b"g")?; - let powers_of_gamma: Vec = powers::(&gamma, num_claims); + let powers_of_gamma: Vec = powers::(&gamma, num_claims); let comm_joint = u_vec_padded .iter() .zip(powers_of_gamma.iter()) .map(|(u, g_i)| u.c * *g_i) - .fold(Commitment::::default(), |acc, item| acc + item); + .fold(Commitment::::default(), |acc, item| acc + item); let poly_joint = PolyEvalWitness::weighted_sum(&w_vec_padded, &powers_of_gamma); let eval_joint = claims_batch_left .iter() @@ -537,7 +537,7 @@ fn batch_eval_prove( .sum(); Ok(( - PolyEvalInstance:: { + PolyEvalInstance:: { c: comm_joint, x: r_z, e: eval_joint, @@ -550,12 +550,12 @@ fn batch_eval_prove( /// Verifies a batch of polynomial evaluation claims using Sumcheck /// reducing them to a single claim at the same point. 
-fn batch_eval_verify( - u_vec: Vec>, - transcript: &mut G::TE, - sc_proof_batch: &SumcheckProof, - evals_batch: &[G::Scalar], -) -> Result, NovaError> { +fn batch_eval_verify( + u_vec: Vec>, + transcript: &mut E::TE, + sc_proof_batch: &SumcheckProof, + evals_batch: &[E::Scalar], +) -> Result, NovaError> { assert_eq!(evals_batch.len(), evals_batch.len()); let u_vec_padded = PolyEvalInstance::pad(u_vec); // pad the evaluation points @@ -563,7 +563,7 @@ fn batch_eval_verify( // generate a challenge let rho = transcript.squeeze(b"r")?; let num_claims: usize = u_vec_padded.len(); - let powers_of_rho = powers::(&rho, num_claims); + let powers_of_rho = powers::(&rho, num_claims); let claim_batch_joint = u_vec_padded .iter() .zip(powers_of_rho.iter()) @@ -580,7 +580,7 @@ fn batch_eval_verify( let evals = u_vec_padded .iter() .map(|u| poly_rz.evaluate(&u.x)) - .collect::>(); + .collect::>(); evals .iter() @@ -598,19 +598,19 @@ fn batch_eval_verify( // we now combine evaluation claims at the same point rz into one let gamma = transcript.squeeze(b"g")?; - let powers_of_gamma: Vec = powers::(&gamma, num_claims); + let powers_of_gamma: Vec = powers::(&gamma, num_claims); let comm_joint = u_vec_padded .iter() .zip(powers_of_gamma.iter()) .map(|(u, g_i)| u.c * *g_i) - .fold(Commitment::::default(), |acc, item| acc + item); + .fold(Commitment::::default(), |acc, item| acc + item); let eval_joint = evals_batch .iter() .zip(powers_of_gamma.iter()) .map(|(e, g_i)| *e * *g_i) .sum(); - Ok(PolyEvalInstance:: { + Ok(PolyEvalInstance:: { c: comm_joint, x: r_z, e: eval_joint, diff --git a/src/spartan/sumcheck.rs b/src/spartan/sumcheck.rs index 6c9f13d8e..fdf168bef 100644 --- a/src/spartan/sumcheck.rs +++ b/src/spartan/sumcheck.rs @@ -3,31 +3,31 @@ use crate::spartan::polys::{ multilinear::MultilinearPolynomial, univariate::{CompressedUniPoly, UniPoly}, }; -use crate::traits::{Group, TranscriptEngineTrait}; +use crate::traits::{Engine, TranscriptEngineTrait}; use ff::Field; use rayon::prelude::*; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "")] -pub(crate) struct SumcheckProof { - compressed_polys: Vec>, +pub(crate) struct SumcheckProof { + compressed_polys: Vec>, } -impl SumcheckProof { - pub fn new(compressed_polys: Vec>) -> Self { +impl SumcheckProof { + pub fn new(compressed_polys: Vec>) -> Self { Self { compressed_polys } } pub fn verify( &self, - claim: G::Scalar, + claim: E::Scalar, num_rounds: usize, degree_bound: usize, - transcript: &mut G::TE, - ) -> Result<(G::Scalar, Vec), NovaError> { + transcript: &mut E::TE, + ) -> Result<(E::Scalar, Vec), NovaError> { let mut e = claim; - let mut r: Vec = Vec::new(); + let mut r: Vec = Vec::new(); // verify that there is a univariate polynomial for each round if self.compressed_polys.len() != num_rounds { @@ -63,12 +63,12 @@ impl SumcheckProof { #[inline] pub(in crate::spartan) fn compute_eval_points_quad( - poly_A: &MultilinearPolynomial, - poly_B: &MultilinearPolynomial, + poly_A: &MultilinearPolynomial, + poly_B: &MultilinearPolynomial, comb_func: &F, - ) -> (G::Scalar, G::Scalar) + ) -> (E::Scalar, E::Scalar) where - F: Fn(&G::Scalar, &G::Scalar) -> G::Scalar + Sync, + F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, { let len = poly_A.len() / 2; (0..len) @@ -84,24 +84,24 @@ impl SumcheckProof { (eval_point_0, eval_point_2) }) .reduce( - || (G::Scalar::ZERO, G::Scalar::ZERO), + || (E::Scalar::ZERO, E::Scalar::ZERO), |a, b| (a.0 + b.0, a.1 + b.1), ) } pub fn prove_quad( - claim: &G::Scalar, + claim: 
&E::Scalar, num_rounds: usize, - poly_A: &mut MultilinearPolynomial, - poly_B: &mut MultilinearPolynomial, + poly_A: &mut MultilinearPolynomial, + poly_B: &mut MultilinearPolynomial, comb_func: F, - transcript: &mut G::TE, - ) -> Result<(Self, Vec, Vec), NovaError> + transcript: &mut E::TE, + ) -> Result<(Self, Vec, Vec), NovaError> where - F: Fn(&G::Scalar, &G::Scalar) -> G::Scalar + Sync, + F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, { - let mut r: Vec = Vec::new(); - let mut polys: Vec> = Vec::new(); + let mut r: Vec = Vec::new(); + let mut polys: Vec> = Vec::new(); let mut claim_per_round = *claim; for _ in 0..num_rounds { let poly = { @@ -140,23 +140,23 @@ impl SumcheckProof { } pub fn prove_quad_batch( - claim: &G::Scalar, + claim: &E::Scalar, num_rounds: usize, - poly_A_vec: &mut Vec>, - poly_B_vec: &mut Vec>, - coeffs: &[G::Scalar], + poly_A_vec: &mut Vec>, + poly_B_vec: &mut Vec>, + coeffs: &[E::Scalar], comb_func: F, - transcript: &mut G::TE, - ) -> Result<(Self, Vec, (Vec, Vec)), NovaError> + transcript: &mut E::TE, + ) -> Result<(Self, Vec, (Vec, Vec)), NovaError> where - F: Fn(&G::Scalar, &G::Scalar) -> G::Scalar + Sync, + F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, { let mut e = *claim; - let mut r: Vec = Vec::new(); - let mut quad_polys: Vec> = Vec::new(); + let mut r: Vec = Vec::new(); + let mut quad_polys: Vec> = Vec::new(); for _ in 0..num_rounds { - let evals: Vec<(G::Scalar, G::Scalar)> = poly_A_vec + let evals: Vec<(E::Scalar, E::Scalar)> = poly_A_vec .par_iter() .zip(poly_B_vec.par_iter()) .map(|(poly_A, poly_B)| Self::compute_eval_points_quad(poly_A, poly_B, &comb_func)) @@ -199,13 +199,13 @@ impl SumcheckProof { #[inline] pub(in crate::spartan) fn compute_eval_points_cubic( - poly_A: &MultilinearPolynomial, - poly_B: &MultilinearPolynomial, - poly_C: &MultilinearPolynomial, + poly_A: &MultilinearPolynomial, + poly_B: &MultilinearPolynomial, + poly_C: &MultilinearPolynomial, comb_func: &F, - ) -> (G::Scalar, G::Scalar, G::Scalar) + ) -> (E::Scalar, E::Scalar, E::Scalar) where - F: Fn(&G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar + Sync, + F: Fn(&E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, { let len = poly_A.len() / 2; (0..len) @@ -236,21 +236,21 @@ impl SumcheckProof { (eval_point_0, eval_point_2, eval_point_3) }) .reduce( - || (G::Scalar::ZERO, G::Scalar::ZERO, G::Scalar::ZERO), + || (E::Scalar::ZERO, E::Scalar::ZERO, E::Scalar::ZERO), |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), ) } #[inline] pub(in crate::spartan) fn compute_eval_points_cubic_with_additive_term( - poly_A: &MultilinearPolynomial, - poly_B: &MultilinearPolynomial, - poly_C: &MultilinearPolynomial, - poly_D: &MultilinearPolynomial, + poly_A: &MultilinearPolynomial, + poly_B: &MultilinearPolynomial, + poly_C: &MultilinearPolynomial, + poly_D: &MultilinearPolynomial, comb_func: &F, - ) -> (G::Scalar, G::Scalar, G::Scalar) + ) -> (E::Scalar, E::Scalar, E::Scalar) where - F: Fn(&G::Scalar, &G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar + Sync, + F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, { let len = poly_A.len() / 2; (0..len) @@ -285,26 +285,26 @@ impl SumcheckProof { (eval_point_0, eval_point_2, eval_point_3) }) .reduce( - || (G::Scalar::ZERO, G::Scalar::ZERO, G::Scalar::ZERO), + || (E::Scalar::ZERO, E::Scalar::ZERO, E::Scalar::ZERO), |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), ) } pub fn prove_cubic_with_additive_term( - claim: &G::Scalar, + claim: &E::Scalar, num_rounds: usize, - poly_A: &mut MultilinearPolynomial, - poly_B: &mut 
MultilinearPolynomial, - poly_C: &mut MultilinearPolynomial, - poly_D: &mut MultilinearPolynomial, + poly_A: &mut MultilinearPolynomial, + poly_B: &mut MultilinearPolynomial, + poly_C: &mut MultilinearPolynomial, + poly_D: &mut MultilinearPolynomial, comb_func: F, - transcript: &mut G::TE, - ) -> Result<(Self, Vec, Vec), NovaError> + transcript: &mut E::TE, + ) -> Result<(Self, Vec, Vec), NovaError> where - F: Fn(&G::Scalar, &G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar + Sync, + F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, { - let mut r: Vec = Vec::new(); - let mut polys: Vec> = Vec::new(); + let mut r: Vec = Vec::new(); + let mut polys: Vec> = Vec::new(); let mut claim_per_round = *claim; for _ in 0..num_rounds { diff --git a/src/traits/circuit.rs b/src/traits/circuit.rs index d2cd2aaee..71037c6f7 100644 --- a/src/traits/circuit.rs +++ b/src/traits/circuit.rs @@ -26,10 +26,7 @@ pub struct TrivialCircuit { _p: PhantomData, } -impl StepCircuit for TrivialCircuit -where - F: PrimeField, -{ +impl StepCircuit for TrivialCircuit { fn arity(&self) -> usize { 1 } diff --git a/src/traits/commitment.rs b/src/traits/commitment.rs index 70f05f9c0..5bc298a82 100644 --- a/src/traits/commitment.rs +++ b/src/traits/commitment.rs @@ -2,12 +2,12 @@ //! We require the commitment engine to provide a commitment to vectors with a single group element use crate::{ errors::NovaError, - traits::{AbsorbInROTrait, Group, TranscriptReprTrait}, + traits::{AbsorbInROTrait, Engine, TranscriptReprTrait}, }; use abomonation::Abomonation; use core::{ fmt::Debug, - ops::{Add, AddAssign, Mul, MulAssign}, + ops::{Add, Mul, MulAssign}, }; use serde::{Deserialize, Serialize}; @@ -17,29 +17,8 @@ pub trait ScalarMul: Mul + MulAssign ScalarMul for T where T: Mul + MulAssign {} -/// Defines basic operations on commitments -pub trait CommitmentOps: - Add + AddAssign -{ -} - -impl CommitmentOps for T where - T: Add + AddAssign -{ -} - -/// A helper trait for references with a commitment operation -pub trait CommitmentOpsOwned: - for<'r> CommitmentOps<&'r Rhs, Output> -{ -} -impl CommitmentOpsOwned for T where - T: for<'r> CommitmentOps<&'r Rhs, Output> -{ -} - /// This trait defines the behavior of the commitment -pub trait CommitmentTrait: +pub trait CommitmentTrait: Clone + Copy + Debug @@ -48,14 +27,13 @@ pub trait CommitmentTrait: + Eq + Send + Sync - + TranscriptReprTrait + + TranscriptReprTrait + Serialize + for<'de> Deserialize<'de> + Abomonation - + AbsorbInROTrait - + CommitmentOps - + CommitmentOpsOwned - + ScalarMul + + AbsorbInROTrait + + Add + + ScalarMul { /// Holds the type of the compressed commitment type CompressedCommitment: Clone @@ -64,7 +42,7 @@ pub trait CommitmentTrait: + Eq + Send + Sync - + TranscriptReprTrait + + TranscriptReprTrait + Serialize + for<'de> Deserialize<'de>; @@ -72,7 +50,7 @@ pub trait CommitmentTrait: fn compress(&self) -> Self::CompressedCommitment; /// Returns the coordinate representation of the commitment - fn to_coordinates(&self) -> (G::Base, G::Base, bool); + fn to_coordinates(&self) -> (E::Base, E::Base, bool); /// Decompresses a compressed commitment into a commitment fn decompress(c: &Self::CompressedCommitment) -> Result; @@ -86,7 +64,7 @@ pub trait Len { } /// A trait that ties different pieces of the commitment generation together -pub trait CommitmentEngineTrait: Clone + Send + Sync { +pub trait CommitmentEngineTrait: Clone + Send + Sync { /// Holds the type of the commitment key /// The key should quantify its length in terms of group 
generators. type CommitmentKey: Len @@ -100,11 +78,11 @@ pub trait CommitmentEngineTrait: Clone + Send + Sync { + Abomonation; /// Holds the type of the commitment - type Commitment: CommitmentTrait; + type Commitment: CommitmentTrait; /// Samples a new commitment key of a specified size fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey; /// Commits to the provided vector using the provided generators - fn commit(ck: &Self::CommitmentKey, v: &[G::Scalar]) -> Self::Commitment; + fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment; } diff --git a/src/traits/evaluation.rs b/src/traits/evaluation.rs index 5e29f0195..726aa3931 100644 --- a/src/traits/evaluation.rs +++ b/src/traits/evaluation.rs @@ -3,13 +3,13 @@ //! and a commitment provided by the commitment engine is treated as a multilinear polynomial commitment use crate::{ errors::NovaError, - traits::{commitment::CommitmentEngineTrait, Group}, + traits::{commitment::CommitmentEngineTrait, Engine}, }; use abomonation::Abomonation; use serde::{Deserialize, Serialize}; /// A trait that ties different pieces of the commitment evaluation together -pub trait EvaluationEngineTrait: Clone + Send + Sync { +pub trait EvaluationEngineTrait: Clone + Send + Sync { /// A type that holds the prover key type ProverKey: Clone + Send + Sync + Serialize + for<'de> Deserialize<'de> + Abomonation; @@ -21,27 +21,27 @@ pub trait EvaluationEngineTrait: Clone + Send + Sync { /// A method to perform any additional setup needed to produce proofs of evaluations fn setup( - ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, + ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, ) -> (Self::ProverKey, Self::VerifierKey); /// A method to prove the evaluation of a multilinear polynomial fn prove( - ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, + ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, pk: &Self::ProverKey, - transcript: &mut G::TE, - comm: &<::CE as CommitmentEngineTrait>::Commitment, - poly: &[G::Scalar], - point: &[G::Scalar], - eval: &G::Scalar, + transcript: &mut E::TE, + comm: &<::CE as CommitmentEngineTrait>::Commitment, + poly: &[E::Scalar], + point: &[E::Scalar], + eval: &E::Scalar, ) -> Result; /// A method to verify the purported evaluation of a multilinear polynomials fn verify( vk: &Self::VerifierKey, - transcript: &mut G::TE, - comm: &<::CE as CommitmentEngineTrait>::Commitment, - point: &[G::Scalar], - eval: &G::Scalar, + transcript: &mut E::TE, + comm: &<::CE as CommitmentEngineTrait>::Commitment, + point: &[E::Scalar], + eval: &E::Scalar, arg: &Self::EvaluationArgument, ) -> Result<(), NovaError>; } diff --git a/src/traits/mod.rs b/src/traits/mod.rs index 6ac2812cf..3440cbb32 100644 --- a/src/traits/mod.rs +++ b/src/traits/mod.rs @@ -15,17 +15,32 @@ use commitment::CommitmentEngineTrait; /// This is currently tailored for an elliptic curve group pub trait Group: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq { /// A type representing an element of the base field of the group - type Base: PrimeFieldBits + TranscriptReprTrait + Serialize + for<'de> Deserialize<'de>; + type Base: PrimeFieldBits + Serialize + for<'de> Deserialize<'de>; + + /// A type representing an element of the scalar field of the group + type Scalar: PrimeFieldBits + PrimeFieldExt + Send + Sync + Serialize + for<'de> Deserialize<'de>; + + /// Returns A, B, the order of the group, the size of the base field as big integers + fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt); +} + +/// A collection of engines 
that are required by the library +pub trait Engine: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq { + /// A type representing an element of the base field of the group + type Base: PrimeFieldBits + TranscriptReprTrait + Serialize + for<'de> Deserialize<'de>; /// A type representing an element of the scalar field of the group type Scalar: PrimeFieldBits + PrimeFieldExt + Send + Sync - + TranscriptReprTrait + + TranscriptReprTrait + Serialize + for<'de> Deserialize<'de>; + /// A type that represents an element of the group + type GE: Group + Serialize + for<'de> Deserialize<'de>; + /// A type that represents a circuit-friendly sponge that consumes elements /// from the base field and squeezes out elements of the scalar field type RO: ROTrait; @@ -38,15 +53,12 @@ pub trait Group: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq { /// A type that defines a commitment engine over scalars in the group type CE: CommitmentEngineTrait; - - /// Returns A, B, the order of the group, the size of the base field as big integers - fn get_curve_params() -> (Self::Base, Self::Base, BigInt, BigInt); } /// A helper trait to absorb different objects in RO -pub trait AbsorbInROTrait { +pub trait AbsorbInROTrait { /// Absorbs the value in the provided RO - fn absorb_in_ro(&self, ro: &mut G::RO); + fn absorb_in_ro(&self, ro: &mut E::RO); } /// A helper trait that defines the behavior of a hash function that we use as an RO @@ -96,18 +108,20 @@ pub trait ROCircuitTrait { fn absorb(&mut self, e: &AllocatedNum); /// Returns a challenge of `num_bits` by hashing the internal state - fn squeeze(&mut self, cs: CS, num_bits: usize) -> Result, SynthesisError> - where - CS: ConstraintSystem; + fn squeeze>( + &mut self, + cs: CS, + num_bits: usize, + ) -> Result, SynthesisError>; } -/// An alias for constants associated with `G::RO` -pub type ROConstants = - <::RO as ROTrait<::Base, ::Scalar>>::Constants; +/// An alias for constants associated with E::RO +pub type ROConstants = + <::RO as ROTrait<::Base, ::Scalar>>::Constants; -/// An alias for constants associated with `G::ROCircuit` -pub type ROConstantsCircuit = - <::ROCircuit as ROCircuitTrait<::Base>>::Constants; +/// An alias for constants associated with `E::ROCircuit` +pub type ROConstantsCircuit = + <::ROCircuit as ROCircuitTrait<::Base>>::Constants; /// This trait allows types to implement how they want to be added to `TranscriptEngine` pub trait TranscriptReprTrait: Send + Sync { @@ -116,15 +130,15 @@ pub trait TranscriptReprTrait: Send + Sync { } /// This trait defines the behavior of a transcript engine compatible with Spartan -pub trait TranscriptEngineTrait: Send + Sync { +pub trait TranscriptEngineTrait: Send + Sync { /// initializes the transcript fn new(label: &'static [u8]) -> Self; /// returns a scalar element of the group as a challenge - fn squeeze(&mut self, label: &'static [u8]) -> Result; + fn squeeze(&mut self, label: &'static [u8]) -> Result; /// absorbs any type that implements `TranscriptReprTrait` under a label - fn absorb>(&mut self, label: &'static [u8], o: &T); + fn absorb>(&mut self, label: &'static [u8], o: &T); /// adds a domain separator fn dom_sep(&mut self, bytes: &'static [u8]); diff --git a/src/traits/snark.rs b/src/traits/snark.rs index 1b2657bcb..b9c9b13d5 100644 --- a/src/traits/snark.rs +++ b/src/traits/snark.rs @@ -2,7 +2,7 @@ use crate::{ errors::NovaError, r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, - traits::Group, + traits::Engine, CommitmentKey, }; @@ -13,56 +13,51 @@ use 
serde::{Deserialize, Serialize};

 /// the final compressing SNARK the user expected to use with these public parameters, and the below
 /// is a sensible default, which is to not require any more bases then the usual (maximum of the number of
 /// variables and constraints of the involved R1CS circuit).
-pub fn default_ck_hint<G: Group>() -> Box<dyn for<'a> Fn(&'a R1CSShape<G>) -> usize> {
+pub fn default_ck_hint<E: Engine>() -> Box<dyn for<'a> Fn(&'a R1CSShape<E>) -> usize> {
   // The default is to not put an additional floor on the size of the commitment key
-  Box::new(|_shape: &R1CSShape<G>| 0)
+  Box::new(|_shape: &R1CSShape<E>| 0)
 }
 
 /// A trait that defines the behavior of a `zkSNARK`
-pub trait RelaxedR1CSSNARKTrait<G: Group>:
+pub trait RelaxedR1CSSNARKTrait<E: Engine>:
   Send + Sync + Serialize + for<'de> Deserialize<'de>
 {
   /// A type that represents the prover's key
   type ProverKey: Send + Sync + Serialize + for<'de> Deserialize<'de> + Abomonation;
 
   /// A type that represents the verifier's key
-  type VerifierKey: Send
-    + Sync
-    + Serialize
-    + for<'de> Deserialize<'de>
-    + DigestHelperTrait<G>
-    + Abomonation;
+  type VerifierKey: Send + Sync + Serialize + for<'de> Deserialize<'de> + DigestHelperTrait<E> + Abomonation;
 
   /// This associated function (not a method) provides a hint that offers
   /// a minimum sizing cue for the commitment key used by this SNARK
   /// implementation. The commitment key passed in setup should then
   /// be at least as large as this hint.
-  fn ck_floor() -> Box<dyn for<'a> Fn(&'a R1CSShape<G>) -> usize> {
+  fn ck_floor() -> Box<dyn for<'a> Fn(&'a R1CSShape<E>) -> usize> {
     // The default is to not put an additional floor on the size of the commitment key
     default_ck_hint()
   }
 
   /// Produces the keys for the prover and the verifier
   fn setup(
-    ck: &CommitmentKey<G>,
-    S: &R1CSShape<G>,
+    ck: &CommitmentKey<E>,
+    S: &R1CSShape<E>,
   ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>;
 
   /// Produces a new SNARK for a relaxed R1CS
   fn prove(
-    ck: &CommitmentKey<G>,
+    ck: &CommitmentKey<E>,
     pk: &Self::ProverKey,
-    S: &R1CSShape<G>,
-    U: &RelaxedR1CSInstance<G>,
-    W: &RelaxedR1CSWitness<G>,
+    S: &R1CSShape<E>,
+    U: &RelaxedR1CSInstance<E>,
+    W: &RelaxedR1CSWitness<E>,
   ) -> Result<Self, NovaError>;
 
   /// Verifies a SNARK for a relaxed R1CS
-  fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance<G>) -> Result<(), NovaError>;
+  fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance<E>) -> Result<(), NovaError>;
 }
 
 /// A helper trait that defines the behavior of a verifier key of `zkSNARK`
-pub trait DigestHelperTrait<G: Group> {
+pub trait DigestHelperTrait<E: Engine> {
   /// Returns the digest of the verifier's key
-  fn digest(&self) -> G::Scalar;
+  fn digest(&self) -> E::Scalar;
 }
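To make the shape of the refactored API concrete, here is a minimal sketch (not part of this patch) of downstream code written against the new `Engine` trait. The transcript, the commitment engine, and the scalar field are all reached through `E`'s associated types, so two engines over the same curve cycle that differ only in their commitment engine can be used interchangeably with code like this. The helper name `commit_and_absorb` and the transcript labels are illustrative assumptions, not items introduced by the patch.

use nova_snark::{
  errors::NovaError,
  traits::{commitment::CommitmentEngineTrait, Engine, TranscriptEngineTrait},
};

/// Commits to a vector of scalars, absorbs the commitment into a fresh transcript,
/// and squeezes a challenge; every component is obtained from the engine `E`.
fn commit_and_absorb<E: Engine>(
  ck: &<E::CE as CommitmentEngineTrait<E>>::CommitmentKey,
  v: &[E::Scalar],
) -> Result<E::Scalar, NovaError> {
  // the commitment engine is whatever `E::CE` selects (e.g., Pedersen)
  let comm = E::CE::commit(ck, v);
  // the transcript engine is likewise chosen through `E::TE`
  let mut transcript = E::TE::new(b"example");
  // commitments implement `TranscriptReprTrait<E::GE>`, so they can be absorbed directly
  transcript.absorb(b"c", &comm);
  transcript.squeeze(b"r")
}

The same helper could be instantiated with `PallasEngine` from `provider::pasta` or, under this refactor, with a hypothetical engine that reuses the Pallas group but plugs in a different `CE`.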