From 79bb090fe80e529f6e506ab112137851d2f7ec4c Mon Sep 17 00:00:00 2001 From: Adrian Hamelink Date: Thu, 29 Feb 2024 18:08:02 +0100 Subject: [PATCH] - Unified transcript - Wrap ECC and EmulatedFieldElement - Make all structs CurveCycleEquipped - Specialize R1CS for both curves - Update Transcript --- Cargo.toml | 7 +- src/parafold/circuit.rs | 85 ++- src/parafold/cycle_fold/circuit.rs | 109 ++-- src/parafold/{ => cycle_fold/gadgets}/ecc.rs | 73 ++- src/parafold/cycle_fold/gadgets/emulated.rs | 169 ++++++ src/parafold/cycle_fold/gadgets/mod.rs | 4 + .../gadgets/secondary_commitment.rs | 80 +++ src/parafold/cycle_fold/mod.rs | 122 ++-- src/parafold/cycle_fold/nifs/circuit.rs | 215 +++++++ src/parafold/cycle_fold/nifs/mod.rs | 2 + src/parafold/cycle_fold/nifs/prover.rs | 191 +++++++ src/parafold/cycle_fold/prover.rs | 79 +-- src/parafold/mod.rs | 26 +- src/parafold/nifs/circuit.rs | 132 ++--- src/parafold/nifs/circuit_secondary.rs | 286 ---------- src/parafold/nifs/mod.rs | 89 ++- src/parafold/nifs/prover.rs | 458 +++++---------- src/parafold/nivc/circuit.rs | 526 +++++++++--------- src/parafold/nivc/mod.rs | 47 +- src/parafold/nivc/prover.rs | 356 ++++++------ src/parafold/prover.rs | 61 +- src/parafold/transcript/circuit.rs | 207 +++++-- src/parafold/transcript/mod.rs | 57 ++ src/parafold/transcript/prover.rs | 134 ++--- 24 files changed, 1896 insertions(+), 1619 deletions(-) rename src/parafold/{ => cycle_fold/gadgets}/ecc.rs (94%) create mode 100644 src/parafold/cycle_fold/gadgets/emulated.rs create mode 100644 src/parafold/cycle_fold/gadgets/mod.rs create mode 100644 src/parafold/cycle_fold/gadgets/secondary_commitment.rs create mode 100644 src/parafold/cycle_fold/nifs/circuit.rs create mode 100644 src/parafold/cycle_fold/nifs/mod.rs create mode 100644 src/parafold/cycle_fold/nifs/prover.rs delete mode 100644 src/parafold/nifs/circuit_secondary.rs diff --git a/Cargo.toml b/Cargo.toml index 78ba2ccc..0167c19c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,11 +9,12 @@ readme = "README.md" repository = "https://github.com/lurk-lab/arecibo" license-file = "LICENSE" keywords = ["zkSNARKs", "cryptography", "proofs"] -rust-version="1.71.0" +rust-version = "1.71.0" [dependencies] bellpepper-core = { version = "0.4.0", default-features = false } -bellpepper = { git="https://github.com/lurk-lab/bellpepper", branch="dev", default-features = false } +bellpepper = { git = "https://github.com/lurk-lab/bellpepper", branch = "dev", default-features = false } +bellpepper-emulated = { git = "https://github.com/lurk-lab/bellpepper-gadgets", branch = "main", default-features = false } ff = { version = "0.13.0", features = ["derive"] } digest = "0.10" halo2curves = { version = "0.6.0", features = ["bits", "derive_serde"] } @@ -23,7 +24,7 @@ rand_core = { version = "0.6", default-features = false } rand_chacha = "0.3" subtle = "2.5" pasta_curves = { version = "0.5.0", features = ["repr-c", "serde"] } -neptune = { git = "https://github.com/lurk-lab/neptune", branch="dev", default-features = false, features = ["abomonation"] } +neptune = { git = "https://github.com/lurk-lab/neptune", branch = "dev", default-features = false, features = ["abomonation"] } generic-array = "1.0.0" num-bigint = { version = "0.4", features = ["serde", "rand"] } num-traits = "0.2" diff --git a/src/parafold/circuit.rs b/src/parafold/circuit.rs index 83a6432f..1c02057d 100644 --- a/src/parafold/circuit.rs +++ b/src/parafold/circuit.rs @@ -1,8 +1,7 @@ use bellpepper_core::{ConstraintSystem, SynthesisError}; use 
crate::parafold::nivc::circuit::AllocatedNIVCState; -use crate::parafold::nivc::{NIVCMergeProof, NIVCUpdateProof, NIVCIO}; -use crate::parafold::transcript::circuit::AllocatedTranscript; +use crate::parafold::nivc::{NIVCUpdateProof, NIVCIO}; use crate::parafold::transcript::TranscriptConstants; use crate::supernova::StepCircuit; use crate::traits::CurveCycleEquipped; @@ -12,57 +11,55 @@ pub fn synthesize_step( ro_consts: &TranscriptConstants, proof: NIVCUpdateProof, step_circuit: &SF, -) -> Result, SynthesisError> +) -> Result, SynthesisError> where E: CurveCycleEquipped, CS: ConstraintSystem, SF: StepCircuit, { // Fold proof for previous state - let (mut state, transcript) = - AllocatedNIVCState::from_proof(cs.namespace(|| "verify self"), ro_consts, proof)?; + let mut state = AllocatedNIVCState::from_proof(cs.namespace(|| "verify self"), ro_consts, proof)?; - let io = state.update_io(cs.namespace(|| "step"), step_circuit); + let io_native = state.update_io(cs.namespace(|| "step"), step_circuit); - transcript.inputize(cs.namespace(|| "inputize transcript"))?; state.inputize(cs.namespace(|| "inputize state"))?; - io + io_native } -/// Circuit -pub fn synthesize_merge( - mut cs: CS, - ro_consts: &TranscriptConstants, - proof_L: NIVCUpdateProof, - proof_R: NIVCUpdateProof, - proof_merge: NIVCMergeProof, -) -> Result, SynthesisError> -where - E: CurveCycleEquipped, - CS: ConstraintSystem, -{ - // Verify L - let (self_L, transcript_L) = - AllocatedNIVCState::from_proof(cs.namespace(|| "verify proof_L"), ro_consts, proof_L)?; - // Verify R - let (self_R, transcript_R) = - AllocatedNIVCState::from_proof(cs.namespace(|| "verify proof_R"), ro_consts, proof_R)?; - // Merge transcripts - let mut transcript = AllocatedTranscript::merge(transcript_L, transcript_R); - - // Merge states - let (state, io_native) = AllocatedNIVCState::merge( - cs.namespace(|| "merge"), - self_L, - self_R, - ro_consts, - proof_merge, - &mut transcript, - )?; - - transcript.inputize(cs.namespace(|| "inputize transcript"))?; - state.inputize(cs.namespace(|| "inputize state"))?; - - Ok(io_native) -} +// /// Circuit +// pub fn synthesize_merge( +// mut cs: CS, +// ro_consts: &TranscriptConstants, +// proof_L: NIVCUpdateProof, +// proof_R: NIVCUpdateProof, +// proof_merge: NIVCMergeProof, +// ) -> Result, SynthesisError> +// where +// E: CurveCycleEquipped, +// CS: ConstraintSystem, +// { +// // Verify L +// let (self_L, transcript_L) = +// AllocatedNIVCState::from_proof(cs.namespace(|| "verify proof_L"), ro_consts, proof_L)?; +// // Verify R +// let (self_R, transcript_R) = +// AllocatedNIVCState::from_proof(cs.namespace(|| "verify proof_R"), ro_consts, proof_R)?; +// // Merge transcripts +// let mut transcript = AllocatedTranscript::merge(transcript_L, transcript_R); +// +// // Merge states +// let (state, io_native) = AllocatedNIVCState::merge( +// cs.namespace(|| "merge"), +// self_L, +// self_R, +// ro_consts, +// proof_merge, +// &mut transcript, +// )?; +// +// transcript.inputize(cs.namespace(|| "inputize transcript"))?; +// state.inputize(cs.namespace(|| "inputize state"))?; +// +// Ok(io_native) +// } diff --git a/src/parafold/cycle_fold/circuit.rs b/src/parafold/cycle_fold/circuit.rs index c1baa95c..83dd67bb 100644 --- a/src/parafold/cycle_fold/circuit.rs +++ b/src/parafold/cycle_fold/circuit.rs @@ -1,107 +1,92 @@ -use bellpepper_core::num::AllocatedNum; -use bellpepper_core::{ConstraintSystem, SynthesisError}; -use itertools::{chain, zip_eq}; +use bellpepper_core::boolean::Boolean; +use 
bellpepper_core::{ConstraintSystem, SynthesisError, Variable}; -use crate::parafold::cycle_fold::AllocatedHashedCommitment; -use crate::parafold::nifs::circuit_secondary::AllocatedSecondaryRelaxedR1CSInstance; -use crate::parafold::nifs::FoldProof; +use crate::parafold::cycle_fold::gadgets::emulated::AllocatedBase; +use crate::parafold::cycle_fold::nifs::circuit::AllocatedSecondaryRelaxedR1CSInstance; +use crate::parafold::cycle_fold::{AllocatedPrimaryCommitment, NUM_IO_SECONDARY}; use crate::parafold::transcript::circuit::AllocatedTranscript; -use crate::traits::{CurveCycleEquipped, Engine}; +use crate::traits::CurveCycleEquipped; #[derive(Debug, Clone)] -pub struct AllocatedScalarMulAccumulator { +pub struct AllocatedScalarMulAccumulator { deferred: Vec>, + acc: AllocatedSecondaryRelaxedR1CSInstance, } -impl AllocatedScalarMulAccumulator { - pub fn new() -> Self { - Self { deferred: vec![] } +impl AllocatedScalarMulAccumulator { + pub fn new(acc: AllocatedSecondaryRelaxedR1CSInstance) -> Self { + Self { + deferred: vec![], + acc, + } } /// Compute the result `C <- A + x * B` by folding a proof over the secondary curve. pub fn scalar_mul( &mut self, mut cs: CS, - A: AllocatedHashedCommitment, - B: AllocatedHashedCommitment, - x: AllocatedNum, - transcript: &mut AllocatedTranscript, - ) -> Result, SynthesisError> + A: AllocatedPrimaryCommitment, + B: AllocatedPrimaryCommitment, + x_bits: Vec, + transcript: &mut AllocatedTranscript, + ) -> Result, SynthesisError> where CS: ConstraintSystem, { - let A_value = A.value; - let B_value = B.value; - let x_value = x.get_value().ok_or(SynthesisError::AssignmentMissing)?; - let C_value = A_value + B_value * x_value; - let C = AllocatedHashedCommitment::alloc_transcript( - cs.namespace(|| "alloc output"), - C_value, - transcript, - ); + let C = transcript.read_commitment_primary(cs.namespace(|| "transcript C"))?; self.deferred.push(AllocatedScalarMulInstance { A, B, - x, + x_bits, C: C.clone(), }); Ok(C) } - /// Merges another existing [AllocatedScalarMulAccumulator] into `self` - pub fn merge(mut self_L: Self, mut self_R: Self) -> Self { - self_L.deferred.append(&mut self_R.deferred); - self_L - } -} - -impl AllocatedScalarMulAccumulator { pub fn finalize( - self, + mut self, mut cs: CS, - mut acc_cf: AllocatedSecondaryRelaxedR1CSInstance, - proofs: impl IntoIterator>, - transcript: &mut AllocatedTranscript, + transcript: &mut AllocatedTranscript, ) -> Result, SynthesisError> where CS: ConstraintSystem, { - for (instance, proof) in zip_eq(self.deferred, proofs) { - let AllocatedScalarMulInstance { A, B, x, C } = instance; - let _X_tmp: Vec<_> = chain![A.as_preimage(), B.as_preimage(), [x], C.as_preimage()].collect(); + for instance in self.deferred.drain(..) { + let X = instance.to_io(CS::one()); // TODO: In order to avoid computing unnecessary proofs, we can check // - x = 0 => C = A - - // Convert the elements in the instance to a bignum modulo E1::Base. - // Since |E1::Scalar| < |E1::Base|, we can create the limbs without an initial bound-check - // We should check here that the limbs are of the right size, but not-necessarily bound check them. 
- // X = [A.as_bignum(), B.as_bignum(), x.as_bignum(), C.as_bignum()] - let X = vec![]; - acc_cf.fold(cs.namespace(|| "fold cf instance"), X, proof, transcript)?; + self + .acc + .fold(cs.namespace(|| "fold cf instance"), X, transcript)?; } - Ok(acc_cf) + Ok(self.acc) + } + + pub fn is_finalized(&self) -> bool { + self.deferred.is_empty() } } #[derive(Debug, Clone)] -pub struct AllocatedScalarMulInstance { - A: AllocatedHashedCommitment, - B: AllocatedHashedCommitment, - x: AllocatedNum, - C: AllocatedHashedCommitment, +pub struct AllocatedScalarMulInstance { + A: AllocatedPrimaryCommitment, + B: AllocatedPrimaryCommitment, + x_bits: Vec, + C: AllocatedPrimaryCommitment, } -impl AllocatedScalarMulInstance { - pub fn as_preimage(&self) -> impl IntoIterator> + '_ { - chain![ - self.A.as_preimage(), - self.B.as_preimage(), - [self.x.clone()], - self.C.as_preimage() - ] +impl AllocatedScalarMulInstance { + fn to_io(self, one: Variable) -> [AllocatedBase; NUM_IO_SECONDARY] { + let Self { A, B, x_bits, C } = self; + + // Convert the elements in the instance to a bignum modulo E1::Base. + // Since |E1::Scalar| < |E1::Base|, we can create the limbs without an initial bound-check + // We should check here that the limbs are of the right size, but not-necessarily bound check them. + let x = AllocatedBase::from_bits(one, &x_bits); + [A.hash, B.hash, x, C.hash] } } diff --git a/src/parafold/ecc.rs b/src/parafold/cycle_fold/gadgets/ecc.rs similarity index 94% rename from src/parafold/ecc.rs rename to src/parafold/cycle_fold/gadgets/ecc.rs index 96736ac0..deddac09 100644 --- a/src/parafold/ecc.rs +++ b/src/parafold/cycle_fold/gadgets/ecc.rs @@ -3,16 +3,14 @@ use bellpepper_core::boolean::{AllocatedBit, Boolean}; use bellpepper_core::num::AllocatedNum; use bellpepper_core::{ConstraintSystem, SynthesisError}; use ff::{Field, PrimeField}; +use neptune::circuit2::Elt; use crate::gadgets::utils::{ alloc_num_equals, alloc_one, alloc_zero, conditionally_select, conditionally_select2, select_num_or_one, select_num_or_zero, select_num_or_zero2, select_one_or_diff2, select_one_or_num2, select_zero_or_num2, }; -use crate::parafold::transcript::circuit::AllocatedTranscript; -use crate::traits::commitment::CommitmentTrait; -use crate::traits::{CurveCycleEquipped, Engine, Group}; -use crate::Commitment; +use crate::traits::Group; /// `AllocatedPoint` provides an elliptic curve abstraction inside a circuit. #[derive(Debug, Clone)] @@ -57,44 +55,46 @@ impl AllocatedPoint { ); } - pub fn alloc_transcript( - mut cs: CS, - c: Commitment, - transcript: &mut AllocatedTranscript, - ) -> Self + pub fn alloc(mut cs: CS, coords: (G::Base, G::Base, bool)) -> Self where CS: ConstraintSystem, - E: CurveCycleEquipped, - E2: Engine, { - let c = Self::alloc(&mut cs, Some(c.to_coordinates())).unwrap(); - c.check_on_curve(cs.namespace(|| "check on curve")).unwrap(); - transcript.absorb([c.x.clone(), c.y.clone()]); - c + let commitment = Self::alloc_unchecked(cs.namespace(|| "alloc unchecked"), coords); + cs.enforce( + || "is_infinity => x = 0", + |lc| lc + commitment.is_infinity.get_variable(), + |lc| lc + commitment.x.get_variable(), + |lc| lc, + ); + cs.enforce( + || "is_infinity => y = 0", + |lc| lc + commitment.is_infinity.get_variable(), + |lc| lc + commitment.y.get_variable(), + |lc| lc, + ); + commitment + .check_on_curve(cs.namespace(|| "curve check")) + .unwrap(); + commitment } /// Allocates a new point on the curve using coordinates provided by `coords`. 
/// If coords = None, it allocates the default infinity point - pub fn alloc( - mut cs: CS, - coords: Option<(G::Base, G::Base, bool)>, - ) -> Result + pub fn alloc_unchecked(mut cs: CS, coords: (G::Base, G::Base, bool)) -> Self where CS: ConstraintSystem, { - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok(coords.map_or(G::Base::ZERO, |c| c.0)) - })?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok(coords.map_or(G::Base::ZERO, |c| c.1)) - })?; - let is_infinity = AllocatedNum::alloc(cs.namespace(|| "is_infinity"), || { - Ok(if coords.map_or(true, |c| c.2) { + let (x, y, is_infinity) = coords; + let x = AllocatedNum::alloc_infallible(cs.namespace(|| "x"), || x); + let y = AllocatedNum::alloc_infallible(cs.namespace(|| "y"), || y); + + let is_infinity = AllocatedNum::alloc_infallible(cs.namespace(|| "is_infinity"), || { + if is_infinity { G::Base::ONE } else { G::Base::ZERO - }) - })?; + } + }); cs.enforce( || "is_infinity is bit", |lc| lc + is_infinity.get_variable(), @@ -102,7 +102,7 @@ impl AllocatedPoint { |lc| lc, ); - Ok(Self { x, y, is_infinity }) + Self { x, y, is_infinity } } /// checks if `self` is on the curve or if it is infinity @@ -520,11 +520,7 @@ impl AllocatedPoint { /// A gadget for scalar multiplication, optimized to use incomplete addition law. /// The optimization here is analogous to , /// except we use complete addition law over affine coordinates instead of projective coordinates for the tail bits - pub fn scalar_mul( - &self, - mut cs: CS, - scalar_bits: &[AllocatedBit], - ) -> Result + pub fn scalar_mul(&self, mut cs: CS, scalar_bits: &[Boolean]) -> Result where CS: ConstraintSystem, { @@ -657,8 +653,11 @@ impl AllocatedPoint { Ok(Self { x, y, is_infinity }) } - pub fn as_preimage(&self) -> impl IntoIterator> { - [self.x.clone(), self.y.clone(), self.is_infinity.clone()] + pub fn as_preimage(&self) -> impl IntoIterator> { + [ + Elt::Allocated(self.x.clone()), + Elt::Allocated(self.y.clone()), + ] } } diff --git a/src/parafold/cycle_fold/gadgets/emulated.rs b/src/parafold/cycle_fold/gadgets/emulated.rs new file mode 100644 index 00000000..a98eaf31 --- /dev/null +++ b/src/parafold/cycle_fold/gadgets/emulated.rs @@ -0,0 +1,169 @@ +use std::marker::PhantomData; + +use bellpepper_core::boolean::Boolean; +use bellpepper_core::num::Num; +use bellpepper_core::{ConstraintSystem, SynthesisError, Variable}; +use bellpepper_emulated::field_element::{ + EmulatedFieldElement, EmulatedFieldParams, EmulatedLimbs, PseudoMersennePrime, +}; +use ff::{Field, PrimeField}; +use itertools::{zip_eq, Itertools}; +use neptune::circuit2::Elt; +use num_bigint::{BigInt, Sign}; +use num_traits::{Num as num_Num, One}; + +use crate::constants::{BN_LIMB_WIDTH, BN_N_LIMBS}; +use crate::traits::CurveCycleEquipped; + +#[derive(Debug, Clone)] +pub struct BaseParams(PhantomData); + +impl EmulatedFieldParams for BaseParams { + fn num_limbs() -> usize { + BN_N_LIMBS + } + + fn bits_per_limb() -> usize { + BN_LIMB_WIDTH + } + + fn modulus() -> BigInt { + BigInt::from_str_radix(&E::Base::MODULUS[2..], 16).unwrap() + } + + fn is_modulus_pseudo_mersenne() -> bool { + true + } + fn pseudo_mersenne_params() -> Option { + let p = Self::modulus(); + let e: u32 = p.bits().try_into().unwrap(); + let two_pow_e = BigInt::one() << e; + let c = two_pow_e - p; + Some(PseudoMersennePrime { e, c }) + } +} + +#[derive(Debug, Clone)] +pub struct AllocatedBase(EmulatedFieldElement>); + +impl AllocatedBase { + pub fn zero() -> Self { + Self(EmulatedFieldElement::zero()) + } + + pub fn from_bits(one: 
Variable, bits: &[Boolean]) -> Self { + let bases = std::iter::successors(Some(E::Scalar::ONE), |base| Some(base.double())) + .take(BaseParams::::bits_per_limb()) + .collect::>(); + + let limbs = bits + .chunks(BaseParams::::bits_per_limb()) + .map(|bits| { + zip_eq(&bases, bits).fold(Num::::zero(), |num, (base, bit)| { + num.add_bool_with_coeff(one, &bit, base.clone()) + }) + }) + .pad_using(BaseParams::::num_limbs(), |_| Num::zero()) + .collect::>(); + + assert_eq!(limbs.len(), BaseParams::::num_limbs()); + + Self(EmulatedFieldElement::new_internal_element( + EmulatedLimbs::Allocated(limbs), + 0, + )) + } + + pub fn alloc>(mut cs: CS, base: E::Base) -> Self { + let base = Self::alloc_unchecked(cs.namespace(|| "alloc unchecked"), base); + base + .0 + .check_field_membership(&mut cs.namespace(|| "check membership")) + .unwrap(); + base + } + + pub fn alloc_unchecked>(mut cs: CS, base: E::Base) -> Self { + let base = BigInt::from_bytes_le(Sign::Plus, base.to_repr().as_ref()); + let base = EmulatedFieldElement::from(&base) + .allocate_field_element_unchecked(&mut cs.namespace(|| "alloc unchecked")) + .unwrap(); + Self(base) + } + + pub fn as_preimage(&self) -> impl IntoIterator> { + // Merge into two 128-bit limbs for more efficient hashing + let limbs = self.0.compact_limbs(2, 128).unwrap(); + let EmulatedLimbs::Allocated(limbs) = limbs else { + unreachable!() + }; + limbs.into_iter().map(Elt::Num) + } + + pub fn add>( + &self, + mut cs: CS, + other: &Self, + ) -> Result { + let res = self.0.add(&mut cs.namespace(|| "add"), &other.0)?; + let res = res.reduce(&mut cs.namespace(|| "reduce"))?; + Ok(Self(res)) + } + + pub fn lc>( + &self, + mut cs: CS, + scalar: &Self, + other: &Self, + ) -> Result { + let res = other.0.mul(&mut cs.namespace(|| "mul"), &scalar.0)?; + let res = res.add(&mut cs.namespace(|| "add"), &self.0)?; + let res = res.reduce(&mut cs.namespace(|| "reduce"))?; + Ok(Self(res)) + } + + pub fn conditionally_select( + mut cs: CS, + a0: &Self, + a1: &Self, + condition: &Boolean, + ) -> Result + where + CS: ConstraintSystem, + { + Ok(Self(EmulatedFieldElement::conditionally_select( + &mut cs, &a0.0, &a1.0, condition, + )?)) + } + + fn to_big_int(self) -> BigInt { + (&self.0).into() + } +} + +#[cfg(test)] +mod tests { + use bellpepper_core::test_cs::TestConstraintSystem; + + use crate::provider::Bn256EngineKZG as E; + use crate::traits::Engine; + + use super::*; + + type Scalar = ::Scalar; + type Base = ::Base; + + #[test] + fn test_alloc() { + let cases = [Base::ZERO, Base::ONE, Base::ZERO - Base::ONE]; + let mut cs = TestConstraintSystem::::new(); + for (i, base) in cases.into_iter().enumerate() { + let _base_allocated = AllocatedBase::::alloc(cs.namespace(|| format!("alloc {i}")), base); + } + + if !cs.is_satisfied() { + println!("{:?}", cs.which_is_unsatisfied()); + } + assert!(cs.is_satisfied()); + } +} diff --git a/src/parafold/cycle_fold/gadgets/mod.rs b/src/parafold/cycle_fold/gadgets/mod.rs new file mode 100644 index 00000000..065fc3fc --- /dev/null +++ b/src/parafold/cycle_fold/gadgets/mod.rs @@ -0,0 +1,4 @@ +#[allow(dead_code)] +mod ecc; +pub mod emulated; +pub mod secondary_commitment; diff --git a/src/parafold/cycle_fold/gadgets/secondary_commitment.rs b/src/parafold/cycle_fold/gadgets/secondary_commitment.rs new file mode 100644 index 00000000..2f265c05 --- /dev/null +++ b/src/parafold/cycle_fold/gadgets/secondary_commitment.rs @@ -0,0 +1,80 @@ +use bellpepper_core::boolean::Boolean; +use bellpepper_core::{ConstraintSystem, SynthesisError}; +use 
neptune::circuit2::Elt; + +use crate::parafold::cycle_fold::gadgets::ecc::AllocatedPoint; +use crate::traits::commitment::CommitmentTrait; +use crate::traits::{CurveCycleEquipped, Engine}; +use crate::Commitment; + +#[derive(Debug, Clone)] +pub struct AllocatedSecondaryCommitment { + commitment: AllocatedPoint<::GE>, +} + +impl AllocatedSecondaryCommitment { + /// Allocates a new point on the curve using coordinates provided by `coords`. + /// If coords = None, it allocates the default infinity point + pub fn alloc_unchecked(mut cs: CS, commitment: Commitment) -> Self + where + CS: ConstraintSystem, + { + Self { + commitment: AllocatedPoint::<::GE>::alloc_unchecked( + cs.namespace(|| "alloc point unchecked"), + commitment.to_coordinates(), + ), + } + } + + pub fn alloc(mut cs: CS, commitment: Commitment) -> Self + where + CS: ConstraintSystem, + { + Self { + commitment: AllocatedPoint::alloc( + cs.namespace(|| "alloc point"), + commitment.to_coordinates(), + ), + } + } + + pub fn as_preimage(&self) -> impl IntoIterator> + '_ { + self.commitment.as_preimage() + } + + pub fn lc( + &self, + mut cs: CS, + scalar_bits: &[Boolean], + other: &Self, + ) -> Result + where + CS: ConstraintSystem, + { + let res = other + .commitment + .scalar_mul(cs.namespace(|| "scalar * other"), scalar_bits)? + .add(cs.namespace(|| "self + (scalar * other)"), &self.commitment)?; + Ok(Self { commitment: res }) + } + + pub fn select_default(self, mut cs: CS, is_default: &Boolean) -> Result + where + CS: ConstraintSystem, + { + let res = self + .commitment + .select_default(cs.namespace(|| "select default"), is_default)?; + Ok(Self { commitment: res }) + } + + pub fn enforce_trivial(&self, mut cs: CS, is_trivial: &Boolean) + where + CS: ConstraintSystem, + { + self + .commitment + .enforce_trivial(cs.namespace(|| "enforce trivial"), is_trivial) + } +} diff --git a/src/parafold/cycle_fold/mod.rs b/src/parafold/cycle_fold/mod.rs index 9179b1a0..68d633f5 100644 --- a/src/parafold/cycle_fold/mod.rs +++ b/src/parafold/cycle_fold/mod.rs @@ -1,19 +1,34 @@ -use bellpepper_core::num::AllocatedNum; use bellpepper_core::ConstraintSystem; -use ff::{Field, PrimeFieldBits}; -use neptune::generic_array::typenum::U2; +use digest::consts::U2; +use ff::Field; +use neptune::circuit2::Elt; use neptune::poseidon::PoseidonConstants; use neptune::Poseidon; -use crate::constants::{BN_LIMB_WIDTH, BN_N_LIMBS}; -use crate::parafold::transcript::circuit::AllocatedTranscript; +use crate::parafold::cycle_fold::gadgets::emulated::AllocatedBase; use crate::traits::commitment::CommitmentTrait; -use crate::traits::Engine; +use crate::traits::CurveCycleEquipped; use crate::Commitment; +const NUM_IO_SECONDARY: usize = 4; + pub mod circuit; +pub mod gadgets; +pub mod nifs; pub mod prover; +pub fn hash_commitment(commitment: Commitment) -> E::Base { + // TODO: Find a way to cache this + let constants = PoseidonConstants::::new(); + + let (x, y, infinity) = commitment.to_coordinates(); + if infinity { + E::Base::ZERO + } else { + Poseidon::new_with_preimage(&[x, y], &constants).hash() + } +} + /// Compressed representation of a [Commitment] for a proof over the [Engine]'s scalar field. /// /// # Details @@ -44,57 +59,6 @@ pub mod prover; /// When folding a proof for the above IO on the primary curve, each IO elements leads to a non-native "multiply-add", /// so this additional hashing that occurs in the secondary circuit ensure we only need to perform this expensive /// operation 4 times. 
Moreover, the fact that r { - point: Commitment, - // Poseidon hash of (x,y) = point. We set hash = 0 when `point` = infinity - hash: E::Base, - // E1 representation of `hash` with `BN_N_LIMBS` limbs of BN_LIMB_WIDTH bits. - hash_limbs: [E::Scalar; BN_N_LIMBS], -} - -impl HashedCommitment { - /// Convert a [Commitment] to it's compressed representation. - pub fn new(point: Commitment) -> Self { - let constants = PoseidonConstants::::new(); - let (x, y, infinity) = point.to_coordinates(); - if infinity { - Self { - point, - hash: E::Base::ZERO, - hash_limbs: [E::Scalar::ZERO; BN_N_LIMBS], - } - } else { - let hash = Poseidon::new_with_preimage(&[x, y], &constants).hash(); - let hash_limbs = hash - .to_le_bits() - .chunks_exact(BN_LIMB_WIDTH) - .map(|limb_bits| { - // TODO: Find more efficient trick - let mut limb = E::Scalar::ZERO; - for bit in limb_bits.iter().rev() { - // double limb - limb += limb; - if *bit { - limb += E::Scalar::ONE; - } - } - limb - }) - .collect::>(); - - Self { - point, - hash, - hash_limbs: hash_limbs.try_into().unwrap(), - } - } - } - - pub fn as_preimage(&self) -> impl IntoIterator { - self.hash_limbs - } -} /// Allocated [HashedCommitment] /// @@ -106,42 +70,22 @@ impl HashedCommitment { /// - Investigate whether a `is_infinity` flag is needed. It could be used to avoid synthesizing secondary circuits /// when the scalar multiplication is trivial. #[derive(Debug, Clone)] -pub struct AllocatedHashedCommitment { - value: Commitment, +pub struct AllocatedPrimaryCommitment { // hash = if let Some(point) = value { H_secondary(point) } else { 0 } - hash_limbs: [AllocatedNum; BN_N_LIMBS], + // TODO: Should this be a BigNat? + pub(crate) hash: AllocatedBase, } -impl AllocatedHashedCommitment { - pub fn alloc(mut cs: CS, c: Commitment) -> Self - where - CS: ConstraintSystem, - { - let hashed = HashedCommitment::::new(c); - let hash_limbs = hashed - .hash_limbs - .map(|limb| AllocatedNum::alloc_infallible(cs.namespace(|| "alloc limb"), || limb)); - - Self { - value: c, - hash_limbs, - } - } - - pub fn alloc_transcript( - mut cs: CS, - c: Commitment, - transcript: &mut AllocatedTranscript, - ) -> Self - where - CS: ConstraintSystem, - { - let c = AllocatedHashedCommitment::alloc(&mut cs, c); - transcript.absorb(c.as_preimage()); - c +impl AllocatedPrimaryCommitment { + pub fn alloc>(mut cs: CS, commitment: Commitment) -> Self { + let hash = AllocatedBase::alloc( + cs.namespace(|| "alloc hash"), + hash_commitment::(commitment), + ); + Self { hash } } - pub fn as_preimage(&self) -> impl IntoIterator> { - self.hash_limbs.clone() + pub fn as_preimage(&self) -> impl IntoIterator> { + self.hash.as_preimage() } } diff --git a/src/parafold/cycle_fold/nifs/circuit.rs b/src/parafold/cycle_fold/nifs/circuit.rs new file mode 100644 index 00000000..429ff729 --- /dev/null +++ b/src/parafold/cycle_fold/nifs/circuit.rs @@ -0,0 +1,215 @@ +use bellpepper_core::boolean::Boolean; +use bellpepper_core::{ConstraintSystem, SynthesisError}; +use itertools::{chain, zip_eq}; +use neptune::circuit2::Elt; + +use crate::constants::NUM_CHALLENGE_BITS; +use crate::parafold::cycle_fold::gadgets::emulated::AllocatedBase; +use crate::parafold::cycle_fold::gadgets::secondary_commitment::AllocatedSecondaryCommitment; +use crate::parafold::cycle_fold::nifs::prover::RelaxedSecondaryR1CSInstance; +use crate::parafold::cycle_fold::NUM_IO_SECONDARY; +use crate::parafold::transcript::circuit::AllocatedTranscript; +use crate::traits::CurveCycleEquipped; + +#[derive(Debug, Clone)] +pub struct 
AllocatedSecondaryRelaxedR1CSInstance { + pub u: AllocatedBase, + pub X: Vec>, + pub W: AllocatedSecondaryCommitment, + pub E: AllocatedSecondaryCommitment, +} + +impl AllocatedSecondaryRelaxedR1CSInstance { + pub fn fold( + &mut self, + mut cs: CS, + X_new: [AllocatedBase; NUM_IO_SECONDARY], + transcript: &mut AllocatedTranscript, + ) -> Result<(), SynthesisError> + where + CS: ConstraintSystem, + { + let W_new = transcript.read_commitment_secondary(cs.namespace(|| "transcript W_new"))?; + let T = transcript.read_commitment_secondary(cs.namespace(|| "transcript T"))?; + + // Get challenge `r` but truncate the bits for more efficient scalar multiplication + let r_bits = transcript.squeeze_bits(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; + + let r = AllocatedBase::from_bits(CS::one(), &r_bits); + + let Self { + u: u_curr, + X: X_curr, + W: W_curr, + E: E_curr, + } = self; + + // We have to do a full modular reduction since merging will make `u` full-sized + let u_next = u_curr.add(cs.namespace(|| "u_next = u_curr + r % q"), &r)?; + let X_next = zip_eq(X_curr, X_new) + .enumerate() + .map(|(i, (x_curr, x_new))| { + x_curr.lc( + cs.namespace(|| format!("(x_curr[{i}] + (x_new[{i}] * r)) % q")), + &r, + &x_new, + ) + }) + .collect::, _>>()?; + + // Scalar multiplications + let W_next = W_curr.lc(cs.namespace(|| "W_curr + r * W_new"), &r_bits, &W_new)?; + let E_next = E_curr.lc(cs.namespace(|| "E_curr + r * T"), &r_bits, &T)?; + + *self = Self { + u: u_next, + X: X_next, + W: W_next, + E: E_next, + }; + + Ok(()) + } + + // pub fn merge( + // mut cs: CS, + // self_L: Self, + // self_R: Self, + // transcript: &mut AllocatedTranscript, + // ) -> Result + // where + // CS: ConstraintSystem, + // { + // // Allocate T from transcript + // let T = AllocatedPoint::<::GE>::alloc_transcript( + // cs.namespace(|| "alloc T"), + // transcript, + // ); + // + // // Get truncated challenge + // let r_bits = transcript.squeeze_bits(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; + // let r = BaseParams::base_from_bits(CS::one(), &r_bits); + // + // let Self { + // u: u_L, + // X: X_L, + // W: W_L, + // E: E_L, + // } = self_L; + // let Self { + // u: u_R, + // X: X_R, + // W: W_R, + // E: E_R, + // } = self_R; + // + // let u_next = u_R + // .mul(&mut cs.namespace(|| "u_R * r"), &r)? + // .add(&mut cs.namespace(|| "(u_R * r) + u_L"), &u_L)? + // .reduce(&mut cs.namespace(|| "((u_R * r) + u_L) % q"))?; + // let X_next = zip_eq(X_L, X_R) + // .enumerate() + // .map(|(i, (x_L, x_R))| { + // x_R + // .mul(&mut cs.namespace(|| "x_R * r"), &r)? + // .add(&mut cs.namespace(|| "(x_R * r) + x_L"), &x_L)? + // .reduce(&mut cs.namespace(|| "((x_R * r) + x_L) % q")) + // }) + // .collect::, _>>()?; + // + // let W_next = W_R + // .scalar_mul(cs.namespace(|| "r * W_R"), &r_bits)? + // .add(cs.namespace(|| "W_L + r * W_R"), &W_L)?; + // let E_next = { + // let E_tmp = E_R + // .scalar_mul(cs.namespace(|| "r * E_R"), &r_bits)? + // .add(cs.namespace(|| "T + r * E_R"), &T)?; + // E_tmp + // .scalar_mul(cs.namespace(|| "r * E_tmp"), &r_bits)? + // .add(cs.namespace(|| "E_L + r * E_tmp"), &E_L)? 
+ // }; + // + // Ok(Self { + // u: u_next, + // X: X_next, + // W: W_next, + // E: E_next, + // }) + // } + + pub fn enforce_trivial(&self, mut cs: CS, is_trivial: &Boolean) + where + CS: ConstraintSystem, + { + // TODO: If is_trivial + // u = 0 + // X = [0, ..., 0] + self + .W + .enforce_trivial(cs.namespace(|| "enforce trivial W"), is_trivial); + self + .E + .enforce_trivial(cs.namespace(|| "enforce trivial E"), is_trivial); + } + + /// Allocate and add the result to the transcript + pub fn alloc_unchecked(mut cs: CS, instance: RelaxedSecondaryR1CSInstance) -> Self + where + CS: ConstraintSystem, + { + let u = AllocatedBase::alloc_unchecked(cs.namespace(|| "read u"), instance.u); + let X = instance + .X + .into_iter() + .enumerate() + .map(|(i, x)| AllocatedBase::alloc_unchecked(cs.namespace(|| format!("read x[{i}]")), x)) + .collect(); + let W = + AllocatedSecondaryCommitment::::alloc_unchecked(cs.namespace(|| "read W"), instance.W); + let E = + AllocatedSecondaryCommitment::::alloc_unchecked(cs.namespace(|| "read E"), instance.E); + Self { u, X, W, E } + } + + pub fn select_default(self, mut cs: CS, is_default: &Boolean) -> Result + where + CS: ConstraintSystem, + { + let zero = AllocatedBase::::zero(); + let u = AllocatedBase::conditionally_select( + &mut cs.namespace(|| "select u"), + &self.u, + &zero, + is_default.into(), + )?; + let X = self + .X + .iter() + .enumerate() + .map(|(i, x)| { + AllocatedBase::conditionally_select( + &mut cs.namespace(|| format!("select X[{i}]")), + &x, + &zero, + is_default.into(), + ) + }) + .collect::, _>>()?; + let W = self + .W + .select_default(cs.namespace(|| "select W"), is_default)?; + let E = self + .E + .select_default(cs.namespace(|| "select E"), is_default)?; + Ok(Self { u, X, W, E }) + } + + pub fn as_preimage(&self) -> impl IntoIterator> + '_ { + chain![ + self.u.as_preimage(), + self.X.iter().map(|x| x.as_preimage()).flatten(), + self.W.as_preimage(), + self.E.as_preimage() + ] + } +} diff --git a/src/parafold/cycle_fold/nifs/mod.rs b/src/parafold/cycle_fold/nifs/mod.rs new file mode 100644 index 00000000..e41bd94d --- /dev/null +++ b/src/parafold/cycle_fold/nifs/mod.rs @@ -0,0 +1,2 @@ +pub mod circuit; +pub mod prover; diff --git a/src/parafold/cycle_fold/nifs/prover.rs b/src/parafold/cycle_fold/nifs/prover.rs new file mode 100644 index 00000000..a2a85bde --- /dev/null +++ b/src/parafold/cycle_fold/nifs/prover.rs @@ -0,0 +1,191 @@ +use ff::Field; +use itertools::{chain, Itertools}; +use rayon::prelude::*; + +use crate::constants::NUM_CHALLENGE_BITS; +use crate::parafold::cycle_fold::NUM_IO_SECONDARY; +use crate::parafold::nifs::compute_fold_proof; +use crate::parafold::transcript::prover::Transcript; +use crate::parafold::transcript::TranscriptElement; +use crate::r1cs::R1CSShape; +use crate::traits::commitment::CommitmentEngineTrait; +use crate::traits::{CurveCycleEquipped, Engine}; +use crate::{Commitment, CommitmentKey}; + +/// Instance of a Relaxed-R1CS accumulator for a circuit. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RelaxedSecondaryR1CSInstance { + pub u: E::Base, + pub X: Vec, + pub W: Commitment, + pub E: Commitment, +} + +/// A full Relaxed-R1CS accumulator for a circuit +/// # TODO: +/// It would make sense to store the [R1CSShape] here since +/// - There is only one accumulator per shape +/// - We can probably use an Arc to avoid copying +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RelaxedSecondaryR1CS { + instance: RelaxedSecondaryR1CSInstance, + W: Vec, + E: Vec, +} + +impl RelaxedSecondaryR1CS { + pub fn new(shape: &R1CSShape) -> Self { + assert_eq!(shape.num_io, NUM_IO_SECONDARY); + Self { + instance: RelaxedSecondaryR1CSInstance { + u: E::Base::ZERO, + X: vec![E::Base::ZERO; NUM_IO_SECONDARY], + W: Commitment::::default(), + E: Commitment::::default(), + }, + W: vec![E::Base::ZERO; shape.num_vars], + E: vec![E::Base::ZERO; shape.num_cons], + } + } + + pub fn instance(&self) -> &RelaxedSecondaryR1CSInstance { + &self.instance + } + + pub fn simulate_fold(transcript: &mut Transcript) { + let W = Commitment::::default(); + let T = Commitment::::default(); + transcript.absorb(TranscriptElement::CommitmentSecondary(W)); + transcript.absorb(TranscriptElement::CommitmentSecondary(T)); + + let _r = transcript.squeeze(); + } + + pub fn fold( + &mut self, + ck: &CommitmentKey, + shape: &R1CSShape, + X_new: Vec, + W_new: &[E::Base], + transcript: &mut Transcript, + ) { + // TODO: Parallelize both of these operations + let W_comm_new = { ::CE::commit(ck, W_new) }; + let (T, T_comm) = { + compute_fold_proof( + ck, + shape, + self.instance.u, + &self.instance.X, + &self.W, + None, + &X_new, + W_new, + ) + }; + + transcript.absorb(TranscriptElement::CommitmentSecondary(W_comm_new)); + transcript.absorb(TranscriptElement::CommitmentSecondary(T_comm)); + + // TODO: Squeeze + let r_bits = transcript.squeeze_bits(NUM_CHALLENGE_BITS); + let r = { + r_bits.into_iter().fold(E::Base::ZERO, |acc: E::Base, bit| { + let mut acc = acc.double(); + if bit { + acc += E::Base::ONE; + } + acc + }) + }; + + self + .W + .par_iter_mut() + .zip_eq(W_new.par_iter()) + .for_each(|(w, w_new)| *w += r * w_new); + self + .E + .par_iter_mut() + .zip_eq(T.par_iter()) + .for_each(|(e, t)| *e += r * t); + + // For non-relaxed instances, u_new = 1 + self.instance.u += r; + self + .instance + .X + .iter_mut() + .zip_eq(X_new) + .for_each(|(x, x_new)| *x += r * x_new); + self.instance.W = self.instance.W + W_comm_new * r; + self.instance.E = self.instance.E + T_comm * r; + } + + // pub fn merge( + // ck: &CommitmentKey, + // shape: &R1CSShape, + // acc_L: Self, + // acc_R: &Self, + // transcript: &mut Transcript, + // ) -> Self { + // let (T, T_comm) = compute_fold_proof( + // ck, + // shape, + // &acc_L.instance.u, + // &acc_L.instance.X, + // &acc_L.W, + // Some(acc_R.instance.u), + // &acc_R.instance.X, + // &acc_R.W, + // ); + // + // transcript.absorb(comm_to_base::(&T_comm)); + // let r = transcript.squeeze_bits_secondary(NUM_CHALLENGE_BITS); + // + // let W = zip_with!( + // (acc_L.W.into_par_iter(), acc_R.W.par_iter()), + // |w_L, w_R| w_L + r * w_R + // ) + // .collect(); + // + // let E = zip_with!( + // (acc_L.E.into_par_iter(), T.par_iter(), acc_R.E.par_iter()), + // |e_L, t, e_R| e_L + r * (*t + r * e_R) + // ) + // .collect(); + // + // let instance = { + // let u = acc_L.instance.u + r * acc_R.instance.u; + // let X = zip_eq(acc_L.instance.X, &acc_R.instance.X) + // .map(|(x_L, x_R)| x_L + r * x_R) + // .collect(); + // + // let W = acc_L.instance.W + 
acc_R.instance.W * r; + // let E_tmp = T_comm + acc_R.instance.E * r; + // let E = acc_L.instance.E + E_tmp * r; + // + // RelaxedSecondaryR1CSInstance { u, X, W, E } + // }; + // + // Self { instance, W, E } + // } +} + +impl RelaxedSecondaryR1CSInstance { + pub fn as_preimage(&self) -> impl IntoIterator> + '_ { + let u = TranscriptElement::Base(self.u); + let X = self.X.iter().cloned().map(TranscriptElement::Base); + let W = TranscriptElement::CommitmentSecondary(self.W.clone()); + let E = TranscriptElement::CommitmentSecondary(self.E.clone()); + chain![[u], X, [W, E]] + } +} + +// /// Convert a commitment over the secondary curve to its coordinates to it can be added to a transcript defined +// /// over the primary curve. +// /// The `is_infinity` flag is not added since it is computed in the circuit and the coordinates are checked. +// fn comm_to_base(comm: &Commitment) -> [E::Scalar; 2] { +// let (x, y, _) = comm.to_coordinates(); +// [x, y] +// } diff --git a/src/parafold/cycle_fold/prover.rs b/src/parafold/cycle_fold/prover.rs index 145dcc4f..e9b803d2 100644 --- a/src/parafold/cycle_fold/prover.rs +++ b/src/parafold/cycle_fold/prover.rs @@ -1,11 +1,11 @@ use bellpepper_core::ConstraintSystem; use crate::bellpepper::solver::SatisfyingAssignment; -use crate::parafold::nifs::prover::RelaxedR1CS; -use crate::parafold::nifs::FoldProof; +use crate::parafold::cycle_fold::nifs::prover::RelaxedSecondaryR1CS; use crate::parafold::transcript::prover::Transcript; +use crate::parafold::transcript::TranscriptElement; use crate::r1cs::R1CSShape; -use crate::traits::{CurveCycleEquipped, Dual, Engine}; +use crate::traits::CurveCycleEquipped; use crate::{Commitment, CommitmentKey}; /// A [ScalarMulAccumulator] represents a coprocessor for efficiently computing non-native ECC scalar multiplications @@ -18,13 +18,17 @@ use crate::{Commitment, CommitmentKey}; /// /// All operations are proved in a batch at the end of the circuit in order to minimize latency for the prover. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct ScalarMulAccumulator { +pub struct ScalarMulAccumulator { deferred: Vec>, + acc: RelaxedSecondaryR1CS, } -impl ScalarMulAccumulator { - pub fn new() -> Self { - Self { deferred: vec![] } +impl ScalarMulAccumulator { + pub fn new(acc: RelaxedSecondaryR1CS) -> Self { + Self { + deferred: vec![], + acc, + } } /// Given two commitments `A`, `B` and a scalar `x`, compute `C <- A + x * B` @@ -37,54 +41,63 @@ impl ScalarMulAccumulator { A: Commitment, B: Commitment, x: E::Scalar, - transcript: &mut Transcript, + transcript: &mut Transcript, ) -> Commitment { let C: Commitment = A + B * x; - transcript.absorb_commitment_primary::(C.clone()); + transcript.absorb(TranscriptElement::CommitmentPrimary(C.clone())); self.deferred.push(ScalarMulInstance { A, B, x, C }); C } -} -impl ScalarMulAccumulator { + // pub fn merge( + // ck: &CommitmentKey, + // shape: &R1CSShape, + // mut self_L: Self, + // self_R: &Self, + // transcript: &mut Transcript, + // ) -> Self { + // let mut deferred = self_L.deferred; + // deferred.extend(self_R.deferred.clone()); + // let acc = RelaxedSecondaryR1CS::merge(ck, shape, self_L.acc, &self_R.acc, transcript); + // Self { deferred, acc } + // } + // + /// Consume all deferred scalar multiplication instances and create a folding proof for each result. /// The proofs are folded into a mutable RelaxedR1CS for the corresponding circuit over the secondary curve. 
pub fn finalize( - self, + mut self, ck: &CommitmentKey, shape: &R1CSShape, - acc_cf: &mut RelaxedR1CS, - transcript: &mut Transcript, - ) -> Vec> { - self - .deferred - .into_iter() - .map(|_instance| { - let cs = SatisfyingAssignment::>::new(); - // TODO: synthesize the circuit that proves `instance` - let (X, W) = cs.to_assignments(); - acc_cf.fold_secondary::(ck, shape, X, &W, transcript) - }) - .collect() + transcript: &mut Transcript, + ) -> RelaxedSecondaryR1CS { + self.deferred.drain(..).for_each(|_instance| { + let cs = SatisfyingAssignment::::new(); + // TODO: synthesize the circuit that proves `instance` + let (X, W) = cs.to_assignments(); + self.acc.fold(ck, shape, X, &W, transcript) + }); + self.acc } - pub fn simulate_finalize( - self, - transcript: &mut Transcript, - ) -> Vec> { + pub fn simulate_finalize(mut self, transcript: &mut Transcript) -> RelaxedSecondaryR1CS { self .deferred - .into_iter() - .map(|_| RelaxedR1CS::simulate_fold_secondary::(transcript)) - .collect() + .drain(..) + .for_each(|_| RelaxedSecondaryR1CS::simulate_fold(transcript)); + self.acc + } + + pub fn is_finalized(&self) -> bool { + self.deferred.is_empty() } } #[derive(Debug, Clone, Default, PartialEq, Eq)] -pub struct ScalarMulInstance { +pub struct ScalarMulInstance { A: Commitment, B: Commitment, x: E::Scalar, diff --git a/src/parafold/mod.rs b/src/parafold/mod.rs index e0f0c9a2..c480faff 100644 --- a/src/parafold/mod.rs +++ b/src/parafold/mod.rs @@ -1,8 +1,7 @@ +mod circuit; #[allow(dead_code)] mod cycle_fold; #[allow(dead_code)] -mod ecc; -#[allow(dead_code)] mod nifs; #[allow(dead_code)] mod nivc; @@ -10,7 +9,6 @@ mod nivc; mod prover; #[allow(dead_code)] mod transcript; -mod circuit; // pub struct ProvingKey { // /// Commitment Key @@ -41,25 +39,3 @@ mod circuit; // io: NIVCState, // proof: R1CSProof, // } - -// struct Prover { -// state: SelfState, -// -// transcript: E::TE, -// scalar_mul_instances: Vec>, -// } -// -// impl Prover { -// -// /// Given -// pub fn new(pk: &ProvingKey, state: SelfState, proof: &R1CSProof) -> Self { -// let mut transcript = E::TE::new(b"fold"); -// -// -// } -// -// /// -// // pub fn new_merge(state_L: SelfState, state_R: SelfState, proof_L: &R1CSProof, proof_R: &R1CSProof) -> Self { -// // -// // } -// } diff --git a/src/parafold/nifs/circuit.rs b/src/parafold/nifs/circuit.rs index 182d6f28..6d11a84e 100644 --- a/src/parafold/nifs/circuit.rs +++ b/src/parafold/nifs/circuit.rs @@ -2,48 +2,44 @@ use bellpepper_core::num::AllocatedNum; use bellpepper_core::{ConstraintSystem, SynthesisError}; use ff::PrimeField; use itertools::*; +use neptune::circuit2::{Elt, PoseidonCircuit2}; use crate::parafold::cycle_fold::circuit::AllocatedScalarMulAccumulator; -use crate::parafold::cycle_fold::AllocatedHashedCommitment; -use crate::parafold::nifs::{FoldProof, MergeProof, RelaxedR1CSInstance}; +use crate::parafold::cycle_fold::AllocatedPrimaryCommitment; +use crate::parafold::nifs::{R1CSPoseidonConstants, RelaxedR1CSInstance}; use crate::parafold::transcript::circuit::AllocatedTranscript; -use crate::parafold::transcript::TranscriptConstants; -use crate::traits::Engine; +use crate::traits::CurveCycleEquipped; /// Allocated [RelaxedR1CSInstance] for a circuit over the primary curve. 
#[derive(Debug, Clone)] -pub struct AllocatedRelaxedR1CSInstance { +pub struct AllocatedRelaxedR1CSInstance { + pp: AllocatedNum, u: AllocatedNum, - X: Vec>, - W: AllocatedHashedCommitment, - E: AllocatedHashedCommitment, + X: AllocatedNum, + W: AllocatedPrimaryCommitment, + E: AllocatedPrimaryCommitment, } -impl AllocatedRelaxedR1CSInstance { +impl AllocatedRelaxedR1CSInstance { /// Folds an R1CSInstance into `self` pub fn fold( self, mut cs: CS, - X_new: Vec>, + X_new: AllocatedNum, acc_sm: &mut AllocatedScalarMulAccumulator, - fold_proof: FoldProof, - transcript: &mut AllocatedTranscript, + transcript: &mut AllocatedTranscript, ) -> Result where CS: ConstraintSystem, { - let FoldProof { W: W_new, T } = fold_proof; - - let W_new = AllocatedHashedCommitment::alloc_transcript( - cs.namespace(|| "alloc W_new"), - W_new, - transcript, - ); - let T = AllocatedHashedCommitment::alloc_transcript(cs.namespace(|| "alloc E"), T, transcript); + let W_new = transcript.read_commitment_primary(cs.namespace(|| "transcript W_new"))?; + let T = transcript.read_commitment_primary(cs.namespace(|| "transcript T"))?; let r = transcript.squeeze(&mut cs.namespace(|| "squeeze r"))?; + let r_bits = r.to_bits_le(cs.namespace(|| "r_bits"))?; let Self { + pp, W: W_curr, E: E_curr, u: u_curr, @@ -52,29 +48,25 @@ impl AllocatedRelaxedR1CSInstance { // Linear combination of acc with new let u_next = u_curr.add(cs.namespace(|| "u_next"), &r)?; - let X_next = zip_eq(X_curr, &X_new) - .enumerate() - .map(|(i, (x_curr, x_new))| { - mul_add(cs.namespace(|| format!("X_next[{i}]")), &x_curr, x_new, &r) - }) - .collect::, _>>()?; + let X_next = mul_add(cs.namespace(|| "X_next"), &X_curr, &X_new, &r)?; // W_next = W_curr + r * W_new let W_next = acc_sm.scalar_mul( cs.namespace(|| "W_next"), W_curr.clone(), W_new.clone(), - r.clone(), + r_bits.clone(), transcript, )?; let E_next = acc_sm.scalar_mul( cs.namespace(|| "E_next"), E_curr.clone(), T.clone(), - r.clone(), + r_bits, transcript, )?; Ok(Self { + pp, u: u_next, X: X_next, W: W_next, @@ -88,32 +80,28 @@ impl AllocatedRelaxedR1CSInstance { accs_L: Vec, accs_R: Vec, acc_sm: &mut AllocatedScalarMulAccumulator, - proofs: Vec>, - transcript: &mut AllocatedTranscript, + transcript: &mut AllocatedTranscript, ) -> Result, SynthesisError> where CS: ConstraintSystem, { + assert_eq!(accs_L.len(), accs_R.len()); + // Add all cross-term commitments to the transcript. - let Ts = proofs - .into_iter() - .map(|proof| { - AllocatedHashedCommitment::alloc_transcript( - cs.namespace(|| "alloc Ts"), - proof.T, - transcript, - ) - }) - .collect::>(); + let Ts = (0..accs_L.len()) + .map(|i| transcript.read_commitment_primary(cs.namespace(|| format!("transcript T[{i}]")))) + .collect::, _>>()?; // Get common challenge let r = transcript.squeeze(cs.namespace(|| "squeeze r"))?; + let r_bits = r.to_bits_le(cs.namespace(|| "r_bits"))?; // Merge all accumulators let accs_next = zip_eq(accs_L, accs_R) .zip_eq(Ts) .map(|((acc_L, acc_R), T)| { let Self { + pp: pp_L, u: u_L, X: X_L, W: W_L, @@ -121,6 +109,7 @@ impl AllocatedRelaxedR1CSInstance { .. } = acc_L; let Self { + pp: pp_R, u: u_R, X: X_R, W: W_R, @@ -128,34 +117,39 @@ impl AllocatedRelaxedR1CSInstance { .. 
} = acc_R; + cs.enforce( + || "pp_L = pp_R", + |lc| lc, + |lc| lc, + |lc| lc + pp_L.get_variable() - pp_R.get_variable(), + ); + let u_next = mul_add(cs.namespace(|| "u_new"), &u_L, &u_R, &r)?; - let X_next = zip_eq(X_L, X_R) - .enumerate() - .map(|(i, (x_L, x_R))| mul_add(cs.namespace(|| format!("X_new[{i}]")), &x_L, &x_R, &r)) - .collect::, _>>()?; + let X_next = mul_add(cs.namespace(|| "X_new[{i}]"), &X_L, &X_R, &r)?; let W_next = acc_sm.scalar_mul( cs.namespace(|| "W_next"), W_L.clone(), W_R.clone(), - r.clone(), + r_bits.clone(), transcript, )?; let E1_next = acc_sm.scalar_mul( cs.namespace(|| "E1_next"), T.clone(), E_R.clone(), - r.clone(), + r_bits.clone(), transcript, )?; let E_next = acc_sm.scalar_mul( cs.namespace(|| "E_next"), E_L.clone(), E1_next.clone(), - r.clone(), + r_bits.clone(), transcript, )?; Ok::(Self { + pp: pp_L, u: u_next, X: X_next, W: W_next, @@ -168,45 +162,31 @@ impl AllocatedRelaxedR1CSInstance { } /// Compute the hash of the accumulator over the primary curve. - pub fn hash( - &self, - mut cs: CS, - constants: &TranscriptConstants, - ) -> Result, SynthesisError> + pub fn hash(&self, mut cs: CS) -> Result, SynthesisError> where CS: ConstraintSystem, { - let mut transcript = AllocatedTranscript::new(constants.clone()); - transcript.absorb(self.as_preimage()); - transcript.squeeze(&mut cs) + let constants = R1CSPoseidonConstants::::new(); + let elements = chain!( + [self.pp.clone(), self.u.clone(), self.X.clone()].map(Elt::Allocated), + self.W.as_preimage(), + self.E.as_preimage() + ) + .collect::>(); + PoseidonCircuit2::new(elements, &constants).hash_to_allocated(cs.namespace(|| "hash")) } pub fn alloc(mut cs: CS, instance: RelaxedR1CSInstance) -> Self where CS: ConstraintSystem, { - // TODO: Add the circuit digest - let RelaxedR1CSInstance { u, X, W, E } = instance; - let u = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc u"), || u); - let X = X - .into_iter() - .enumerate() - .map(|(i, X)| AllocatedNum::alloc_infallible(cs.namespace(|| format!("alloc X[{i}]")), || X)) - .collect(); - let W = AllocatedHashedCommitment::alloc(cs.namespace(|| "alloc W"), W); - let E = AllocatedHashedCommitment::alloc(cs.namespace(|| "alloc E"), E); - - Self { u, X, W, E } - } + let pp = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc pp"), || instance.pp); + let u = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc u"), || instance.u); + let X = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc X"), || instance.X); + let W = AllocatedPrimaryCommitment::alloc(cs.namespace(|| "alloc W"), instance.W); + let E = AllocatedPrimaryCommitment::alloc(cs.namespace(|| "alloc E"), instance.E); - pub fn as_preimage(&self) -> impl IntoIterator> + '_ { - // TODO: Add the circuit digest - chain![ - [self.u.clone()], - self.X.iter().cloned(), - self.W.as_preimage(), - self.E.as_preimage() - ] + Self { pp, u, X, W, E } } } diff --git a/src/parafold/nifs/circuit_secondary.rs b/src/parafold/nifs/circuit_secondary.rs deleted file mode 100644 index 423e8947..00000000 --- a/src/parafold/nifs/circuit_secondary.rs +++ /dev/null @@ -1,286 +0,0 @@ -use bellpepper_core::boolean::Boolean; -use bellpepper_core::num::AllocatedNum; -use bellpepper_core::{ConstraintSystem, SynthesisError}; -use ff::PrimeField; -use itertools::zip_eq; -use num_bigint::BigInt; -use num_traits::{Num as numTraitsNum, Zero}; - -use crate::constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_CHALLENGE_BITS}; -use crate::gadgets::nonnative::bignat::BigNat; -use crate::gadgets::nonnative::util::Num; -use 
crate::gadgets::utils::{alloc_bignat_constant, conditionally_select_bignat, le_bits_to_num}; -use crate::parafold::ecc::AllocatedPoint; -use crate::parafold::nifs::{FoldProof, MergeProof, RelaxedR1CSInstance}; -use crate::parafold::transcript::circuit::AllocatedTranscript; -use crate::traits::{CurveCycleEquipped, Engine}; - -#[derive(Debug, Clone)] -pub struct AllocatedSecondaryRelaxedR1CSInstance { - pub u: BigNat, - pub X: Vec>, - pub W: AllocatedPoint<::GE>, - pub E: AllocatedPoint<::GE>, - // q: BigNat, // = E2::Base::MODULUS -} - -impl AllocatedSecondaryRelaxedR1CSInstance { - pub fn fold( - &mut self, - mut cs: CS, - X_new: Vec>, - fold_proof: FoldProof, - transcript: &mut AllocatedTranscript, - ) -> Result<(), SynthesisError> - where - CS: ConstraintSystem, - { - // Allocate the order of the non-native field as a constant - let q_bn = alloc_bignat_constant( - cs.namespace(|| "alloc G::Base::modulus"), - &BigInt::from_str_radix(E::Base::MODULUS, 16).unwrap(), - BN_LIMB_WIDTH, - BN_N_LIMBS, - )?; - - let FoldProof { W: W_new, T } = fold_proof; - - // Allocate W_new, T and add them to the transcript - let W_new = AllocatedPoint::alloc_transcript::<_, E, _>( - cs.namespace(|| "alloc W_new"), - W_new, - transcript, - ); - let T = AllocatedPoint::alloc_transcript::<_, E, _>(cs.namespace(|| "alloc T"), T, transcript); - - // Get challenge `r` but truncate the bits for more efficient scalar multiplication - let r_bits = transcript.squeeze_bits(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; - let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; - let r_bn = BigNat::from_num( - cs.namespace(|| "allocate r_bn"), - &Num::from(r), - BN_LIMB_WIDTH, - BN_N_LIMBS, - )?; - - let Self { - u: u_curr, - X: X_curr, - W: W_curr, - E: E_curr, - } = self; - - // We have to do a full modular reduction since merging will make `u` full-sized - let u_next = u_curr - .add(&r_bn)? - .red_mod(cs.namespace(|| "u_next = u_curr + r % q"), &q_bn)?; - let X_next = zip_eq(X_curr, X_new) - .enumerate() - .map(|(i, (x_curr_bn, x_new_bn))| { - add_mul_bn( - cs.namespace(|| format!("x_next[{i}]")), - x_curr_bn, - &x_new_bn, - &r_bn, - &q_bn, - ) - }) - .collect::, _>>()?; - - // Scalar multiplications - let W_next = W_new - .scalar_mul(cs.namespace(|| "r * W_new"), &r_bits)? - .add(cs.namespace(|| "W_curr + r * W_new"), W_curr)?; - let E_next = T - .scalar_mul(cs.namespace(|| "r * T"), &r_bits)? 
- .add(cs.namespace(|| "W_curr + r * T"), E_curr)?; - - *self = Self { - u: u_next, - X: X_next, - W: W_next, - E: E_next, - }; - - Ok(()) - } - - pub fn merge( - mut cs: CS, - self_L: Self, - self_R: Self, - merge_proof: MergeProof, - transcript: &mut AllocatedTranscript, - ) -> Result - where - CS: ConstraintSystem, - { - // Allocate the order of the non-native field as a constant - let q_bn = alloc_bignat_constant( - cs.namespace(|| "alloc G::Base::modulus"), - &BigInt::from_str_radix(E::Base::MODULUS, 16).unwrap(), - BN_LIMB_WIDTH, - BN_N_LIMBS, - )?; - - let MergeProof { T } = merge_proof; - - // Allocate T and add to transcript - let T = AllocatedPoint::alloc_transcript::<_, E, _>(cs.namespace(|| "alloc T"), T, transcript); - transcript.absorb(T.as_preimage()); - - // Get truncated challenge - let r_bits = transcript.squeeze_bits(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; - let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; - let r_bn = BigNat::from_num( - cs.namespace(|| "allocate r_bn"), - &Num::from(r), - BN_LIMB_WIDTH, - BN_N_LIMBS, - )?; - - let Self { - u: u_L, - X: X_L, - W: W_L, - E: E_L, - } = self_L; - let Self { - u: u_R, - X: X_R, - W: W_R, - E: E_R, - } = self_R; - - let u_next = add_mul_bn(cs.namespace(|| "u_next"), &u_L, &u_R, &r_bn, &q_bn)?; - let X_next = zip_eq(X_L, X_R) - .enumerate() - .map(|(i, (x_L_bn, x_R_bn))| { - add_mul_bn( - cs.namespace(|| format!("x_next[{i}]")), - &x_L_bn, - &x_R_bn, - &r_bn, - &q_bn, - ) - }) - .collect::, _>>()?; - - let W_next = W_R - .scalar_mul(cs.namespace(|| "r * W_R"), &r_bits)? - .add(cs.namespace(|| "W_L + r * W_R"), &W_L)?; - let E_next = { - let E_tmp = E_R - .scalar_mul(cs.namespace(|| "r * E_R"), &r_bits)? - .add(cs.namespace(|| "T + r * E_R"), &T)?; - E_tmp - .scalar_mul(cs.namespace(|| "r * E_tmp"), &r_bits)? - .add(cs.namespace(|| "E_L + r * E_tmp"), &E_L)? 
- }; - - Ok(Self { - u: u_next, - X: X_next, - W: W_next, - E: E_next, - }) - } - - pub fn enforce_trivial(&self, mut cs: CS, is_trivial: &Boolean) - where - CS: ConstraintSystem, - { - // TODO: If is_trivial - // u = 0 - // X = [0, ..., 0] - self - .W - .enforce_trivial(cs.namespace(|| "enforce trivial W"), is_trivial); - self - .E - .enforce_trivial(cs.namespace(|| "enforce trivial E"), is_trivial); - } - - fn alloc(/*mut*/ _cs: CS, _instance: RelaxedR1CSInstance) -> Self - where - CS: ConstraintSystem, - { - // Both u, X need to be allocated as BigInt - todo!() - // let SecondaryRelaxedR1CSInstance { u, X, W, E } = instance(); - // let u = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc u"), || u); - // let X = X - // .into_iter() - // .enumerate() - // .map(|(i, X)| AllocatedNum::alloc_infallible(cs.namespace(|| format!("alloc X[{i}]")), || X)) - // .collect(); - // let W = AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc W"), || W); - // let E = AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc E"), || E); - // - // Self { - // u: BigNat::alloc_from_nat(), - // X: vec![], - // W: (), - // E: (), - // } - } - - /// Allocate and add the result to the transcript - pub fn alloc_transcript( - mut cs: CS, - instance: RelaxedR1CSInstance, - transcript: &mut AllocatedTranscript, - ) -> Self - where - CS: ConstraintSystem, - { - let instance = Self::alloc(&mut cs, instance); - transcript.absorb(instance.as_preimage()); - instance - } - - pub fn select_default(self, mut cs: CS, is_default: &Boolean) -> Result - where - CS: ConstraintSystem, - { - let bn_zero = alloc_bignat_constant( - cs.namespace(|| "alloc zero"), - &BigInt::zero(), - BN_LIMB_WIDTH, - BN_N_LIMBS, - )?; - let Self { u, X, W, E } = self; - let u = conditionally_select_bignat(cs.namespace(|| "select u"), &bn_zero, &u, is_default)?; - let X = X - .into_iter() - .map(|x| conditionally_select_bignat(cs.namespace(|| "select x"), &bn_zero, &x, is_default)) - .collect::, _>>()?; - let W = W.select_default(cs.namespace(|| "select W"), is_default)?; - let E = E.select_default(cs.namespace(|| "select E"), is_default)?; - Ok(Self { u, X, W, E }) - } - - pub fn as_preimage(&self) -> impl IntoIterator> { - vec![] - } -} - -fn add_mul_bn( - mut cs: CS, - a: &BigNat, - b: &BigNat, - r: &BigNat, - q: &BigNat, -) -> Result, SynthesisError> -where - F: PrimeField, - CS: ConstraintSystem, -{ - // tmp = r * b - let (_, tmp) = b.mult_mod(cs.namespace(|| "r * b"), b, r)?; - // tmp += a - // tmp = a + r * b; - let tmp = tmp.add(a)?; - // tmp %= q - tmp.red_mod(cs.namespace(|| "a + r * b % q"), q) -} diff --git a/src/parafold/nifs/mod.rs b/src/parafold/nifs/mod.rs index 496f3dbe..1627ad58 100644 --- a/src/parafold/nifs/mod.rs +++ b/src/parafold/nifs/mod.rs @@ -1,28 +1,85 @@ -use crate::traits::Engine; -use crate::Commitment; +use digest::consts::U11; +use ff::Field; +use neptune::poseidon::PoseidonConstants; +use rayon::prelude::*; + +use crate::r1cs::R1CSShape; +use crate::traits::commitment::CommitmentEngineTrait; +use crate::traits::{CurveCycleEquipped, Engine}; +use crate::{Commitment, CommitmentKey, CE}; + pub mod circuit; -pub mod circuit_secondary; pub mod prover; +/// Exact-sized Poseidon constants for hashing a RelaxedR1CSInstance. +/// Assumes that Commitments are serialized as 4=BN_NUM_LIMBS limbs. +type R1CSPoseidonConstants = PoseidonConstants<::Scalar, U11>; + /// Instance of a Relaxed-R1CS accumulator for a circuit. +/// +/// # Details +/// We assume the IO always has size 1. 
#[derive(Debug, Clone, PartialEq, Eq)] -pub struct RelaxedR1CSInstance { - // TODO: Add pp_digest for this circuit. +pub struct RelaxedR1CSInstance { + pp: E::Scalar, u: E::Scalar, - X: Vec, + X: E::Scalar, W: Commitment, E: Commitment, } -/// A proof for folding a statement X of a circuit C into a Relaxed-R1CS accumulator for the same circuit C -#[derive(Debug, Clone, Default)] -pub struct FoldProof { - W: Commitment, - T: Commitment, -} +pub fn compute_fold_proof( + ck: &CommitmentKey, + shape: &R1CSShape, + u_curr: E::Scalar, + X_curr: &[E::Scalar], + W_curr: &[E::Scalar], + u_new: Option, + X_new: &[E::Scalar], + W_new: &[E::Scalar], +) -> (Vec, Commitment) { + let u_1 = u_curr; + let u_2 = u_new.unwrap_or(E::Scalar::ONE); + let (AZ_1, BZ_1, CZ_1) = tracing::trace_span!("AZ_1, BZ_1, CZ_1") + .in_scope(|| shape.multiply_witness(W_curr, &u_1, X_curr)) + .unwrap(); + + let (AZ_2, BZ_2, CZ_2) = tracing::trace_span!("AZ_2, BZ_2, CZ_2") + .in_scope(|| shape.multiply_witness(W_new, &u_2, X_new)) + .unwrap(); + + let (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) = + tracing::trace_span!("cross terms").in_scope(|| { + let AZ_1_circ_BZ_2 = (0..AZ_1.len()) + .into_par_iter() + .map(|i| AZ_1[i] * BZ_2[i]) + .collect::>(); + let AZ_2_circ_BZ_1 = (0..AZ_2.len()) + .into_par_iter() + .map(|i| AZ_2[i] * BZ_1[i]) + .collect::>(); + let u_1_cdot_CZ_2 = (0..CZ_2.len()) + .into_par_iter() + .map(|i| u_1 * CZ_2[i]) + .collect::>(); + // TODO: Avoid multiplication by u2 if it is 1 + let u_2_cdot_CZ_1 = (0..CZ_1.len()) + .into_par_iter() + .map(|i| u_2 * CZ_1[i]) + .collect::>(); + (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) + }); + + let T = tracing::trace_span!("T").in_scope(|| { + AZ_1_circ_BZ_2 + .par_iter() + .zip_eq(&AZ_2_circ_BZ_1) + .zip_eq(&u_1_cdot_CZ_2) + .zip_eq(&u_2_cdot_CZ_1) + .map(|(((a, b), c), d)| *a + *b - *c - *d) + .collect::>() + }); -/// A proof for merging two valid Relaxed-R1CS accumulators for the same circuit C -#[derive(Debug, Clone)] -pub struct MergeProof { - T: Commitment, + let comm_T = CE::::commit(ck, &T); + (T, comm_T) } diff --git a/src/parafold/nifs/prover.rs b/src/parafold/nifs/prover.rs index 79310a78..cbfcb1a3 100644 --- a/src/parafold/nifs/prover.rs +++ b/src/parafold/nifs/prover.rs @@ -1,16 +1,15 @@ use ff::Field; -use itertools::*; +use neptune::Poseidon; use rayon::prelude::*; -use crate::constants::{BN_N_LIMBS, NUM_CHALLENGE_BITS}; use crate::parafold::cycle_fold::prover::ScalarMulAccumulator; -use crate::parafold::nifs::{FoldProof, MergeProof, RelaxedR1CSInstance}; +use crate::parafold::nifs::{compute_fold_proof, R1CSPoseidonConstants, RelaxedR1CSInstance}; use crate::parafold::transcript::prover::Transcript; -use crate::parafold::transcript::TranscriptConstants; +use crate::parafold::transcript::TranscriptElement; use crate::r1cs::R1CSShape; -use crate::traits::commitment::{CommitmentEngineTrait, CommitmentTrait}; -use crate::traits::{CurveCycleEquipped, Engine}; -use crate::{zip_with, Commitment, CommitmentKey, CE}; +use crate::traits::commitment::CommitmentEngineTrait; +use crate::traits::CurveCycleEquipped; +use crate::{Commitment, CommitmentKey}; /// A full Relaxed-R1CS accumulator for a circuit /// # TODO: @@ -18,19 +17,21 @@ use crate::{zip_with, Commitment, CommitmentKey, CE}; /// - There is only one accumulator per shape /// - We can probably use an Arc to avoid copying #[derive(Debug)] -pub struct RelaxedR1CS { +pub struct RelaxedR1CS { instance: RelaxedR1CSInstance, W: Vec, E: Vec, // TODO: store cache for Folding T 
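+  // `W` and `E` are the full witness and error vectors; only their commitments appear in `instance`.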
} -impl RelaxedR1CS { +impl RelaxedR1CS { pub fn new(shape: &R1CSShape) -> Self { + assert_eq!(shape.num_io, 1); Self { instance: RelaxedR1CSInstance { + pp: shape.digest(), u: E::Scalar::ZERO, - X: vec![E::Scalar::ZERO; shape.num_io], + X: E::Scalar::ZERO, W: Commitment::::default(), E: Commitment::::default(), }, @@ -44,18 +45,15 @@ impl RelaxedR1CS { /// Simulate the fold protocol for a circuit on the primary curve, creating a trivial proof, /// while updating the transcript with the standard pattern. - pub fn simulate_fold_primary( - acc_sm: &mut ScalarMulAccumulator, - transcript: &mut Transcript, - ) -> FoldProof { + pub fn simulate_fold(acc_sm: &mut ScalarMulAccumulator, transcript: &mut Transcript) { let W = Commitment::::default(); let T = Commitment::::default(); - transcript.absorb_commitment_primary::(W); - transcript.absorb_commitment_primary::(T); + transcript.absorb(TranscriptElement::CommitmentPrimary(W)); + transcript.absorb(TranscriptElement::CommitmentPrimary(T)); + let r = transcript.squeeze(); let _ = acc_sm.scalar_mul(W, W, r, transcript); let _ = acc_sm.scalar_mul(T, T, r, transcript); - FoldProof { W, T } } /// Given the public IO `X_new` for a circuit with R1CS representation `shape`, @@ -66,21 +64,32 @@ impl RelaxedR1CS { /// # Warning /// We assume the R1CS IO `X_new` has already been absorbed in some form into the transcript in order to avoid /// unnecessary hashing. The caller is responsible for ensuring this assumption is valid. - pub fn fold_primary( + pub fn fold( &mut self, ck: &CommitmentKey, shape: &R1CSShape, - X_new: Vec, + X_new: E::Scalar, W_new: &[E::Scalar], acc_sm: &mut ScalarMulAccumulator, - transcript: &mut Transcript, - ) -> FoldProof { + transcript: &mut Transcript, + ) { // TODO: Parallelize both of these operations let W_comm_new = { E::CE::commit(ck, W_new) }; - let (T, T_comm) = { self.compute_fold_proof(ck, shape, None, &X_new, W_new) }; + let (T, T_comm) = { + compute_fold_proof( + ck, + shape, + self.instance.u, + &[self.instance.X], + &self.W, + None, + &[X_new], + W_new, + ) + }; - transcript.absorb_commitment_primary::(W_comm_new); - transcript.absorb_commitment_primary::(T_comm); + transcript.absorb(TranscriptElement::CommitmentPrimary(W_comm_new)); + transcript.absorb(TranscriptElement::CommitmentPrimary(T_comm)); let r = transcript.squeeze(); @@ -97,12 +106,7 @@ impl RelaxedR1CS { // For non-relaxed instances, u_new = 1 self.instance.u += r; - self - .instance - .X - .iter_mut() - .zip_eq(X_new) - .for_each(|(x, x_new)| *x += r * x_new); + self.instance.X += r * X_new; // Compute scalar multiplications and resulting instances to be proved with the CycleFold circuit // W_comm_next = W_comm_curr + r * W_comm_new @@ -110,312 +114,112 @@ impl RelaxedR1CS { // E_comm_next = E_comm_curr + r * T self.instance.E = acc_sm.scalar_mul(self.instance.E, T_comm, r, transcript); - - FoldProof { - W: W_comm_new, - T: T_comm, - } - } - - /// Given two lists of [RelaxedR1CS] accumulators, - pub fn merge_many( - ck: &CommitmentKey, - shapes: &[R1CSShape], - mut accs_L: Vec, - accs_R: &[Self], - acc_sm: &mut ScalarMulAccumulator, - transcript: &mut Transcript, - ) -> (Vec, Vec>) { - // TODO: parallelize - let (Ts, T_comms): (Vec<_>, Vec<_>) = zip_with!( - (accs_L.iter_mut(), accs_R.iter(), shapes), - |acc_L, acc_R, shape| { - acc_L.compute_fold_proof( - ck, - shape, - Some(acc_R.instance.u), - &acc_R.instance.X, - &acc_R.W, - ) - } - ) - .unzip(); - - for T_comm in &T_comms { - transcript.absorb_commitment_primary::(*T_comm); - } - let r = 
transcript.squeeze(); - - zip_with!( - ( - accs_L.into_iter(), - accs_R.iter(), - Ts.iter(), - T_comms.into_iter() - ), - |acc_L, acc_R, T, T_comm| { - let W = zip_with!( - (acc_L.W.into_par_iter(), acc_R.W.par_iter()), - |w_L, w_R| w_L + r * w_R - ) - .collect(); - - let E = zip_with!( - (acc_L.E.into_par_iter(), T.par_iter(), acc_R.E.par_iter()), - |e_L, t, e_R| e_L + r * (*t + r * e_R) - ) - .collect(); - - let instance = { - let u = acc_L.instance.u + r * acc_R.instance.u; - let X = zip_eq(acc_L.instance.X.into_iter(), acc_R.instance.X.iter()) - .map(|(x_L, x_R)| x_L + r * x_R) - .collect(); - - // Compute scalar multiplications and resulting instances to be proved with the CycleFold circuit - // W_next = W_L + r * W_R - let W = acc_sm.scalar_mul(acc_L.instance.W, acc_R.instance.W, r, transcript); - - let E_tmp = acc_sm.scalar_mul(T_comm, acc_R.instance.E, r, transcript); - // E_next = E_L + r * E1_next = E_L + r * T + r^2 * E_R - let E = acc_sm.scalar_mul(acc_L.instance.E, E_tmp, r, transcript); - - RelaxedR1CSInstance { u, X, W, E } - }; - - let acc = Self { instance, W, E }; - - let merge_proof = MergeProof { T: T_comm }; - - (acc, merge_proof) - } - ) - .unzip() - } - - fn compute_fold_proof( - &self, - ck: &CommitmentKey, - shape: &R1CSShape, - u_new: Option, - X_new: &[E::Scalar], - W_new: &[E::Scalar], - ) -> (Vec, Commitment) { - let u_1 = self.instance.u; - let u_2 = u_new.unwrap_or(E::Scalar::ONE); - let (AZ_1, BZ_1, CZ_1) = tracing::trace_span!("AZ_1, BZ_1, CZ_1") - .in_scope(|| shape.multiply_witness(&self.W, &u_1, &self.instance.X)) - .unwrap(); - - let (AZ_2, BZ_2, CZ_2) = tracing::trace_span!("AZ_2, BZ_2, CZ_2") - .in_scope(|| shape.multiply_witness(W_new, &u_2, X_new)) - .unwrap(); - - let (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) = - tracing::trace_span!("cross terms").in_scope(|| { - let AZ_1_circ_BZ_2 = (0..AZ_1.len()) - .into_par_iter() - .map(|i| AZ_1[i] * BZ_2[i]) - .collect::>(); - let AZ_2_circ_BZ_1 = (0..AZ_2.len()) - .into_par_iter() - .map(|i| AZ_2[i] * BZ_1[i]) - .collect::>(); - let u_1_cdot_CZ_2 = (0..CZ_2.len()) - .into_par_iter() - .map(|i| u_1 * CZ_2[i]) - .collect::>(); - // TODO: Avoid multiplication by u2 if it is 1 - let u_2_cdot_CZ_1 = (0..CZ_1.len()) - .into_par_iter() - .map(|i| u_2 * CZ_1[i]) - .collect::>(); - (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) - }); - - let T = tracing::trace_span!("T").in_scope(|| { - AZ_1_circ_BZ_2 - .par_iter() - .zip_eq(&AZ_2_circ_BZ_1) - .zip_eq(&u_1_cdot_CZ_2) - .zip_eq(&u_2_cdot_CZ_1) - .map(|(((a, b), c), d)| *a + *b - *c - *d) - .collect::>() - }); - - let comm_T = CE::::commit(ck, &T); - (T, comm_T) - } -} - -impl RelaxedR1CS { - pub fn simulate_fold_secondary(transcript: &mut Transcript) -> FoldProof - where - E: CurveCycleEquipped, - { - let W = Commitment::::default(); - let T = Commitment::::default(); - transcript.absorb_commitment_secondary::(W); - transcript.absorb_commitment_secondary::(T); - let _r = transcript.squeeze(); - FoldProof { W, T } - } - - pub fn fold_secondary( - &mut self, - ck: &CommitmentKey, - shape: &R1CSShape, - X_new: Vec, - W_new: &[E2::Scalar], - transcript: &mut Transcript, - ) -> FoldProof - where - E: CurveCycleEquipped, - { - // TODO: Parallelize both of these operations - let W_comm_new = { E2::CE::commit(ck, W_new) }; - let (T, T_comm) = { self.compute_fold_proof(ck, shape, None, &X_new, W_new) }; - - transcript.absorb(comm_to_base::(&W_comm_new)); - transcript.absorb(comm_to_base::(&T_comm)); - // TODO: Squeeze - let r = 
transcript.squeeze_bits_secondary(NUM_CHALLENGE_BITS); - - self - .W - .par_iter_mut() - .zip_eq(W_new.par_iter()) - .for_each(|(w, w_new)| *w += r * w_new); - self - .E - .par_iter_mut() - .zip_eq(T.par_iter()) - .for_each(|(e, t)| *e += r * t); - - // For non-relaxed instances, u_new = 1 - self.instance.u += r; - self - .instance - .X - .iter_mut() - .zip_eq(X_new) - .for_each(|(x, x_new)| *x += r * x_new); - self.instance.W = self.instance.W + W_comm_new * r; - self.instance.E = self.instance.E + T_comm * r; - - FoldProof { - W: W_comm_new, - T: T_comm, - } } - /// Given two lists of [RelaxedR1CS] accumulators, - pub fn merge_secondary( - ck: &CommitmentKey, - shape: &R1CSShape, - acc_L: Self, - acc_R: &Self, - transcript: &mut Transcript, - ) -> (Self, MergeProof) - where - E: CurveCycleEquipped, - { - let (T, T_comm) = acc_L.compute_fold_proof( - ck, - shape, - Some(acc_R.instance.u), - &acc_R.instance.X, - &acc_R.W, - ); - - transcript.absorb(comm_to_base::(&T_comm)); - let r = transcript.squeeze_bits_secondary(NUM_CHALLENGE_BITS); - - let W = zip_with!( - (acc_L.W.into_par_iter(), acc_R.W.par_iter()), - |w_L, w_R| w_L + r * w_R - ) - .collect(); - - let E = zip_with!( - (acc_L.E.into_par_iter(), T.par_iter(), acc_R.E.par_iter()), - |e_L, t, e_R| e_L + r * (*t + r * e_R) - ) - .collect(); - - let instance = { - let u = acc_L.instance.u + r * acc_R.instance.u; - let X = zip_eq(acc_L.instance.X, &acc_R.instance.X) - .map(|(x_L, x_R)| x_L + r * x_R) - .collect(); - - let W = acc_L.instance.W + acc_R.instance.W * r; - let E_tmp = T_comm + acc_R.instance.E * r; - let E = acc_L.instance.E + E_tmp * r; - - RelaxedR1CSInstance { u, X, W, E } - }; - - let acc = Self { instance, W, E }; - - let merge_proof = MergeProof { T: T_comm }; - - (acc, merge_proof) - } + // /// Given two lists of [RelaxedR1CS] accumulators, + // pub fn merge_many( + // ck: &CommitmentKey, + // shapes: &[R1CSShape], + // mut accs_L: Vec, + // accs_R: &[Self], + // acc_sm: &mut ScalarMulAccumulator, + // transcript: &mut Transcript, + // ) -> Vec { + // // TODO: parallelize + // let (Ts, T_comms): (Vec<_>, Vec<_>) = zip_with!( + // (accs_L.iter_mut(), accs_R.iter(), shapes), + // |acc_L, acc_R, shape| { + // compute_fold_proof( + // ck, + // shape, + // &acc_L.instance.u, + // &[acc_L.instance.X], + // &acc_L.W, + // Some(acc_R.instance.u), + // &[acc_R.instance.X], + // &acc_R.W, + // ) + // } + // ) + // .unzip(); + // + // for T_comm in &T_comms { + // transcript.absorb(T_comm.into()); + // } + // let r = transcript.squeeze(); + // + // zip_with!( + // ( + // accs_L.into_iter(), + // accs_R.iter(), + // Ts.iter(), + // T_comms.into_iter() + // ), + // |acc_L, acc_R, T, T_comm| { + // let W = zip_with!( + // (acc_L.W.into_par_iter(), acc_R.W.par_iter()), + // |w_L, w_R| w_L + r * w_R + // ) + // .collect(); + // + // let E = zip_with!( + // (acc_L.E.into_par_iter(), T.par_iter(), acc_R.E.par_iter()), + // |e_L, t, e_R| e_L + r * (*t + r * e_R) + // ) + // .collect(); + // + // let instance = { + // assert_eq!(acc_L.instance.pp, acc_R.instance.pp); + // let pp = acc_L.instance.pp; + // + // let u = acc_L.instance.u + r * acc_R.instance.u; + // let X = acc_L.instance.X + r * acc_R.instance.X; + // + // // Compute scalar multiplications and resulting instances to be proved with the CycleFold circuit + // // W_next = W_L + r * W_R + // let W = acc_sm.scalar_mul(acc_L.instance.W, acc_R.instance.W, r, transcript); + // + // let E_tmp = acc_sm.scalar_mul(T_comm, acc_R.instance.E, r, transcript); + // // E_next = E_L + r * 
E1_next = E_L + r * T + r^2 * E_R + // let E = acc_sm.scalar_mul(acc_L.instance.E, E_tmp, r, transcript); + // + // RelaxedR1CSInstance { pp, u, X, W, E } + // }; + // Self { instance, W, E } + // } + // ) + // .collect() + // } } -impl RelaxedR1CSInstance { - pub(crate) fn default(num_io: usize) -> Self { +impl RelaxedR1CSInstance { + pub fn default() -> Self { Self { - u: E::Scalar::ZERO, - X: vec![E::Scalar::ZERO; num_io], - W: Commitment::::default(), - E: Commitment::::default(), + pp: Default::default(), + u: Default::default(), + X: Default::default(), + W: Default::default(), + E: Default::default(), } } -} - -impl RelaxedR1CSInstance { - pub(crate) fn as_preimage(&self) -> impl IntoIterator + '_ { - // TODO, decompose into real limbs - let u_limbs = [E2::Base::ZERO; BN_N_LIMBS]; - let X_limbs = self.X.iter().flat_map(|_x| [E2::Base::ZERO; BN_N_LIMBS]); - let W = comm_to_base::(&self.W); - let E = comm_to_base::(&self.E); - chain![u_limbs, X_limbs, W, E] + pub fn as_preimage(&self) -> impl IntoIterator> { + let pp = TranscriptElement::Scalar(self.pp); + let u = TranscriptElement::Scalar(self.u); + let X = TranscriptElement::Scalar(self.X); + let W = TranscriptElement::CommitmentPrimary(self.W.clone()); + let E = TranscriptElement::CommitmentPrimary(self.E.clone()); + [pp, u, X, W, E] } - pub fn io_size(&self) -> usize { - [ - BN_N_LIMBS, // u - self.X.len() * BN_N_LIMBS, // X - 2, // W - 2, // E - ] - .into_iter() - .sum() - } -} - -impl RelaxedR1CSInstance { /// On the primary curve, the instances are stored as hashes in the recursive state. - pub fn hash(&self, transcript_constants: &TranscriptConstants) -> E::Scalar { - let mut transcript = Transcript::new(transcript_constants.clone()); - let Self { u, X, W, E } = self; - - transcript.absorb([*u]); - transcript.absorb(X.iter().cloned()); - transcript.absorb_commitment_primary::(*W); - transcript.absorb_commitment_primary::(*E); - - transcript.squeeze() + pub fn hash(&self) -> E::Scalar { + let constants = R1CSPoseidonConstants::::new(); + let elements = self + .as_preimage() + .into_iter() + .map(|x| x.to_field()) + .flatten() + .collect::>(); + Poseidon::new_with_preimage(&elements, &constants).hash() } } - -/// Convert a commitment over the secondary curve to its coordinates to it can be added to a transcript defined -/// over the primary curve. -/// The `is_infinity` flag is not added since it is computed in the circuit and the coordinates are checked. 
-fn comm_to_base(comm: &Commitment) -> [E2::Base; 2] { - let (x, y, _) = comm.to_coordinates(); - [x, y] -} diff --git a/src/parafold/nivc/circuit.rs b/src/parafold/nivc/circuit.rs index 866c04a9..16142e9f 100644 --- a/src/parafold/nivc/circuit.rs +++ b/src/parafold/nivc/circuit.rs @@ -1,16 +1,17 @@ use bellpepper_core::boolean::{AllocatedBit, Boolean}; use bellpepper_core::num::AllocatedNum; use bellpepper_core::{ConstraintSystem, LinearCombination, SynthesisError}; -use ff::{Field, PrimeField}; -use itertools::{chain, zip_eq, Itertools}; +use ff::Field; +use itertools::{chain, zip_eq}; +use neptune::circuit2::{Elt, PoseidonCircuit2}; use crate::gadgets::utils::{alloc_num_equals, alloc_zero, conditionally_select}; use crate::parafold::cycle_fold::circuit::AllocatedScalarMulAccumulator; +use crate::parafold::cycle_fold::nifs::circuit::AllocatedSecondaryRelaxedR1CSInstance; use crate::parafold::nifs::circuit::AllocatedRelaxedR1CSInstance; -use crate::parafold::nifs::circuit_secondary::AllocatedSecondaryRelaxedR1CSInstance; -use crate::parafold::nifs::{FoldProof, RelaxedR1CSInstance}; +use crate::parafold::nifs::RelaxedR1CSInstance; use crate::parafold::nivc::{ - AllocatedNIVCIO, NIVCMergeProof, NIVCStateInstance, NIVCUpdateProof, NIVCIO, + AllocatedNIVCIO, NIVCPoseidonConstants, NIVCStateInstance, NIVCUpdateProof, NIVCIO, }; use crate::parafold::transcript::circuit::AllocatedTranscript; use crate::parafold::transcript::TranscriptConstants; @@ -21,7 +22,8 @@ use crate::traits::CurveCycleEquipped; /// and the `accs` are the accumulators for each step function that was used to produce this result. #[derive(Debug, Clone)] pub struct AllocatedNIVCState { - io: AllocatedNIVCIO, + transcript_init: AllocatedNum, + io: AllocatedNIVCIO, accs_hash: Vec>, acc_cf: AllocatedSecondaryRelaxedR1CSInstance, } @@ -32,28 +34,33 @@ impl AllocatedNIVCState { mut cs: CS, ro_consts: &TranscriptConstants, proof: NIVCUpdateProof, - ) -> Result<(Self, AllocatedTranscript), SynthesisError> + ) -> Result where CS: ConstraintSystem, { let NIVCUpdateProof { - transcript_init, + transcript_buffer, state, acc_prev, index_prev, - nifs_fold_proof, - sm_fold_proofs, } = proof; - // Initialize transcript with the state of the transcript in the previous iteration - let (mut transcript, transcript_init) = AllocatedTranscript::new_init( - cs.namespace(|| "init transcript"), + // Allocate inputs + let state = AllocatedNIVCState::alloc_unchecked(cs.namespace(|| "alloc state"), state); + + // Compute hash of inputs + let state_hash = state.hash(cs.namespace(|| "state hash"))?; + + let Self { transcript_init, - ro_consts.clone(), - ); + io, + accs_hash, + acc_cf, + } = state; - // Load the initial state from the proof, adding each field to the transcript - let mut state = Self::alloc_transcript(cs.namespace(|| "alloc state"), state, &mut transcript); + // Initialize transcript with the state of the transcript in the previous iteration + let mut transcript = + AllocatedTranscript::new(ro_consts.clone(), [state_hash.clone()], transcript_buffer); // Define the base case as transcript_init == 0 let is_base_case: Boolean = { @@ -67,47 +74,56 @@ impl AllocatedNIVCState { } .into(); - // Enforce base case on loaded state - state.enforce_base_case(cs.namespace(|| "base case"), &is_base_case); + // We only need to enforce that the NIVC IO is trivial. + // We do not need to check that `accs` and `acc_sm` are trivial, the only requirement is that they are + // valid RelaxedR1CS accumulators. 
In practice, we do actually supply trivial accumulators. + io.enforce_trivial( + cs.namespace(|| "is_base_case => (io.in == io.out)"), + &is_base_case, + ); + + acc_cf.enforce_trivial( + cs.namespace(|| "is_base_case => acc_cf.is_trivial"), + &is_base_case, + ); // Initialize scalar mul accumulator for folding - let mut acc_sm = AllocatedScalarMulAccumulator::new(); + let mut acc_sm = AllocatedScalarMulAccumulator::new(acc_cf); // Update the set of accumulators with the fresh folding proof - state.update_accs( + let accs_hash = Self::update_accs( cs.namespace(|| "update accs"), - ro_consts, - transcript_init, + accs_hash, + state_hash, acc_prev, index_prev, - nifs_fold_proof, &is_base_case, &mut acc_sm, &mut transcript, )?; // Prove all scalar multiplication by updating the secondary curve accumulator - state.acc_cf = acc_sm.finalize( - cs.namespace(|| "finalize acc_sm"), - state.acc_cf, - sm_fold_proofs, - &mut transcript, - )?; + let acc_cf = acc_sm.finalize(cs.namespace(|| "finalize acc_sm"), &mut transcript)?; // If this is the first iteration, then reset `acc_cf` to its default state since no scalar multiplications // were actually computed - state.acc_cf = state - .acc_cf - .select_default(cs.namespace(|| "enforce trivial acc_cf"), &is_base_case)?; + let acc_cf = acc_cf.select_default(cs.namespace(|| "enforce trivial acc_cf"), &is_base_case)?; - Ok((state, transcript)) + let transcript_state = transcript.seal(cs.namespace(|| "transcript seal"))?; + + Ok(Self { + transcript_init: transcript_state, + io, + accs_hash, + acc_cf, + }) } pub fn update_io( &mut self, mut cs: CS, step_circuit: &SF, - ) -> Result, SynthesisError> + ) -> Result, SynthesisError> where CS: ConstraintSystem, SF: EnforcingStepCircuit, @@ -125,171 +141,151 @@ impl AllocatedNIVCState { self.io.to_native() } - pub fn merge( - mut cs: CS, - self_L: Self, - self_R: Self, - ro_consts: &TranscriptConstants, - proof: NIVCMergeProof, - transcript: &mut AllocatedTranscript, - ) -> Result<(Self, NIVCIO), SynthesisError> - where - CS: ConstraintSystem, - { - let mut acc_sm = AllocatedScalarMulAccumulator::new(); - - let Self { - io: io_L, - accs_hash: accs_hash_L, - acc_cf: acc_cf_L, - } = self_L; - let Self { - io: io_R, - accs_hash: accs_hash_R, - acc_cf: acc_cf_R, - } = self_R; - - let io = AllocatedNIVCIO::merge(cs.namespace(|| "io merge"), io_L, io_R); - - // Load the preimages of the accumulators in each state - let (accs_L, accs_R) = { - let accs_L = Self::load_accs( - cs.namespace(|| "load accs_R"), - proof.accs_L, - accs_hash_L, - ro_consts, - )?; - let accs_R = Self::load_accs( - cs.namespace(|| "load accs_R"), - proof.accs_R, - accs_hash_R, - ro_consts, - )?; - (accs_L, accs_R) - }; - - // Merge the two lists of accumulators and return their hashes - let accs_hash = { - let accs = AllocatedRelaxedR1CSInstance::merge_many( - cs.namespace(|| "accs"), - accs_L, - accs_R, - &mut acc_sm, - proof.nivc_merge_proof, - transcript, - )?; - - accs - .into_iter() - .map(|acc| acc.hash(cs.namespace(|| "hash acc"), ro_consts)) - .collect::, _>>()? 
- }; - - // Merge the secondary curve accumulators - let acc_cf = AllocatedSecondaryRelaxedR1CSInstance::merge( - cs.namespace(|| "merge acc_cf"), - acc_cf_L, - acc_cf_R, - proof.cf_merge_proof, - transcript, - )?; - - // Prove all scalar multiplications by folding the result into the secondary curve accumulator - let acc_cf = acc_sm.finalize( - cs.namespace(|| "acc_sm finalize"), - acc_cf, - proof.sm_fold_proofs, - transcript, - )?; - let state = Self { - io, - accs_hash, - acc_cf, - }; - let io = state.io.to_native()?; - - Ok((state, io)) - } + // pub fn merge( + // mut cs: CS, + // self_L: Self, + // self_R: Self, + // ro_consts: &TranscriptConstants, + // proof: NIVCMergeProof, + // transcript: &mut AllocatedTranscript, + // ) -> Result<(Self, NIVCIO), SynthesisError> + // where + // CS: ConstraintSystem, + // { + // let mut acc_sm = AllocatedScalarMulAccumulator::new(); + // + // let Self { + // io: io_L, + // accs_hash: accs_hash_L, + // acc_cf: acc_cf_L, + // } = self_L; + // let Self { + // io: io_R, + // accs_hash: accs_hash_R, + // acc_cf: acc_cf_R, + // } = self_R; + // + // let io = AllocatedNIVCIO::merge(cs.namespace(|| "io merge"), io_L, io_R); + // + // // Load the preimages of the accumulators in each state + // let (accs_L, accs_R) = { + // let accs_L = Self::load_accs( + // cs.namespace(|| "load accs_R"), + // proof.accs_L, + // accs_hash_L, + // ro_consts, + // )?; + // let accs_R = Self::load_accs( + // cs.namespace(|| "load accs_R"), + // proof.accs_R, + // accs_hash_R, + // ro_consts, + // )?; + // (accs_L, accs_R) + // }; + // + // // Merge the two lists of accumulators and return their hashes + // let accs_hash = { + // let accs = AllocatedRelaxedR1CSInstance::merge_many( + // cs.namespace(|| "accs"), + // accs_L, + // accs_R, + // &mut acc_sm, + // proof.nivc_merge_proof, + // transcript, + // )?; + // + // accs + // .into_iter() + // .map(|acc| acc.hash(cs.namespace(|| "hash acc"), ro_consts)) + // .collect::, _>>()? + // }; + // + // // Merge the secondary curve accumulators + // let acc_cf = AllocatedSecondaryRelaxedR1CSInstance::merge( + // cs.namespace(|| "merge acc_cf"), + // acc_cf_L, + // acc_cf_R, + // proof.cf_merge_proof, + // transcript, + // )?; + // + // // Prove all scalar multiplications by folding the result into the secondary curve accumulator + // let acc_cf = acc_sm.finalize( + // cs.namespace(|| "acc_sm finalize"), + // acc_cf, + // proof.sm_fold_proofs, + // transcript, + // )?; + // let state = Self { + // io, + // accs_hash, + // acc_cf, + // }; + // let io = state.io.to_native()?; + // + // Ok((state, io)) + // } pub fn inputize(&self, mut cs: CS) -> Result<(), SynthesisError> where CS: ConstraintSystem, { - for x in self.as_preimage() { - x.inputize(cs.namespace(|| "inputize"))? 
- } - Ok(()) + let hash = self.hash(cs.namespace(|| "hash state"))?; + hash.inputize(cs.namespace(|| "inputize hash")) } - fn alloc_transcript( - mut cs: CS, - state: NIVCStateInstance, - transcript: &mut AllocatedTranscript, - ) -> Self + fn alloc_unchecked(mut cs: CS, state: NIVCStateInstance) -> Self where CS: ConstraintSystem, { - let NIVCStateInstance { - io, - accs_hash, - acc_cf, - } = state; + let transcript_init = + AllocatedNum::alloc_infallible(cs.namespace(|| "alloc transcript_init"), || { + state.transcript_state + }); - let io = AllocatedNIVCIO::alloc_transcript::<_, E>(cs.namespace(|| "alloc io"), io, transcript); + let io = AllocatedNIVCIO::alloc(cs.namespace(|| "alloc io"), state.io); - let accs_hash = accs_hash + let accs_hash = state + .accs_hash .into_iter() .map(|acc_hash| { - let acc_hash = - AllocatedNum::alloc_infallible(cs.namespace(|| "alloc acc_hash"), || acc_hash); - transcript.absorb([acc_hash.clone()]); - acc_hash + AllocatedNum::alloc_infallible(cs.namespace(|| "alloc acc_hash"), || acc_hash) }) .collect::>(); - let acc_cf = AllocatedSecondaryRelaxedR1CSInstance::alloc_transcript( + let acc_cf = AllocatedSecondaryRelaxedR1CSInstance::alloc_unchecked( cs.namespace(|| "alloc acc_cf"), - acc_cf, - transcript, + state.acc_cf, ); Self { + transcript_init, io, accs_hash, acc_cf, } } - fn enforce_base_case(&self, mut cs: CS, is_base_case: &Boolean) + pub fn hash(&self, mut cs: CS) -> Result, SynthesisError> where CS: ConstraintSystem, { - // We only need to enforce that the NIVC IO is trivial. - // We do not need to check that `accs` and `acc_sm` are trivial, the only requirement is that they are - // valid RelaxedR1CS accumulators. In practice, we do actually supply trivial accumulators. - self.io.enforce_trivial( - cs.namespace(|| "is_init => (io.in == io.out)"), - is_base_case, - ); - - self.acc_cf.enforce_trivial( - cs.namespace(|| "is_init => acc_cf.is_trivial"), - is_base_case, - ); + let elements = self.as_preimage().into_iter().collect::>(); + let constants = NIVCPoseidonConstants::::new_constant_length(elements.len()); + PoseidonCircuit2::new(elements, &constants).hash_to_allocated(cs.namespace(|| "state hash")) } fn update_accs( - &mut self, mut cs: CS, - ro_consts: &TranscriptConstants, - transcript_init: AllocatedNum, + accs_hash: Vec>, + state_hash: AllocatedNum, acc_prev: RelaxedR1CSInstance, index_prev: Option, - nifs_fold_proof: FoldProof, is_base_case: &Boolean, acc_sm: &mut AllocatedScalarMulAccumulator, - transcript: &mut AllocatedTranscript, - ) -> Result<(), SynthesisError> + transcript: &mut AllocatedTranscript, + ) -> Result>, SynthesisError> where CS: ConstraintSystem, { @@ -299,57 +295,44 @@ impl AllocatedNIVCState { AllocatedRelaxedR1CSInstance::alloc(cs.namespace(|| "alloc acc_prev"), acc_prev); // Compute its hash - let acc_prev_hash = acc_prev.hash(cs.namespace(|| "hash acc_prev"), ro_consts)?; + let acc_prev_hash = acc_prev.hash(cs.namespace(|| "hash acc_prev"))?; // Set the R1CS IO as the transcript init followed by the state - let X_prev = chain![[transcript_init], self.as_preimage()].collect::>(); - let acc_curr = acc_prev.fold( - cs.namespace(|| "fold"), - X_prev, - acc_sm, - nifs_fold_proof, - transcript, - )?; + let acc_curr = acc_prev.fold(cs.namespace(|| "fold"), state_hash, acc_sm, transcript)?; - let acc_curr_hash = acc_curr.hash(cs.namespace(|| "hash acc_curr"), ro_consts)?; + let acc_curr_hash = acc_curr.hash(cs.namespace(|| "hash acc_curr"))?; (acc_prev_hash, acc_curr_hash) }; // Create selector for acc_prev_hash and ensure 
it is contained in accs_hash let accs_hash_selector = { - let bits = self - .accs_hash + let bits = accs_hash .iter() .enumerate() - .map(|(index, acc_hash)| { - // Allocate a bit which - let bit = AllocatedBit::alloc(cs.namespace(|| "alloc selector"), { - let bit = if let Some(index_prev) = index_prev { - index_prev == index - } else { - false - }; - Some(bit) - }) - .unwrap(); + .map(|(i, acc_hash)| { + // Allocate a bit which is true if i == index_prev + let bit = index_prev.map_or(false, |index_prev| index_prev == i); + let bit = AllocatedBit::alloc(cs.namespace(|| format!("alloc selector[{i}]")), Some(bit)) + .unwrap(); // Ensure acc_hash[index_prev] = acc_prev_hash cs.enforce( - || "bit * (acc_hash - acc_prev_hash) = 0", + || format!("bit[{i}] * (acc_hash[{i}] - acc_prev_hash[{i}]) = 0"), |lc| lc + bit.get_variable(), |lc| lc + acc_hash.get_variable() - acc_prev_hash.get_variable(), |lc| lc, ); - bit + Boolean::Is(bit) }) .collect::>(); - let lc_sum = bits - .iter() - .fold(LinearCombination::zero(), |lc, bit| lc + bit.get_variable()); + // Compute sum of all bits + let lc_sum = bits.iter().fold(LinearCombination::zero(), |lc, bit| { + lc + &bit.lc(CS::one(), E::Scalar::ONE) + }); // Ensure only 1 selection bit is true, except in the base case where all bits are 0 cs.enforce( @@ -363,97 +346,93 @@ impl AllocatedNIVCState { }; // Update hashes of accumulators in state - self - .accs_hash - .iter_mut() - .zip_eq(accs_hash_selector) - .for_each(|(acc_hash, bit)| { - *acc_hash = conditionally_select( + zip_eq(accs_hash.into_iter(), accs_hash_selector.iter()) + .map(|(acc_hash, bit)| { + conditionally_select( cs.namespace(|| "accs_hash_curr"), &acc_curr_hash, - acc_hash, - &Boolean::Is(bit), + &acc_hash, + bit, ) - .unwrap(); - }); - Ok(()) + }) + .collect::, _>>() } - fn as_preimage(&self) -> impl IntoIterator> + '_ { + fn as_preimage(&self) -> impl IntoIterator> + '_ { chain![ + [Elt::Allocated(self.transcript_init.clone())], self.io.as_preimage(), - self.accs_hash.iter().cloned(), + self.accs_hash.iter().cloned().map(Elt::Allocated), self.acc_cf.as_preimage() ] } - fn load_accs( - mut cs: CS, - accs_native: Vec>, - accs_hash: Vec>, - ro_consts: &TranscriptConstants, - ) -> Result>, SynthesisError> - where - CS: ConstraintSystem, - { - zip_eq(accs_native, accs_hash) - .map( - |(acc_native, acc_hash): (RelaxedR1CSInstance, AllocatedNum)| { - let acc = AllocatedRelaxedR1CSInstance::alloc(cs.namespace(|| "alloc acc"), acc_native); - let acc_hash_real = acc.hash(cs.namespace(|| "hash acc"), ro_consts)?; - - // Ensure the loaded accumulator's hash matches the one from the state - cs.enforce( - || "acc_hash_real == acc_hash", - |lc| lc, - |lc| lc, - |lc| lc + acc_hash_real.get_variable() - acc_hash.get_variable(), - ); - Ok::<_, SynthesisError>(acc) - }, - ) - .collect::, _>>() - } + // fn load_accs( + // mut cs: CS, + // accs_native: Vec>, + // accs_hash: Vec>, + // ) -> Result>, SynthesisError> + // where + // CS: ConstraintSystem, + // { + // zip_eq(accs_native, accs_hash) + // .map( + // |(acc_native, acc_hash): (RelaxedR1CSInstance, AllocatedNum)| { + // let acc = AllocatedRelaxedR1CSInstance::alloc(cs.namespace(|| "alloc acc"), acc_native); + // let acc_hash_real = acc.hash(cs.namespace(|| "hash acc"))?; + // + // // Ensure the loaded accumulator's hash matches the one from the state + // cs.enforce( + // || "acc_hash_real == acc_hash", + // |lc| lc, + // |lc| lc, + // |lc| lc + acc_hash_real.get_variable() - acc_hash.get_variable(), + // ); + // Ok::<_, SynthesisError>(acc) + // }, + 
// ) + // .collect::, _>>() + // } } -impl AllocatedNIVCIO { - pub fn merge(mut cs: CS, io_L: Self, io_R: Self) -> Self - where - CS: ConstraintSystem, - { - // io_L.pc_out = io_R.pc_in - cs.enforce( - || "io_L.pc_out = io_R.pc_in", - |lc| lc, - |lc| lc, - |lc| lc + io_L.pc_out.get_variable() - io_R.pc_in.get_variable(), - ); - - // io_L.z_out = io_R.z_in - zip_eq(&io_L.z_out, &io_R.z_in) - .enumerate() - .for_each(|(i, (z_L_i, z_R_i))| { - cs.enforce( - || format!("io_L.z_out[{i}] = io_R.z_in[{i}]"), - |lc| lc, - |lc| lc, - |lc| lc + z_L_i.get_variable() - z_R_i.get_variable(), - ); - }); - - Self { - pc_in: io_L.pc_in, - z_in: io_L.z_in, - pc_out: io_R.pc_out, - z_out: io_R.z_out, - } - } +impl AllocatedNIVCIO { + // pub fn merge(mut cs: CS, io_L: Self, io_R: Self) -> Self + // where + // CS: ConstraintSystem, + // { + // // io_L.pc_out = io_R.pc_in + // cs.enforce( + // || "io_L.pc_out = io_R.pc_in", + // |lc| lc, + // |lc| lc, + // |lc| lc + io_L.pc_out.get_variable() - io_R.pc_in.get_variable(), + // ); + // + // // io_L.z_out = io_R.z_in + // zip_eq(&io_L.z_out, &io_R.z_in) + // .enumerate() + // .for_each(|(i, (z_L_i, z_R_i))| { + // cs.enforce( + // || format!("io_L.z_out[{i}] = io_R.z_in[{i}]"), + // |lc| lc, + // |lc| lc, + // |lc| lc + z_L_i.get_variable() - z_R_i.get_variable(), + // ); + // }); + // + // Self { + // pc_in: io_L.pc_in, + // z_in: io_L.z_in, + // pc_out: io_R.pc_out, + // z_out: io_R.z_out, + // } + // } pub fn enforce_trivial(&self, mut cs: CS, is_trivial: &Boolean) where - CS: ConstraintSystem, + CS: ConstraintSystem, { - let is_trivial = is_trivial.lc(CS::one(), F::ONE); + let is_trivial = is_trivial.lc(CS::one(), E::Scalar::ONE); // (is_trivial) * (z_in - z_out) = 0 zip_eq(&self.z_in, &self.z_out) @@ -476,22 +455,19 @@ impl AllocatedNIVCIO { ); } - pub fn as_preimage(&self) -> impl IntoIterator> + '_ { + pub fn as_preimage(&self) -> impl IntoIterator> + '_ { chain![ [self.pc_in.clone()], self.z_in.iter().cloned(), [self.pc_out.clone()], self.z_out.iter().cloned() ] + .map(Elt::Allocated) } - pub fn alloc_transcript>( - mut cs: CS, - state: NIVCIO, - transcript: &mut AllocatedTranscript, - ) -> Self + pub fn alloc(mut cs: CS, state: NIVCIO) -> Self where - CS: ConstraintSystem, + CS: ConstraintSystem, { let NIVCIO { pc_in, @@ -517,18 +493,16 @@ impl AllocatedNIVCIO { }) .collect(); - let io = Self { + Self { pc_in, z_in, pc_out, z_out, - }; - transcript.absorb(io.as_preimage()); - io + } } /// Attempt to extract the native representation. 
- pub fn to_native(&self) -> Result, SynthesisError> { + pub fn to_native(&self) -> Result, SynthesisError> { let pc_in = self .pc_in .get_value() diff --git a/src/parafold/nivc/mod.rs b/src/parafold/nivc/mod.rs index 4bf17157..6df9d2b6 100644 --- a/src/parafold/nivc/mod.rs +++ b/src/parafold/nivc/mod.rs @@ -1,8 +1,11 @@ use bellpepper_core::num::AllocatedNum; -use ff::PrimeField; +use neptune::generic_array::typenum::U24; +use neptune::poseidon::PoseidonConstants; -use crate::parafold::nifs::{FoldProof, MergeProof, RelaxedR1CSInstance}; -use crate::traits::CurveCycleEquipped; +use crate::parafold::cycle_fold::nifs::prover::RelaxedSecondaryR1CSInstance; +use crate::parafold::nifs::RelaxedR1CSInstance; +use crate::parafold::transcript::TranscriptElement; +use crate::traits::{CurveCycleEquipped, Engine}; pub mod circuit; pub mod prover; @@ -12,51 +15,47 @@ pub mod prover; /// # Note /// - An IO result is trivial if {pc, z}_in == {pc, z}_out #[derive(Debug, Clone, PartialEq, Eq)] -pub struct NIVCIO { - pc_in: F, - z_in: Vec, - pc_out: F, - z_out: Vec, +pub struct NIVCIO { + pc_in: E::Scalar, + z_in: Vec, + pc_out: E::Scalar, + z_out: Vec, } /// The input and output of a NIVC computation over one or more steps. #[derive(Debug, Clone)] -pub struct AllocatedNIVCIO { - pc_in: AllocatedNum, - z_in: Vec>, - pc_out: AllocatedNum, - z_out: Vec>, +pub struct AllocatedNIVCIO { + pc_in: AllocatedNum, + z_in: Vec>, + pc_out: AllocatedNum, + z_out: Vec>, } +type NIVCPoseidonConstants = PoseidonConstants<::Scalar, U24>; + /// Succinct representation of the recursive NIVC state that is known #[derive(Clone, Debug)] pub struct NIVCStateInstance { - io: NIVCIO, + transcript_state: E::Scalar, + io: NIVCIO, accs_hash: Vec, - acc_cf: RelaxedR1CSInstance, + acc_cf: RelaxedSecondaryR1CSInstance, } /// A proof for loading a previous NIVC output inside a circuit. 
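+/// It carries the prover's transcript messages for this round (`transcript_buffer`), the claimed
+/// previous state, and the accumulator `acc_prev` at position `index_prev` into which that state
+/// is folded by the verifier circuit.
+///
+/// Illustrative sketch of how the proof is produced and then consumed in-circuit, following
+/// `RecursiveSNARK::new` and `RecursiveSNARK::prove_step` (the surrounding variables are assumed):
+/// ```ignore
+/// let (state, proof) = NIVCState::new(&pk.shapes, &pk.shape_cf, pc_init, z_init, &pk.ro_consts);
+/// let io = synthesize_step(&mut cs, &pk.ro_consts, proof, step_circuit).unwrap();
+/// ```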
#[derive(Debug, Clone)] pub struct NIVCUpdateProof { - transcript_init: E::Scalar, + transcript_buffer: Vec>, state: NIVCStateInstance, acc_prev: RelaxedR1CSInstance, index_prev: Option, - nifs_fold_proof: FoldProof, - - sm_fold_proofs: [FoldProof; 2], } #[derive(Debug, Clone)] pub struct NIVCMergeProof { + transcript_buffer: Vec, accs_L: Vec>, accs_R: Vec>, - nivc_merge_proof: Vec>, - - cf_merge_proof: MergeProof, - - sm_fold_proofs: Vec>, } diff --git a/src/parafold/nivc/prover.rs b/src/parafold/nivc/prover.rs index a932f165..ca2d7a62 100644 --- a/src/parafold/nivc/prover.rs +++ b/src/parafold/nivc/prover.rs @@ -1,28 +1,31 @@ -use ff::{Field, PrimeField}; +use ff::Field; use itertools::chain; +use neptune::Poseidon; +use crate::parafold::cycle_fold::nifs::prover::RelaxedSecondaryR1CS; use crate::parafold::cycle_fold::prover::ScalarMulAccumulator; use crate::parafold::nifs::prover::RelaxedR1CS; -use crate::parafold::nifs::{FoldProof, RelaxedR1CSInstance}; -use crate::parafold::nivc::{NIVCMergeProof, NIVCStateInstance, NIVCUpdateProof, NIVCIO}; +use crate::parafold::nifs::RelaxedR1CSInstance; +use crate::parafold::nivc::{NIVCPoseidonConstants, NIVCStateInstance, NIVCUpdateProof, NIVCIO}; use crate::parafold::transcript::prover::Transcript; -use crate::parafold::transcript::TranscriptConstants; +use crate::parafold::transcript::{TranscriptConstants, TranscriptElement}; use crate::r1cs::R1CSShape; -use crate::traits::{CurveCycleEquipped, Engine}; +use crate::traits::CurveCycleEquipped; use crate::CommitmentKey; #[derive(Debug)] pub struct NIVCState { - transcript: Transcript, - io: NIVCIO, + transcript_state: E::Scalar, + io: NIVCIO, accs: Vec>, - acc_cf: RelaxedR1CS, + acc_cf: RelaxedSecondaryR1CS, } #[derive(Debug)] -pub struct NIVCUpdateWitness { +pub struct NIVCUpdateWitness { pub(crate) index: usize, pub(crate) W: Vec, + pub(crate) io: NIVCIO, } impl NIVCState { @@ -33,175 +36,187 @@ impl NIVCState { /// In the first iteration, the circuit verifier checks the base-case conditions, but does not update any /// of the accumulators. To ensure uniformity with the non-base case path, the transcript will be updated /// in the normal way, albeit with dummy proof data. 
- /// - /// - pub fn init( + pub fn new( shapes: &[R1CSShape], shape_cf: &R1CSShape, - ro_consts: &TranscriptConstants, pc_init: usize, z_init: Vec, + ro_consts: &TranscriptConstants, ) -> (Self, NIVCUpdateProof) { - let transcript_init = E::Scalar::ZERO; - let mut state = Self { - transcript: Transcript::new_init(transcript_init, ro_consts.clone()), - io: NIVCIO::new(pc_init, z_init), - accs: shapes.iter().map(|shape| RelaxedR1CS::new(shape)).collect(), - acc_cf: RelaxedR1CS::new(shape_cf), + let transcript_state = E::Scalar::ZERO; + let io = NIVCIO::new(pc_init, z_init); + let accs = shapes + .iter() + .map(|shape| RelaxedR1CS::new(shape)) + .collect::>(); + let acc_cf = RelaxedSecondaryR1CS::new(shape_cf); + + let state_instance = NIVCStateInstance { + transcript_state, + io: io.clone(), + accs_hash: accs.iter().map(|acc| acc.instance().hash()).collect(), + acc_cf: acc_cf.instance().clone(), }; - let state_instance = state.instance(ro_consts); + let state_hash = state_instance.hash(); + + let mut transcript = Transcript::new(ro_consts.clone(), [state_hash]); - let num_io = state_instance.io_size(); - state.transcript.absorb(state_instance.as_preimage()); - let acc_prev = RelaxedR1CSInstance::default(num_io); + let mut acc_sm = ScalarMulAccumulator::new(acc_cf); + RelaxedR1CS::simulate_fold(&mut acc_sm, &mut transcript); + let acc_cf = acc_sm.simulate_finalize(&mut transcript); - let mut acc_sm = ScalarMulAccumulator::new(); - let nifs_fold_proof = RelaxedR1CS::simulate_fold_primary(&mut acc_sm, &mut state.transcript); - let sm_fold_proofs: [FoldProof; 2] = acc_sm - .simulate_finalize(&mut state.transcript) - .try_into() - .unwrap(); + let (transcript_state, transcript_buffer) = transcript.seal(); let proof = NIVCUpdateProof { - transcript_init, + transcript_buffer, state: state_instance, - acc_prev, + acc_prev: RelaxedR1CSInstance::::default(), index_prev: None, - nifs_fold_proof, - sm_fold_proofs, + }; + + let state = Self { + transcript_state, + io, + accs, + acc_cf, }; (state, proof) } - fn update( - &mut self, + pub fn update( + self, ck: &CommitmentKey, ck_cf: &CommitmentKey, ro_consts: &TranscriptConstants, shapes: &[R1CSShape], shape_cf: &R1CSShape, witness_prev: &NIVCUpdateWitness, - ) -> NIVCUpdateProof { - let mut acc_sm = ScalarMulAccumulator::::new(); - let transcript_init = self.transcript.seal(); + ) -> (Self, NIVCUpdateProof) { + let state_instance = self.instance(); + let state_hash = state_instance.hash(); + let mut transcript = Transcript::new(ro_consts.clone(), [state_hash]); - let state = self.instance(ro_consts); + let Self { + transcript_state, + mut io, + mut accs, + acc_cf, + } = self; + + let mut acc_sm = ScalarMulAccumulator::new(acc_cf); - let X_prev = chain![[transcript_init], state.as_preimage()].collect(); + let X_prev = transcript_state; let NIVCUpdateWitness { index: index_prev, W: W_prev, + io: io_next, } = witness_prev; + + io.update(io_next.clone()); + let index_prev = *index_prev; - let acc_prev = self.accs[index_prev].instance().clone(); + let acc_prev = accs[index_prev].instance().clone(); let shape_prev = &shapes[index_prev]; // Fold the proof for the previous iteration into the correct accumulator - let nifs_fold_proof = self.accs[index_prev].fold_primary( - ck, - shape_prev, - X_prev, - W_prev, - &mut acc_sm, - &mut self.transcript, - ); - - let sm_fold_proofs: [FoldProof; 2] = acc_sm - .finalize(ck_cf, shape_cf, &mut self.acc_cf, &mut self.transcript) - .try_into() - .unwrap(); - - NIVCUpdateProof { - transcript_init, - state, - acc_prev, - 
index_prev: Some(index_prev), - nifs_fold_proof, - sm_fold_proofs, - } - } - - pub fn merge( - ck: &CommitmentKey, - ck_cf: &CommitmentKey, - shapes: &[R1CSShape], - shape_cf: &R1CSShape, - self_L: Self, - self_R: &Self, - ) -> (Self, NIVCMergeProof) { - let Self { - transcript: transcript_L, - io: io_L, - accs: accs_L, - acc_cf: acc_cf_L, - } = self_L; - let Self { - transcript: transcript_R, - io: io_R, - accs: accs_R, - acc_cf: acc_cf_R, - } = self_R; + accs[index_prev].fold(ck, shape_prev, X_prev, W_prev, &mut acc_sm, &mut transcript); + let acc_cf = acc_sm.finalize(ck_cf, shape_cf, &mut transcript); - let mut acc_sm = ScalarMulAccumulator::new(); - let mut transcript = Transcript::merge(transcript_L, transcript_R); + let (transcript_state, transcript_buffer) = transcript.seal(); - let io = NIVCIO::merge(io_L, io_R.clone()); - - let accs_L_instance = accs_L - .iter() - .map(|acc| acc.instance()) - .cloned() - .collect::>(); - let accs_R_instance = accs_R - .iter() - .map(|acc| acc.instance()) - .cloned() - .collect::>(); - - let (accs, nivc_merge_proof) = - RelaxedR1CS::merge_many(ck, shapes, accs_L, accs_R, &mut acc_sm, &mut transcript); - - let (mut acc_cf, cf_merge_proof) = RelaxedR1CS::::merge_secondary::( - ck_cf, - shape_cf, - acc_cf_L, - acc_cf_R, - &mut transcript, - ); - - let sm_fold_proofs = acc_sm.finalize(ck_cf, shape_cf, &mut acc_cf, &mut transcript); - - let self_next = Self { - transcript, + let proof = NIVCUpdateProof { + transcript_buffer, + state: state_instance, + acc_prev, + index_prev: Some(index_prev), + }; + let state = Self { + transcript_state, io, accs, acc_cf, }; - - let merge_proof = NIVCMergeProof { - accs_L: accs_L_instance, - accs_R: accs_R_instance, - nivc_merge_proof, - cf_merge_proof, - sm_fold_proofs, - }; - - (self_next, merge_proof) + (state, proof) } - pub fn instance(&self, ro_consts: &TranscriptConstants) -> NIVCStateInstance { - let accs_hash = self - .accs - .iter() - .map(|acc| acc.instance().hash(ro_consts)) - .collect(); + // pub fn merge( + // ck: &CommitmentKey, + // ck_cf: &CommitmentKey, + // shapes: &[R1CSShape], + // shape_cf: &R1CSShape, + // self_L: Self, + // self_R: &Self, + // ) -> (Self, NIVCMergeProof) { + // let Self { + // transcript: transcript_L, + // io: io_L, + // accs: accs_L, + // acc_cf: acc_cf_L, + // } = self_L; + // let Self { + // transcript: transcript_R, + // io: io_R, + // accs: accs_R, + // acc_cf: acc_cf_R, + // } = self_R; + // + // let mut acc_sm = ScalarMulAccumulator::new(); + // let mut transcript = Transcript::merge(transcript_L, transcript_R); + // + // let io = NIVCIO::merge(io_L, io_R.clone()); + // + // let accs_L_instance = accs_L + // .iter() + // .map(|acc| acc.instance()) + // .cloned() + // .collect::>(); + // let accs_R_instance = accs_R + // .iter() + // .map(|acc| acc.instance()) + // .cloned() + // .collect::>(); + // + // let (accs, nivc_merge_proof) = + // RelaxedR1CS::merge_many(ck, shapes, accs_L, accs_R, &mut acc_sm, &mut transcript); + // + // let (mut acc_cf, cf_merge_proof) = RelaxedR1CS::::merge_secondary::( + // ck_cf, + // shape_cf, + // acc_cf_L, + // acc_cf_R, + // &mut transcript, + // ); + // + // let sm_fold_proofs = acc_sm.finalize(ck_cf, shape_cf, &mut acc_cf, &mut transcript); + // + // let self_next = Self { + // transcript, + // io, + // accs, + // acc_cf, + // }; + // + // let merge_proof = NIVCMergeProof { + // accs_L: accs_L_instance, + // accs_R: accs_R_instance, + // nivc_merge_proof, + // cf_merge_proof, + // sm_fold_proofs, + // }; + // + // (self_next, 
merge_proof) + // } + + pub fn instance(&self) -> NIVCStateInstance { + let accs_hash = self.accs.iter().map(|acc| acc.instance().hash()).collect(); NIVCStateInstance { + transcript_state: self.transcript_state, io: self.io.clone(), accs_hash, acc_cf: self.acc_cf.instance().clone(), @@ -210,63 +225,66 @@ impl NIVCState { } impl NIVCStateInstance { - pub fn as_preimage(&self) -> impl IntoIterator + '_ { + pub fn hash(&self) -> E::Scalar { + let elements = self + .as_preimage() + .into_iter() + .map(|x| x.to_field()) + .flatten() + .collect::>(); + let constants = NIVCPoseidonConstants::::new_constant_length(elements.len()); + Poseidon::new_with_preimage(&elements, &constants).hash() + } + + pub fn as_preimage(&self) -> impl IntoIterator> + '_ { chain![ + [TranscriptElement::Scalar(self.transcript_state)], self.io.as_preimage(), - self.accs_hash.iter().cloned(), + self + .accs_hash + .iter() + .cloned() + .map(TranscriptElement::Scalar), self.acc_cf.as_preimage() ] } - - pub fn io_size(&self) -> usize { - [ - 1, // transcript init - self.io.io_size(), // io - self.accs_hash.len(), // accs_hash - self.acc_cf.io_size(), // acc_cf - ] - .into_iter() - .sum() - } } -impl NIVCIO { - pub fn new(pc_init: usize, z_init: Vec) -> Self { +impl NIVCIO { + pub fn new(pc_init: usize, z_init: Vec) -> Self { Self { - pc_in: F::from(pc_init as u64), + pc_in: E::Scalar::from(pc_init as u64), z_in: z_init.clone(), - pc_out: F::from(pc_init as u64), + pc_out: E::Scalar::from(pc_init as u64), z_out: z_init, } } - pub fn merge(self_L: Self, self_R: Self) -> Self { - assert_eq!(self_L.pc_out, self_R.pc_in); - assert_eq!(self_L.z_out, self_R.z_in); - Self { - pc_in: self_L.pc_in, - z_in: self_L.z_in, - pc_out: self_R.pc_out, - z_out: self_R.z_out, - } + pub fn update(&mut self, io_next: Self) { + assert_eq!(self.pc_in, io_next.pc_in); + assert_eq!(self.z_in, io_next.z_in); + self.pc_out = io_next.pc_out; + self.z_out = io_next.z_out; } - pub fn as_preimage(&self) -> impl IntoIterator + '_ { + + // pub fn merge(self_L: Self, self_R: Self) -> Self { + // assert_eq!(self_L.pc_out, self_R.pc_in); + // assert_eq!(self_L.z_out, self_R.z_in); + // Self { + // pc_in: self_L.pc_in, + // z_in: self_L.z_in, + // pc_out: self_R.pc_out, + // z_out: self_R.z_out, + // } + // } + + pub fn as_preimage(&self) -> impl IntoIterator> + '_ { chain![ [self.pc_in], self.z_in.iter().cloned(), [self.pc_out], self.z_out.iter().cloned() ] - } - - pub fn io_size(&self) -> usize { - [ - 1, // pc_in - self.z_in.len(), // z_in - 1, // pc_out - self.z_out.len(), // z_out - ] - .into_iter() - .sum() + .map(TranscriptElement::Scalar) } } diff --git a/src/parafold/prover.rs b/src/parafold/prover.rs index 676db712..4b994198 100644 --- a/src/parafold/prover.rs +++ b/src/parafold/prover.rs @@ -1,9 +1,13 @@ -use crate::parafold::nivc::prover::NIVCState; +use crate::bellpepper::solver::SatisfyingAssignment; +use crate::parafold::circuit::synthesize_step; +use crate::parafold::nivc::prover::{NIVCState, NIVCUpdateWitness}; use crate::parafold::nivc::NIVCUpdateProof; use crate::parafold::transcript::TranscriptConstants; use crate::r1cs::R1CSShape; +use crate::supernova::StepCircuit; use crate::traits::CurveCycleEquipped; use crate::CommitmentKey; +use bellpepper_core::ConstraintSystem; pub struct ProvingKey { // public params @@ -27,38 +31,35 @@ impl RecursiveSNARK { assert!(pc_init < num_circuits); // Check arity z_init.len(); - let (state, proof) = NIVCState::init(&pk.shapes, &pk.shape_cf, &pk.ro_consts, pc_init, z_init); + let (state, proof) = 
NIVCState::new(&pk.shapes, &pk.shape_cf, pc_init, z_init, &pk.ro_consts); Self { state, proof } } - // pub fn prove_step>( - // &mut self, - // pk: &ProvingKey, - // step_circuit: &C, - // ) -> Self { - // let Self { state, proof } = self; - // let circuit_index = step_circuit.circuit_index(); - // let mut cs = SatisfyingAssignment::::new(); - // let io = synthesize_step(&mut cs, &pk.ro_consts, proof, step_circuit).unwrap(); - // let W = cs.aux_assignment(); - // // assert state_instance == state.instance - // let witness = NIVCUpdateWitness { - // index: circuit_index, - // W: W.to_vec(), - // }; - // - // - // let proof = state.update( - // &pk.ck, - // &pk.shapes, - // &pk.nivc_hasher, - // &witness, - // &mut transcript, - // ); - // - // Self { state, proof } - // } + pub fn prove_step>(self, pk: &ProvingKey, step_circuit: &C) -> Self { + let Self { state, proof } = self; + let circuit_index = step_circuit.circuit_index(); + let mut cs = SatisfyingAssignment::::new(); + let io = synthesize_step(&mut cs, &pk.ro_consts, proof, step_circuit).unwrap(); + let W = cs.aux_assignment(); + + let witness = NIVCUpdateWitness { + index: circuit_index, + W: W.to_vec(), + io, + }; + + let (state, proof) = state.update( + &pk.ck, + &pk.ck_cf, + &pk.ro_consts, + &pk.shapes, + &pk.shape_cf, + &witness, + ); + + Self { state, proof } + } // pub fn merge>( // pk: &ProvingKey, @@ -110,5 +111,3 @@ impl RecursiveSNARK { // Self { state, proof } // } } - -// pub struct CompressedSNARK {} diff --git a/src/parafold/transcript/circuit.rs b/src/parafold/transcript/circuit.rs index cc59fbd7..02a99e94 100644 --- a/src/parafold/transcript/circuit.rs +++ b/src/parafold/transcript/circuit.rs @@ -1,78 +1,175 @@ -use bellpepper_core::boolean::{AllocatedBit, Boolean}; +use bellpepper_core::boolean::Boolean; use bellpepper_core::num::AllocatedNum; use bellpepper_core::{ConstraintSystem, SynthesisError}; -use ff::{PrimeField, PrimeFieldBits}; +use itertools::chain; use neptune::circuit2::Elt; use neptune::sponge::api::{IOPattern, SpongeAPI, SpongeOp}; use neptune::sponge::circuit::SpongeCircuit; use neptune::sponge::vanilla::Mode::Simplex; use neptune::sponge::vanilla::SpongeTrait; -use crate::parafold::transcript::TranscriptConstants; +use crate::parafold::cycle_fold::gadgets::emulated::AllocatedBase; +use crate::parafold::cycle_fold::gadgets::secondary_commitment::AllocatedSecondaryCommitment; +use crate::parafold::cycle_fold::AllocatedPrimaryCommitment; +use crate::parafold::transcript::{TranscriptConstants, TranscriptElement}; +use crate::traits::CurveCycleEquipped; -pub struct AllocatedTranscript { - constants: TranscriptConstants, - state: Vec>, +pub struct AllocatedTranscript { + constants: TranscriptConstants, + + // Output challenge of the previous round + prev: Option>, + // Elements to be hashed in this round + state: Vec>, + + // Entire contents of the prover messages + buffer: std::vec::IntoIter>, } -impl AllocatedTranscript { - pub fn new(constants: TranscriptConstants) -> Self { +impl AllocatedTranscript { + /// Initialize the transcript created by a prover. 
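+  /// `init` seeds the sponge state (in practice, the hash of the previous NIVC state), while
+  /// `buffer` holds the prover's messages for this round; the verifier circuit reads them back in
+  /// the same order through the `read_*` methods, absorbing each one before squeezing challenges.
+  ///
+  /// Illustrative sketch, mirroring the construction in `AllocatedNIVCState::from_proof`
+  /// (the read and the namespace labels here are hypothetical):
+  /// ```ignore
+  /// let mut transcript = AllocatedTranscript::new(ro_consts.clone(), [state_hash.clone()], transcript_buffer);
+  /// let T = transcript.read_commitment_primary(cs.namespace(|| "read T"))?;
+  /// let r = transcript.squeeze(cs.namespace(|| "squeeze r"))?;
+  /// ```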
+ pub fn new( + constants: TranscriptConstants, + init: impl IntoIterator>, + buffer: Vec>, + ) -> Self { Self { constants, - state: vec![], + prev: None, + state: Vec::from_iter(init.into_iter().map(Elt::Allocated)), + buffer: buffer.into_iter(), } } - pub fn new_init( + /// Reads a single field element from the transcript + pub fn read_scalar(&mut self, mut cs: CS) -> Result, SynthesisError> + where + CS: ConstraintSystem, + { + let element = self + .buffer + .next() + .ok_or(SynthesisError::AssignmentMissing)?; + + let val = match element { + TranscriptElement::Scalar(val) => { + AllocatedNum::alloc_infallible(cs.namespace(|| "alloc"), || val) + } + _ => return Err(SynthesisError::Unsatisfiable), + }; + self.state.push(val.clone().into()); + Ok(val) + } + + pub fn read_scalar_vec( + &mut self, mut cs: CS, - init: F, - constants: TranscriptConstants, - ) -> (Self, AllocatedNum) + len: usize, + ) -> Result>, SynthesisError> where - CS: ConstraintSystem, + CS: ConstraintSystem, { - let init = AllocatedNum::alloc_infallible(&mut cs, || init); - let init_elt = Elt::Allocated(init.clone()); - ( - Self { - constants, - state: vec![init_elt], - }, - init, - ) + (0..len) + .map(|i| self.read_scalar(cs.namespace(|| i.to_string()))) + .collect() } - pub fn absorb(&mut self, elements: impl IntoIterator>) { - self.state.extend(elements.into_iter().map(Elt::Allocated)); + /// Reads a single field element from the transcript + pub fn read_base(&mut self, mut cs: CS) -> Result, SynthesisError> + where + CS: ConstraintSystem, + { + let element = self + .buffer + .next() + .ok_or(SynthesisError::AssignmentMissing)?; + + let allocated_base = match element { + TranscriptElement::Base(base) => AllocatedBase::alloc(cs.namespace(|| "alloc base"), base), + _ => return Err(SynthesisError::AssignmentMissing), + }; + + self.state.extend(allocated_base.as_preimage()); + + Ok(allocated_base) } - pub(crate) fn inputize(&self, mut cs: CS) -> Result<(), SynthesisError> + pub fn read_commitment_primary( + &mut self, + mut cs: CS, + ) -> Result, SynthesisError> where - CS: ConstraintSystem, + CS: ConstraintSystem, { - assert_eq!(self.state.len(), 1); - let state = self.state[0].ensure_allocated(&mut cs, false)?; - state.inputize(&mut cs) + let element = self + .buffer + .next() + .ok_or(SynthesisError::AssignmentMissing)?; + + let allocated_hashed_commitment = match element { + TranscriptElement::CommitmentPrimary(commitment) => { + AllocatedPrimaryCommitment::alloc(cs.namespace(|| "alloc commitment primary"), commitment) + } + _ => return Err(SynthesisError::AssignmentMissing), + }; + + self.state.extend(allocated_hashed_commitment.as_preimage()); + + Ok(allocated_hashed_commitment) } - pub fn squeeze(&mut self, mut cs: CS) -> Result, SynthesisError> + pub fn read_commitment_secondary( + &mut self, + mut cs: CS, + ) -> Result, SynthesisError> where - CS: ConstraintSystem, + CS: ConstraintSystem, { - let num_absorbs = self.state.len() as u32; + let element = self + .buffer + .next() + .ok_or(SynthesisError::AssignmentMissing)?; + + let allocated_commitment = match element { + TranscriptElement::CommitmentSecondary(commitment) => AllocatedSecondaryCommitment::alloc( + cs.namespace(|| "alloc commitment secondary"), + commitment, + ), + _ => return Err(SynthesisError::AssignmentMissing), + }; + + self.state.extend(allocated_commitment.as_preimage()); + + Ok(allocated_commitment) + } - let pattern = IOPattern(vec![SpongeOp::Absorb(num_absorbs), SpongeOp::Squeeze(1u32)]); + pub fn squeeze(&mut self, mut cs: CS) -> Result, 
diff --git a/src/parafold/transcript/mod.rs b/src/parafold/transcript/mod.rs
index 8eb2aed9..39a800bf 100644
--- a/src/parafold/transcript/mod.rs
+++ b/src/parafold/transcript/mod.rs
@@ -1,8 +1,65 @@
+use ff::{PrimeField, PrimeFieldBits};
 use generic_array::typenum::U24;
 use neptune::poseidon::PoseidonConstants;
 
+use crate::parafold::cycle_fold::hash_commitment;
+use crate::traits::commitment::CommitmentTrait;
+use crate::traits::CurveCycleEquipped;
+use crate::Commitment;
+
 pub mod circuit;
 pub mod prover;
 
 /// Poseidon constants for hashing used for the Fiat-Shamir transcript
 pub type TranscriptConstants<F> = PoseidonConstants<F, U24>;
+
+#[derive(Clone, Debug)]
+pub enum TranscriptElement<E: CurveCycleEquipped> {
+  Scalar(E::Scalar),
+  Base(E::Base),
+  CommitmentPrimary(Commitment<E>),
+  CommitmentSecondary(Commitment<E::Secondary>),
+}
+
+impl<E: CurveCycleEquipped> TranscriptElement<E> {
+  pub fn to_field(&self) -> Vec<E::Scalar> {
+    // TODO: figure out if we can avoid Vec
+    match self {
+      TranscriptElement::Scalar(scalar) => vec![*scalar],
+      TranscriptElement::Base(base) => base
+        .to_le_bits()
+        .chunks(128)
+        .map(|bits| {
+          bits
+            .iter()
+            .enumerate()
+            .fold(0_u128, |mut limb, (index, bit)| {
+              if *bit {
+                let mask = 1_u128 << index;
+                limb |= mask;
+              }
+              limb
+            })
+        })
+        .map(|limb| E::Scalar::from_u128(limb))
+        .collect::<Vec<_>>(),
+      TranscriptElement::CommitmentPrimary(c) => {
+        let hash = hash_commitment::<E>(*c);
+        Self::Base(hash).to_field()
+      }
+      TranscriptElement::CommitmentSecondary(c) => {
+        let (x, y, _) = c.to_coordinates();
+        [x, y].to_vec()
+      }
+    }
+  }
+
+  pub fn size(&self) -> usize {
+    match self {
+      TranscriptElement::Scalar(_) => 1,
+      TranscriptElement::Base(_) => 2,
+      TranscriptElement::CommitmentPrimary(_) => 2,
+      TranscriptElement::CommitmentSecondary(_) => 2,
+    }
+  }
+}
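`TranscriptElement::Base` is re-encoded for the scalar-field sponge by splitting the little-endian bits of the base element into two 128-bit limbs and lifting each limb with `from_u128`, which is why `size()` reports 2 for `Base` (and for `CommitmentPrimary`, which is first hashed to a base element). The limb packing itself, extracted into a plain function as a sketch (not part of the patch) so it can be checked in isolation:

// Sketch only: little-endian bit packing into a u128 limb, assuming at most 128 bits.
fn pack_le_bits_u128(bits: &[bool]) -> u128 {
  bits
    .iter()
    .enumerate()
    .fold(0_u128, |mut limb, (index, bit)| {
      if *bit {
        limb |= 1_u128 << index; // OR in 2^index for each set bit
      }
      limb
    })
}

// For example, packing the little-endian bits of 5 = 0b101 gives 5 back:
// assert_eq!(pack_le_bits_u128(&[true, false, true]), 5);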
diff --git a/src/parafold/transcript/prover.rs b/src/parafold/transcript/prover.rs
index da2b1576..01212770 100644
--- a/src/parafold/transcript/prover.rs
+++ b/src/parafold/transcript/prover.rs
@@ -1,94 +1,94 @@
-use ff::{Field, PrimeField, PrimeFieldBits};
+use ff::PrimeFieldBits;
+use itertools::chain;
 use neptune::sponge::api::{IOPattern, SpongeAPI, SpongeOp};
 use neptune::sponge::vanilla::Mode::Simplex;
 use neptune::sponge::vanilla::{Sponge, SpongeTrait};
 
-use crate::parafold::cycle_fold::HashedCommitment;
-use crate::parafold::transcript::TranscriptConstants;
-use crate::traits::commitment::CommitmentTrait;
-use crate::traits::Engine;
-use crate::Commitment;
+use crate::parafold::transcript::{TranscriptConstants, TranscriptElement};
+use crate::traits::CurveCycleEquipped;
 
 #[derive(Clone, Debug)]
-pub struct Transcript<F: PrimeField> {
-  constants: TranscriptConstants<F>,
-  state: Vec<F>,
-}
+pub struct Transcript<E: CurveCycleEquipped> {
+  constants: TranscriptConstants<E::Scalar>,
 
-impl<F: PrimeField> Transcript<F> {
-  pub fn new(constants: TranscriptConstants<F>) -> Self {
-    Self {
-      constants,
-      state: vec![],
-    }
-  }
+  // Output of the previous call to `squeeze`, if any
+  prev: Option<E::Scalar>,
+  // Buffer of messages for the current round
+  round_state: Vec<E::Scalar>,
+
+  // Stores the entire sequence of prover messages, so that the circuit verifier can
+  // deserialize them in the same order. Each message is also converted to field elements
+  // in `round_state`, which are absorbed by the sponge at the next call to `squeeze`.
+  buffer: Vec<TranscriptElement<E>>,
+}
 
-  pub fn new_init(init: F, constants: TranscriptConstants<F>) -> Self {
+impl<E: CurveCycleEquipped> Transcript<E> {
+  pub fn new(
+    constants: TranscriptConstants<E::Scalar>,
+    init: impl IntoIterator<Item = E::Scalar>,
+  ) -> Self {
     Self {
       constants,
-      state: vec![init],
+      prev: None,
+      round_state: Vec::from_iter(init),
+      buffer: vec![],
     }
   }
 
-  pub fn absorb<I>(&mut self, elements: I)
-  where
-    I: IntoIterator<Item = F>,
-  {
-    self.state.extend(elements);
+  pub fn absorb(&mut self, element: TranscriptElement<E>) {
+    self.round_state.extend(element.to_field());
+    self.buffer.push(element);
   }
 
-  pub fn absorb_commitment_primary<E: Engine<Scalar = F>>(&mut self, c: Commitment<E>) {
-    let c_hash = HashedCommitment::<E>::new(c);
-    self.absorb(c_hash.as_preimage());
+  /// Adds a sequence of elements to the transcript, storing them for future deserialization by
+  /// the corresponding verifier transcript.
+  pub fn absorb_many(&mut self, elements: impl IntoIterator<Item = TranscriptElement<E>>) {
+    for element in elements {
+      self.absorb(element);
+    }
   }
 
-  pub fn absorb_commitment_secondary<E: Engine<Base = F>>(&mut self, c: Commitment<E>) {
-    let (x, y, _) = c.to_coordinates();
-    self.absorb([x, y]);
-  }
+  // pub fn absorb_commitment_primary<E: Engine<Scalar = F>>(&mut self, c: Commitment<E>) {
+  //   let limbs = commitment_to_hash_limbs::<E>(c);
+  //   self.absorb_many(limbs);
+  // }
+  //
+  // pub fn absorb_commitment_secondary<E: Engine<Base = F>>(&mut self, c: Commitment<E>) {
+  //   let (x, y, _) = c.to_coordinates();
+  //   self.absorb_many([x, y]);
+  // }
 
-  pub fn squeeze(&mut self) -> F {
+  pub fn squeeze(&mut self) -> E::Scalar {
     let mut sponge = Sponge::new_with_constants(&self.constants, Simplex);
-    let num_absorbs = self.state.len() as u32;
-    let acc = &mut ();
-    let parameter = IOPattern(vec![SpongeOp::Absorb(num_absorbs), SpongeOp::Squeeze(1u32)]);
-    sponge.start(parameter, None, acc);
-    SpongeAPI::absorb(&mut sponge, num_absorbs, &self.state, acc);
-    let hash = SpongeAPI::squeeze(&mut sponge, 1, acc);
-    sponge.finish(acc).unwrap();
-    let output = hash[0];
-    self.state = hash;
-    output
-  }
 
-  pub fn squeeze_bits_secondary<Base: PrimeField>(&mut self, num_bits: usize) -> Base
-  where
-    F: PrimeFieldBits,
-  {
-    let hash = self.squeeze();
+    let elements = chain!(self.prev.clone(), self.round_state.drain(..)).collect::<Vec<_>>();
+    let num_absorbs = elements.len() as u32;
 
-    // Only return `num_bits`
-    let bits = hash.to_le_bits();
-    let mut res = Base::ZERO;
-    let mut coeff = Base::ONE;
-    for bit in bits.into_iter().take(num_bits) {
-      if bit {
-        res += coeff;
-      }
-      coeff += coeff;
-    }
-    res
+    let hash = {
+      let acc = &mut ();
+      let parameter = IOPattern(vec![SpongeOp::Absorb(num_absorbs), SpongeOp::Squeeze(1u32)]);
+      sponge.start(parameter, None, acc);
+      SpongeAPI::absorb(&mut sponge, num_absorbs, &elements, acc);
+      let hash = SpongeAPI::squeeze(&mut sponge, 1, acc);
+      sponge.finish(acc).unwrap();
+      hash[0]
+    };
+
+    // save the current output
+    self.prev = Some(hash);
+
+    hash
   }
 
-  pub fn seal(&self) -> F {
-    assert_eq!(self.state.len(), 1);
-    self.state[0]
+  pub fn squeeze_bits(&mut self, num_bits: usize) -> Vec<bool> {
+    let hash = self.squeeze();
+    hash.to_le_bits().into_iter().take(num_bits).collect()
   }
 
-  pub fn merge(mut self_L: Self, self_R: &Self) -> Self {
-    assert_eq!(self_L.state.len(), 1);
-    assert_eq!(self_R.state.len(), 1);
-    self_L.state.extend(self_R.state.iter().cloned());
-    self_L
+  pub fn seal(mut self) -> (E::Scalar, Vec<TranscriptElement<E>>) {
+    if !self.round_state.is_empty() {
+      let _ = self.squeeze();
+    }
+    (self.prev.unwrap(), self.buffer)
   }
 }
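Taken together, the prover `Transcript` and the circuit `AllocatedTranscript` are meant to run in lock-step: the prover absorbs `TranscriptElement`s and squeezes challenges, `seal` returns the final challenge together with the full message buffer, and the circuit replays that buffer with matching `read_*` and `squeeze` calls. A rough prover-side sketch, not part of the patch; `constants`, `init_scalar`, `t`, and `comm_w` are placeholder values:

// Sketch only: one prover round followed by the handoff to the circuit verifier.
let mut transcript = Transcript::<E>::new(constants.clone(), [init_scalar]);

// Absorb the round's messages; each one is also recorded in the buffer.
transcript.absorb(TranscriptElement::Scalar(t));
transcript.absorb(TranscriptElement::CommitmentPrimary(comm_w));

// Challenge bound to `init_scalar`, `t`, and `comm_w`.
let r = transcript.squeeze();
// ... use `r` in the prover's folding step ...

// Hash any leftover messages and return (final challenge, full message buffer);
// the buffer is what `AllocatedTranscript::new` consumes on the circuit side.
let (state, messages) = transcript.seal();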