diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 80ca07de..71ff9a75 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -61,9 +61,9 @@ jobs: curl -sSfL https://github.com/ethereum/solidity/releases/download/v0.8.4/solc-static-linux -o /usr/local/bin/solc chmod +x /usr/local/bin/solc - name: Execute compile.sh to generate .r1cs and .wasm from .circom - run: ./frontends/src/circom/test_folder/compile.sh + run: ./experimental-frontends/src/circom/test_folder/compile.sh - name: Execute compile.sh to generate .json from noir - run: ./frontends/src/noir/test_folder/compile.sh + run: ./experimental-frontends/src/noir/test_folder/compile.sh - name: Run tests uses: actions-rs/cargo@v1 with: @@ -95,11 +95,11 @@ jobs: default: true - name: Add target run: rustup target add ${{ matrix.target }} - - name: Wasm-compat frontends build + - name: Wasm-compat experimental-frontends build uses: actions-rs/cargo@v1 with: command: build - args: -p frontends --no-default-features --target ${{ matrix.target }} --features "wasm, parallel" + args: -p experimental-frontends --no-default-features --target ${{ matrix.target }} --features "wasm, parallel" - name: Wasm-compat folding-schemes build uses: actions-rs/cargo@v1 with: @@ -132,14 +132,30 @@ jobs: curl -sSfL https://github.com/ethereum/solidity/releases/download/v0.8.4/solc-static-linux -o /usr/local/bin/solc chmod +x /usr/local/bin/solc - name: Execute compile.sh to generate .r1cs and .wasm from .circom - run: ./frontends/src/circom/test_folder/compile.sh + run: ./experimental-frontends/src/circom/test_folder/compile.sh - name: Execute compile.sh to generate .json from noir - run: ./frontends/src/noir/test_folder/compile.sh + run: ./experimental-frontends/src/noir/test_folder/compile.sh - name: Run examples tests run: cargo test --examples - name: Run examples run: cargo run --release --example 2>&1 | grep -E '^ ' | xargs -n1 cargo run --release --example + # run the benchmarks with the flag `--no-run` to ensure that they compile, + # but without executing them. + bench: + if: github.event.pull_request.draft == false + name: Bench compile + timeout-minutes: 30 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + - uses: Swatinem/rust-cache@v2 + - uses: actions-rs/cargo@v1 + with: + command: bench + args: -p folding-schemes --no-run + fmt: if: github.event.pull_request.draft == false name: Rustfmt @@ -164,10 +180,10 @@ jobs: feature_set: [basic, wasm] include: - feature_set: basic - features: --features default,light-test - # We only want to test `frontends` package with `wasm` feature. + features: --features default + # We only want to test `experimental-frontends` package with `wasm` feature. 
- feature_set: wasm - features: -p frontends --features wasm,parallel --target wasm32-unknown-unknown + features: -p experimental-frontends --features wasm,parallel --target wasm32-unknown-unknown steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 diff --git a/.gitignore b/.gitignore index e2e3afdc..d3ba383d 100644 --- a/.gitignore +++ b/.gitignore @@ -2,12 +2,12 @@ Cargo.lock # Circom generated files -frontends/src/circom/test_folder/*_js/ +experimental-frontends/src/circom/test_folder/*_js/ *.r1cs *.sym # Noir generated files -frontends/src/noir/test_folder/*/target/* +experimental-frontends/src/noir/test_folder/*/target/* # generated contracts data solidity-verifiers/generated diff --git a/Cargo.toml b/Cargo.toml index 177d6f60..531b505b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,21 +3,9 @@ members = [ "folding-schemes", "solidity-verifiers", "cli", - "frontends" + "experimental-frontends" ] resolver = "2" [patch.crates-io] -# The following patch is to use a version of ark-r1cs-std compatible with -# v0.4.0 but that includes two cherry-picked commits from after v0.4.0 which -# fixes the in-circuit scalar multiplication of the zero point and the -# y-coordinate of the zero point. The commits are respectively from -# https://github.com/arkworks-rs/r1cs-std/pull/124 and -# https://github.com/arkworks-rs/r1cs-std/pull/126, without including other -# changes done between v0.4.0 and this fix which would break compatibility. -ark-r1cs-std = { path = "../r1cs-std-tmpfix" } -# patch ark_curves to use a cherry-picked version which contains -# bn254::constraints & grumpkin for v0.4.0 (once arkworks v0.5.0 is released -# this will no longer be needed) -ark-bn254 = { git = "https://github.com/arnaucube/ark-curves-cherry-picked", branch="cherry-pick"} -ark-grumpkin = { git = "https://github.com/arnaucube/ark-curves-cherry-picked", branch="cherry-pick"} +ark-r1cs-std = {git = "https://github.com/yelhousni/r1cs-std", branch="perf/sw"} diff --git a/README.md b/README.md index 3da10531..1898025a 100644 --- a/README.md +++ b/README.md @@ -23,9 +23,6 @@ Folding schemes implemented: - [Nova: Recursive Zero-Knowledge Arguments from Folding Schemes](https://eprint.iacr.org/2021/370.pdf), Abhiram Kothapalli, Srinath Setty, Ioanna Tzialla. 2021 - [CycleFold: Folding-scheme-based recursive arguments over a cycle of elliptic curves](https://eprint.iacr.org/2023/1192.pdf), Abhiram Kothapalli, Srinath Setty. 2023 - [HyperNova: Recursive arguments for customizable constraint systems](https://eprint.iacr.org/2023/573.pdf), Abhiram Kothapalli, Srinath Setty. 2023 - -Work in progress: - - [ProtoGalaxy: Efficient ProtoStar-style folding of multiple instances](https://eprint.iacr.org/2023/1106.pdf), Liam Eagen, Ariel Gabizon. 2023 @@ -34,7 +31,7 @@ Work in progress: Frontends allow to define the circuit to be folded (ie. `FCircuit`). The recommended frontend is directly implementing the [`FCircuit` trait](https://github.com/privacy-scaling-explorations/sonobe/blob/main/folding-schemes/src/frontend/mod.rs#L16) with the Arkworks constraint system. -Alternatively, experimental frontends for [Circom](https://github.com/iden3/circom), [Noir](https://github.com/noir-lang/noir) and [Noname](https://github.com/zksecurity/noname) can be found at the [sonobe/frontends](https://github.com/privacy-scaling-explorations/sonobe/tree/main/frontends) directory, which have some computational (and time) overhead. 
+Alternatively, experimental frontends for [Circom](https://github.com/iden3/circom), [Noir](https://github.com/noir-lang/noir) and [Noname](https://github.com/zksecurity/noname) can be found at the [sonobe/experimental-frontends](https://github.com/privacy-scaling-explorations/sonobe/tree/main/experimental-frontends) directory, which have some computational (and time) overhead. More details about the frontend interface and the experimental frontends can be found at the [sonobe-docs/frontend](https://privacy-scaling-explorations.github.io/sonobe-docs/usage/frontend.html) page. @@ -49,7 +46,7 @@ folding-schemes = { git = "https://github.com/privacy-scaling-explorations/sonob Available packages: - `folding-schemes`: main crate, contains the different scheme implementations, together with commitment schemes, frontend trait, arithmetization, transcript, etc. - `solidity-verifiers`: contains the templating logic to output the verifier contracts for the DeciderEth proofs. Currently only supports Nova+CycleFold DeciderEth proofs. -- `frontends`: contains the experimental frontends other than the arkworks frontend. More details at the [sonobe/frontends](https://github.com/privacy-scaling-explorations/sonobe/tree/main/frontends) directory. +- `experimental-frontends`: contains the experimental frontends other than the arkworks frontend. More details at the [sonobe/experimental-frontends](https://github.com/privacy-scaling-explorations/sonobe/tree/main/experimental-frontends) directory. Available features: - `parallel` enables some parallelization optimizations available in the crate. It is enabled by default. @@ -105,7 +102,7 @@ Sonobe is [MIT Licensed](https://github.com/privacy-scaling-explorations/sonobe/ ## Acknowledgments -This project builds on top of multiple [arkworks](https://github.com/arkworks-rs) libraries. It uses Espresso system's [virtual polynomial](https://github.com/EspressoSystems/hyperplonk/blob/main/arithmetic/src/virtual_polynomial.rs) abstraction and its [SumCheck](https://github.com/EspressoSystems/hyperplonk/tree/main/subroutines/src/poly_iop/sum_check) implementation. +This project builds on top of multiple [arkworks](https://github.com/arkworks-rs) libraries. It uses Espresso System's [virtual polynomial](https://github.com/EspressoSystems/hyperplonk/blob/main/arithmetic/src/virtual_polynomial.rs) abstraction and its [SumCheck](https://github.com/EspressoSystems/hyperplonk/tree/main/subroutines/src/poly_iop/sum_check) implementation. The Solidity templates used in `nova_cyclefold_verifier.sol`, use [iden3](https://github.com/iden3/snarkjs/blob/master/templates/verifier_groth16.sol.ejs)'s Groth16 implementation and a KZG10 Solidity template adapted from [weijiekoh/libkzg](https://github.com/weijiekoh/libkzg). diff --git a/benches/README.md b/benches/README.md new file mode 100644 index 00000000..6f998097 --- /dev/null +++ b/benches/README.md @@ -0,0 +1,10 @@ +# benchmarks +*Note: we're starting to benchmark & profile Sonobe, current results are pre-optimizations.* + +- Benchmark + - Run: `cargo bench` + - To run a specific benchmark, for example Nova's benchmark, run: `cargo bench --bench=nova` +- Profiling + - eg. 
`cargo bench --bench=nova -- --profile-time 3` + + diff --git a/benches/common.rs b/benches/common.rs new file mode 100644 index 00000000..163dbbfc --- /dev/null +++ b/benches/common.rs @@ -0,0 +1,53 @@ +use criterion::*; + +use folding_schemes::{ + frontend::{utils::CustomFCircuit, FCircuit}, + Curve, Error, FoldingScheme, +}; + +pub(crate) fn bench_ivc_opt< + C1: Curve, + C2: Curve, + FS: FoldingScheme>, +>( + c: &mut Criterion, + name: String, + n: usize, + prep_param: FS::PreprocessorParam, +) -> Result<(), Error> { + let fcircuit_size = 1 << n; // 2^n + + let f_circuit = CustomFCircuit::::new(fcircuit_size)?; + + let mut rng = rand::rngs::OsRng; + + // prepare the FS prover & verifier params + let fs_params = FS::preprocess(&mut rng, &prep_param)?; + + let z_0 = vec![C1::ScalarField::from(3_u32)]; + let mut fs = FS::init(&fs_params, f_circuit, z_0)?; + + // warmup steps + for _ in 0..5 { + fs.prove_step(rng, (), None)?; + } + + let mut group = c.benchmark_group(format!( + "{} - FCircuit: {} (2^{}) constraints", + name, fcircuit_size, n + )); + group.significance_level(0.1).sample_size(10); + group.bench_function("prove_step", |b| { + b.iter(|| -> Result<_, _> { black_box(fs.clone()).prove_step(rng, (), None) }) + }); + + // verify the IVCProof + let ivc_proof = fs.ivc_proof(); + group.bench_function("verify", |b| { + b.iter(|| -> Result<_, _> { + FS::verify(black_box(fs_params.1.clone()), black_box(ivc_proof.clone())) + }) + }); + group.finish(); + Ok(()) +} diff --git a/benches/hypernova.rs b/benches/hypernova.rs new file mode 100644 index 00000000..d3f4421c --- /dev/null +++ b/benches/hypernova.rs @@ -0,0 +1,84 @@ +use criterion::*; +use pprof::criterion::{Output, PProfProfiler}; + +use ark_bn254::{Fr as bn_Fr, G1Projective as bn_G}; +use ark_grumpkin::Projective as grumpkin_G; +use ark_pallas::{Fr as pallas_Fr, Projective as pallas_G}; +use ark_vesta::Projective as vesta_G; + +use folding_schemes::{ + commitment::pedersen::Pedersen, + folding::{hypernova::HyperNova, nova::PreprocessorParam}, + frontend::{utils::CustomFCircuit, FCircuit}, + transcript::poseidon::poseidon_canonical_config, +}; + +mod common; +use common::bench_ivc_opt; + +fn bench_hypernova_ivc(c: &mut Criterion) { + let poseidon_config = poseidon_canonical_config::(); + + // iterate over the powers of n + for n in [0_usize, 14, 16, 18, 19, 20, 21, 22].iter() { + let fcircuit_size = 1 << n; // 2^n + let fcircuit = CustomFCircuit::::new(fcircuit_size).unwrap(); + let prep_param = PreprocessorParam::new(poseidon_config.clone(), fcircuit); + + bench_ivc_opt::< + pallas_G, + vesta_G, + HyperNova< + pallas_G, + vesta_G, + CustomFCircuit, + Pedersen, + Pedersen, + 1, + 1, + false, + >, + >( + c, + "HyperNova - Pallas-Vesta curves".to_string(), + *n, + prep_param, + ) + .unwrap(); + } + + let poseidon_config = poseidon_canonical_config::(); + for n in [0_usize, 14, 16, 18, 19, 20, 21, 22].iter() { + let fcircuit_size = 1 << n; // 2^n + let fcircuit = CustomFCircuit::::new(fcircuit_size).unwrap(); + let prep_param = PreprocessorParam::new(poseidon_config.clone(), fcircuit); + + bench_ivc_opt::< + bn_G, + grumpkin_G, + HyperNova< + bn_G, + grumpkin_G, + CustomFCircuit, + Pedersen, + Pedersen, + 1, + 1, + false, + >, + >( + c, + "HyperNova - BN254-Grumpkin curves".to_string(), + *n, + prep_param, + ) + .unwrap(); + } +} + +criterion_group! 
{ + name = benches; + config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); + targets = bench_hypernova_ivc +} +criterion_main!(benches); diff --git a/benches/nova.rs b/benches/nova.rs new file mode 100644 index 00000000..eed5419e --- /dev/null +++ b/benches/nova.rs @@ -0,0 +1,75 @@ +use criterion::*; +use pprof::criterion::{Output, PProfProfiler}; + +use ark_bn254::{Fr as bn_Fr, G1Projective as bn_G}; +use ark_grumpkin::Projective as grumpkin_G; +use ark_pallas::{Fr as pallas_Fr, Projective as pallas_G}; +use ark_vesta::Projective as vesta_G; + +use folding_schemes::{ + commitment::pedersen::Pedersen, + folding::nova::{Nova, PreprocessorParam}, + frontend::{utils::CustomFCircuit, FCircuit}, + transcript::poseidon::poseidon_canonical_config, +}; + +mod common; +use common::bench_ivc_opt; + +fn bench_nova_ivc(c: &mut Criterion) { + let poseidon_config = poseidon_canonical_config::(); + + // iterate over the powers of n + for n in [0_usize, 14, 16, 18, 19, 20, 21, 22].iter() { + let fcircuit_size = 1 << n; // 2^n + let fcircuit = CustomFCircuit::::new(fcircuit_size).unwrap(); + let prep_param = PreprocessorParam::new(poseidon_config.clone(), fcircuit); + + bench_ivc_opt::< + pallas_G, + vesta_G, + Nova< + pallas_G, + vesta_G, + CustomFCircuit, + Pedersen, + Pedersen, + false, + >, + >(c, "Nova - Pallas-Vesta curves".to_string(), *n, prep_param) + .unwrap(); + } + + let poseidon_config = poseidon_canonical_config::(); + for n in [0_usize, 14, 16, 18, 19, 20, 21, 22].iter() { + let fcircuit_size = 1 << n; // 2^n + let fcircuit = CustomFCircuit::::new(fcircuit_size).unwrap(); + let prep_param = PreprocessorParam::new(poseidon_config.clone(), fcircuit); + + bench_ivc_opt::< + bn_G, + grumpkin_G, + Nova< + bn_G, + grumpkin_G, + CustomFCircuit, + Pedersen, + Pedersen, + false, + >, + >( + c, + "Nova - BN254-Grumpkin curves".to_string(), + *n, + prep_param, + ) + .unwrap(); + } +} + +criterion_group! 
{ + name = benches; + config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); + targets = bench_nova_ivc +} +criterion_main!(benches); diff --git a/benches/protogalaxy.rs b/benches/protogalaxy.rs new file mode 100644 index 00000000..ace36c35 --- /dev/null +++ b/benches/protogalaxy.rs @@ -0,0 +1,78 @@ +use criterion::*; +use pprof::criterion::{Output, PProfProfiler}; + +use ark_bn254::{Fr as bn_Fr, G1Projective as bn_G}; +use ark_grumpkin::Projective as grumpkin_G; +use ark_pallas::{Fr as pallas_Fr, Projective as pallas_G}; +use ark_vesta::Projective as vesta_G; + +use folding_schemes::{ + commitment::pedersen::Pedersen, + folding::protogalaxy::ProtoGalaxy, + frontend::{utils::CustomFCircuit, FCircuit}, + transcript::poseidon::poseidon_canonical_config, +}; + +mod common; +use common::bench_ivc_opt; + +fn bench_protogalaxy_ivc(c: &mut Criterion) { + let poseidon_config = poseidon_canonical_config::(); + + // iterate over the powers of n + for n in [0_usize, 14, 16, 18, 19, 20, 21, 22].iter() { + let fcircuit_size = 1 << n; // 2^n + let fcircuit = CustomFCircuit::::new(fcircuit_size).unwrap(); + let prep_param = (poseidon_config.clone(), fcircuit); + + bench_ivc_opt::< + pallas_G, + vesta_G, + ProtoGalaxy< + pallas_G, + vesta_G, + CustomFCircuit, + Pedersen, + Pedersen, + >, + >( + c, + "ProtoGalaxy - Pallas-Vesta curves".to_string(), + *n, + prep_param, + ) + .unwrap(); + } + + let poseidon_config = poseidon_canonical_config::(); + for n in [0_usize, 14, 16, 18, 19, 20, 21, 22].iter() { + let fcircuit_size = 1 << n; // 2^n + let fcircuit = CustomFCircuit::::new(fcircuit_size).unwrap(); + let prep_param = (poseidon_config.clone(), fcircuit); + + bench_ivc_opt::< + bn_G, + grumpkin_G, + ProtoGalaxy< + bn_G, + grumpkin_G, + CustomFCircuit, + Pedersen, + Pedersen, + >, + >( + c, + "ProtoGalaxy - BN254-Grumpkin curves".to_string(), + *n, + prep_param, + ) + .unwrap(); + } +} + +criterion_group! 
{ + name = benches; + config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); + targets = bench_protogalaxy_ivc +} +criterion_main!(benches); diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 4179485c..f14497a5 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -4,35 +4,12 @@ version = "0.1.0" edition = "2021" [dependencies] -ark-ec = "0.4" -ark-ff = "0.4" -ark-poly = "0.4" -ark-std = "0.4" -ark-groth16 = "0.4" -askama = { version = "0.12.0", features = ["config"], default-features = false } -ark-bn254 = "0.4.0" -ark-poly-commit = "0.4.0" +ark-serialize = "^0.5.0" solidity-verifiers = { path = "../solidity-verifiers" } -itertools = "0.12.1" -ark-serialize = "0.4.1" clap = { version = "4.4", features = ["derive", "string"] } clap-verbosity-flag = "2.1" -log = "0.4" env_logger = "0.10" -[dev-dependencies] -revm = "3.5.0" -tracing = { version = "0.1", default-features = false, features = [ "attributes" ] } -tracing-subscriber = { version = "0.2" } - [features] default = ["parallel"] - -parallel = [ - "ark-std/parallel", - "ark-ff/parallel", - "ark-poly/parallel", - ] - - - +parallel = ["solidity-verifiers/parallel"] \ No newline at end of file diff --git a/examples/circom_full_flow.rs b/examples/circom_full_flow.rs index 22ab09b3..9c406738 100644 --- a/examples/circom_full_flow.rs +++ b/examples/circom_full_flow.rs @@ -9,14 +9,15 @@ /// - generate the Solidity contract that verifies the proof /// - verify the proof in the EVM /// -use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as G1}; +use ark_bn254::{Bn254, Fr, G1Projective as G1}; use ark_groth16::Groth16; -use ark_grumpkin::{constraints::GVar as GVar2, Projective as G2}; +use ark_grumpkin::Projective as G2; use std::path::PathBuf; use std::time::Instant; +use experimental_frontends::{circom::CircomFCircuit, utils::VecF}; use folding_schemes::{ commitment::{kzg::KZG, pedersen::Pedersen}, folding::{ @@ -28,9 +29,8 @@ use folding_schemes::{ }, frontend::FCircuit, transcript::poseidon::poseidon_canonical_config, - Decider, FoldingScheme, + Decider, Error, FoldingScheme, }; -use frontends::circom::CircomFCircuit; use solidity_verifiers::{ evm::{compile_solidity, Evm}, utils::get_function_selector_for_nova_cyclefold_verifier, @@ -38,7 +38,7 @@ use solidity_verifiers::{ NovaCycleFoldVerifierKey, }; -fn main() { +fn main() -> Result<(), Error> { // set the initial state let z_0 = vec![Fr::from(3_u32)]; @@ -58,22 +58,22 @@ fn main() { ]; // initialize the Circom circuit - let r1cs_path = PathBuf::from("./frontends/src/circom/test_folder/with_external_inputs.r1cs"); + let r1cs_path = + PathBuf::from("./experimental-frontends/src/circom/test_folder/with_external_inputs.r1cs"); let wasm_path = PathBuf::from( - "./frontends/src/circom/test_folder/with_external_inputs_js/with_external_inputs.wasm", + "./experimental-frontends/src/circom/test_folder/with_external_inputs_js/with_external_inputs.wasm", ); - let f_circuit_params = (r1cs_path.into(), wasm_path.into(), 1, 2); - let f_circuit = CircomFCircuit::::new(f_circuit_params).unwrap(); + let f_circuit_params = (r1cs_path.into(), wasm_path.into(), 1); // state len = 1 + const EXT_INP_LEN: usize = 2; // external inputs len = 2 + let f_circuit = CircomFCircuit::::new(f_circuit_params)?; pub type N = - Nova, KZG<'static, Bn254>, Pedersen, false>; + Nova, KZG<'static, Bn254>, Pedersen, false>; pub type D = DeciderEth< G1, - GVar, G2, - GVar2, - CircomFCircuit, + CircomFCircuit, KZG<'static, Bn254>, Pedersen, Groth16, @@ -81,24 +81,23 @@ fn main() { >; 
let poseidon_config = poseidon_canonical_config::(); - let mut rng = rand::rngs::OsRng; + let mut rng = ark_std::rand::rngs::OsRng; // prepare the Nova prover & verifier params let nova_preprocess_params = PreprocessorParam::new(poseidon_config, f_circuit.clone()); - let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); - - // initialize the folding scheme engine, in our case we use Nova - let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap(); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params)?; // prepare the Decider prover & verifier params let (decider_pp, decider_vp) = - D::preprocess(&mut rng, nova_params.clone(), nova.clone()).unwrap(); + D::preprocess(&mut rng, (nova_params.clone(), f_circuit.state_len()))?; + + // initialize the folding scheme engine, in our case we use Nova + let mut nova = N::init(&nova_params, f_circuit.clone(), z_0)?; // run n steps of the folding iteration for (i, external_inputs_at_step) in external_inputs.iter().enumerate() { let start = Instant::now(); - nova.prove_step(rng, external_inputs_at_step.clone(), None) - .unwrap(); + nova.prove_step(rng, VecF(external_inputs_at_step.clone()), None)?; println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } @@ -107,11 +106,10 @@ fn main() { N::verify( nova_params.1, // Nova's verifier params ivc_proof, - ) - .unwrap(); + )?; let start = Instant::now(); - let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); + let proof = D::prove(rng, decider_pp, nova.clone())?; println!("generated Decider proof: {:?}", start.elapsed()); let verified = D::verify( @@ -122,8 +120,7 @@ fn main() { &nova.U_i.get_commitments(), &nova.u_i.get_commitments(), &proof, - ) - .unwrap(); + )?; assert!(verified); println!("Decider proof verification: {}", verified); @@ -139,8 +136,7 @@ fn main() { &nova.U_i, &nova.u_i, proof, - ) - .unwrap(); + )?; // prepare the setup params for the solidity verifier let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((decider_vp, f_circuit.state_len())); @@ -161,9 +157,9 @@ fn main() { fs::write( "./examples/nova-verifier.sol", decider_solidity_code.clone(), - ) - .unwrap(); - fs::write("./examples/solidity-calldata.calldata", calldata.clone()).unwrap(); + )?; + fs::write("./examples/solidity-calldata.calldata", calldata.clone())?; let s = solidity_verifiers::utils::get_formatted_calldata(calldata.clone()); fs::write("./examples/solidity-calldata.inputs", s.join(",\n")).expect(""); + Ok(()) } diff --git a/examples/external_inputs.rs b/examples/external_inputs.rs index f4a6af1f..79ebc86c 100644 --- a/examples/external_inputs.rs +++ b/examples/external_inputs.rs @@ -3,17 +3,16 @@ #![allow(non_camel_case_types)] #![allow(clippy::upper_case_acronyms)] -use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; +use ark_bn254::{Bn254, Fr, G1Projective as Projective}; use ark_crypto_primitives::{ crh::{ poseidon::constraints::{CRHGadget, CRHParametersVar}, - poseidon::CRH, - CRHScheme, CRHSchemeGadget, + CRHSchemeGadget, }, sponge::{poseidon::PoseidonConfig, Absorb}, }; use ark_ff::PrimeField; -use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; +use ark_grumpkin::Projective as Projective2; use ark_r1cs_std::alloc::AllocVar; use ark_r1cs_std::fields::fp::FpVar; use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; @@ -75,6 +74,8 @@ where F: Absorb, { type Params = PoseidonConfig; + type ExternalInputs = [F; 1]; + type ExternalInputsVar = [FpVar; 1]; fn new(params: Self::Params) -> Result { 
Ok(Self { @@ -85,23 +86,6 @@ where fn state_len(&self) -> usize { 1 } - fn external_inputs_len(&self) -> usize { - 1 - } - - /// computes the next state value for the step of F for the given z_i and external_inputs - /// z_{i+1} - fn step_native( - &self, - _i: usize, - z_i: Vec, - external_inputs: Vec, - ) -> Result, Error> { - let hash_input: [F; 2] = [z_i[0], external_inputs[0]]; - let h = CRH::::evaluate(&self.poseidon_config, hash_input).unwrap(); - Ok(vec![h]) - } - /// generates the constraints and returns the next state value for the step of F for the given /// z_i and external_inputs fn generate_step_constraints( @@ -109,7 +93,7 @@ where cs: ConstraintSystemRef, _i: usize, z_i: Vec>, - external_inputs: Vec>, + external_inputs: Self::ExternalInputsVar, ) -> Result>, SynthesisError> { let crh_params = CRHParametersVar::::new_constant(cs.clone(), self.poseidon_config.clone())?; @@ -123,61 +107,71 @@ where #[cfg(test)] pub mod tests { use super::*; + use ark_crypto_primitives::crh::{poseidon::CRH, CRHScheme}; use ark_r1cs_std::R1CSVar; use ark_relations::r1cs::ConstraintSystem; + fn external_inputs_step_native( + z_i: Vec, + external_inputs: Vec, + poseidon_config: &PoseidonConfig, + ) -> Vec { + let hash_input: [F; 2] = [z_i[0], external_inputs[0]]; + let h = CRH::::evaluate(poseidon_config, hash_input).unwrap(); + vec![h] + } + // test to check that the ExternalInputsCircuit computes the same values inside and outside the circuit #[test] - fn test_f_circuit() { + fn test_f_circuit() -> Result<(), Error> { let poseidon_config = poseidon_canonical_config::(); let cs = ConstraintSystem::::new_ref(); - let circuit = ExternalInputsCircuit::::new(poseidon_config).unwrap(); + let circuit = ExternalInputsCircuit::::new(poseidon_config.clone())?; let z_i = vec![Fr::from(1_u32)]; let external_inputs = vec![Fr::from(3_u32)]; - let z_i1 = circuit - .step_native(0, z_i.clone(), external_inputs.clone()) - .unwrap(); + let z_i1 = + external_inputs_step_native(z_i.clone(), external_inputs.clone(), &poseidon_config); - let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i)).unwrap(); - let external_inputsVar = - Vec::>::new_witness(cs.clone(), || Ok(external_inputs)).unwrap(); + let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i))?; + let external_inputsVar: [FpVar; 1] = + Vec::>::new_witness(cs.clone(), || Ok(external_inputs))? 
+ .try_into() + .unwrap(); - let computed_z_i1Var = circuit - .generate_step_constraints(cs.clone(), 0, z_iVar, external_inputsVar) - .unwrap(); - assert_eq!(computed_z_i1Var.value().unwrap(), z_i1); + let computed_z_i1Var = + circuit.generate_step_constraints(cs.clone(), 0, z_iVar, external_inputsVar)?; + assert_eq!(computed_z_i1Var.value()?, z_i1); + Ok(()) } } /// cargo run --release --example external_inputs -fn main() { +fn main() -> Result<(), Error> { let num_steps = 5; let initial_state = vec![Fr::from(1_u32)]; // prepare the external inputs to be used at each folding step let external_inputs = vec![ - vec![Fr::from(3_u32)], - vec![Fr::from(33_u32)], - vec![Fr::from(73_u32)], - vec![Fr::from(103_u32)], - vec![Fr::from(125_u32)], + [Fr::from(3_u32)], + [Fr::from(33_u32)], + [Fr::from(73_u32)], + [Fr::from(103_u32)], + [Fr::from(125_u32)], ]; assert_eq!(external_inputs.len(), num_steps); let poseidon_config = poseidon_canonical_config::(); - let F_circuit = ExternalInputsCircuit::::new(poseidon_config.clone()).unwrap(); + let F_circuit = ExternalInputsCircuit::::new(poseidon_config.clone())?; /// The idea here is that eventually we could replace the next line chunk that defines the /// `type N = Nova<...>` by using another folding scheme that fulfills the `FoldingScheme` /// trait, and the rest of our code would be working without needing to be updated. type N = Nova< Projective, - GVar, Projective2, - GVar2, ExternalInputsCircuit, KZG<'static, Bn254>, Pedersen, @@ -188,17 +182,15 @@ fn main() { println!("Prepare Nova's ProverParams & VerifierParams"); let nova_preprocess_params = PreprocessorParam::new(poseidon_config, F_circuit.clone()); - let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params)?; println!("Initialize FoldingScheme"); - let mut folding_scheme = N::init(&nova_params, F_circuit, initial_state.clone()).unwrap(); + let mut folding_scheme = N::init(&nova_params, F_circuit, initial_state.clone())?; // compute a step of the IVC for (i, external_inputs_at_step) in external_inputs.iter().enumerate() { let start = Instant::now(); - folding_scheme - .prove_step(rng, external_inputs_at_step.clone(), None) - .unwrap(); + folding_scheme.prove_step(rng, external_inputs_at_step.clone(), None)?; println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } println!( @@ -212,6 +204,6 @@ fn main() { N::verify( nova_params.1, // Nova's verifier params ivc_proof, - ) - .unwrap(); + )?; + Ok(()) } diff --git a/examples/full_flow.rs b/examples/full_flow.rs index 8125c222..4ed5fea1 100644 --- a/examples/full_flow.rs +++ b/examples/full_flow.rs @@ -9,10 +9,10 @@ /// - generate the Solidity contract that verifies the proof /// - verify the proof in the EVM /// -use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as G1}; +use ark_bn254::{Bn254, Fr, G1Projective as G1}; use ark_ff::PrimeField; use ark_groth16::Groth16; -use ark_grumpkin::{constraints::GVar as GVar2, Projective as G2}; +use ark_grumpkin::Projective as G2; use ark_r1cs_std::alloc::AllocVar; use ark_r1cs_std::fields::fp::FpVar; use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; @@ -46,29 +46,21 @@ pub struct CubicFCircuit { } impl FCircuit for CubicFCircuit { type Params = (); + type ExternalInputs = (); + type ExternalInputsVar = (); + fn new(_params: Self::Params) -> Result { Ok(Self { _f: PhantomData }) } fn state_len(&self) -> usize { 1 } - fn external_inputs_len(&self) -> usize { - 0 - } - fn step_native( - &self, - 
_i: usize, - z_i: Vec, - _external_inputs: Vec, - ) -> Result, Error> { - Ok(vec![z_i[0] * z_i[0] * z_i[0] + z_i[0] + F::from(5_u32)]) - } fn generate_step_constraints( &self, cs: ConstraintSystemRef, _i: usize, z_i: Vec>, - _external_inputs: Vec>, + _external_inputs: Self::ExternalInputsVar, ) -> Result>, SynthesisError> { let five = FpVar::::new_constant(cs.clone(), F::from(5u32))?; let z_i = z_i[0].clone(); @@ -77,49 +69,40 @@ impl FCircuit for CubicFCircuit { } } -fn main() { +fn main() -> Result<(), Error> { let n_steps = 5; // set the initial state let z_0 = vec![Fr::from(3_u32)]; - let f_circuit = CubicFCircuit::::new(()).unwrap(); - - pub type N = - Nova, KZG<'static, Bn254>, Pedersen, false>; - pub type D = DeciderEth< - G1, - GVar, - G2, - GVar2, - CubicFCircuit, - KZG<'static, Bn254>, - Pedersen, - Groth16, - N, - >; + let f_circuit = CubicFCircuit::::new(())?; + + pub type N = Nova, KZG<'static, Bn254>, Pedersen, false>; + pub type D = + DeciderEth, KZG<'static, Bn254>, Pedersen, Groth16, N>; let poseidon_config = poseidon_canonical_config::(); - let mut rng = rand::rngs::OsRng; + let mut rng = ark_std::rand::rngs::OsRng; // prepare the Nova prover & verifier params let nova_preprocess_params = PreprocessorParam::new(poseidon_config.clone(), f_circuit); - let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); - - // initialize the folding scheme engine, in our case we use Nova - let mut nova = N::init(&nova_params, f_circuit, z_0).unwrap(); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params)?; // prepare the Decider prover & verifier params - let (decider_pp, decider_vp) = D::preprocess(&mut rng, nova_params, nova.clone()).unwrap(); + let (decider_pp, decider_vp) = + D::preprocess(&mut rng, (nova_params.clone(), f_circuit.state_len()))?; + + // initialize the folding scheme engine, in our case we use Nova + let mut nova = N::init(&nova_params, f_circuit, z_0)?; // run n steps of the folding iteration for i in 0..n_steps { let start = Instant::now(); - nova.prove_step(rng, vec![], None).unwrap(); + nova.prove_step(rng, (), None)?; println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } let start = Instant::now(); - let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); + let proof = D::prove(rng, decider_pp, nova.clone())?; println!("generated Decider proof: {:?}", start.elapsed()); let verified = D::verify( @@ -130,8 +113,7 @@ fn main() { &nova.U_i.get_commitments(), &nova.u_i.get_commitments(), &proof, - ) - .unwrap(); + )?; assert!(verified); println!("Decider proof verification: {}", verified); @@ -147,8 +129,7 @@ fn main() { &nova.U_i, &nova.u_i, proof, - ) - .unwrap(); + )?; // prepare the setup params for the solidity verifier let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((decider_vp, f_circuit.state_len())); @@ -169,9 +150,9 @@ fn main() { fs::write( "./examples/nova-verifier.sol", decider_solidity_code.clone(), - ) - .unwrap(); - fs::write("./examples/solidity-calldata.calldata", calldata.clone()).unwrap(); + )?; + fs::write("./examples/solidity-calldata.calldata", calldata.clone())?; let s = solidity_verifiers::utils::get_formatted_calldata(calldata.clone()); fs::write("./examples/solidity-calldata.inputs", s.join(",\n")).expect(""); + Ok(()) } diff --git a/examples/multi_inputs.rs b/examples/multi_inputs.rs index a337c894..ff56bd9d 100644 --- a/examples/multi_inputs.rs +++ b/examples/multi_inputs.rs @@ -10,8 +10,8 @@ use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; use 
core::marker::PhantomData; use std::time::Instant; -use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; -use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; +use ark_bn254::{Bn254, Fr, G1Projective as Projective}; +use ark_grumpkin::Projective as Projective2; use folding_schemes::commitment::{kzg::KZG, pedersen::Pedersen}; use folding_schemes::folding::nova::{Nova, PreprocessorParam}; @@ -30,6 +30,8 @@ pub struct MultiInputsFCircuit { } impl FCircuit for MultiInputsFCircuit { type Params = (); + type ExternalInputs = (); + type ExternalInputsVar = (); fn new(_params: Self::Params) -> Result { Ok(Self { _f: PhantomData }) @@ -37,34 +39,13 @@ impl FCircuit for MultiInputsFCircuit { fn state_len(&self) -> usize { 5 } - fn external_inputs_len(&self) -> usize { - 0 - } - - /// computes the next state values in place, assigning z_{i+1} into z_i, and computing the new - /// z_{i+1} - fn step_native( - &self, - _i: usize, - z_i: Vec, - _external_inputs: Vec, - ) -> Result, Error> { - let a = z_i[0] + F::from(4_u32); - let b = z_i[1] + F::from(40_u32); - let c = z_i[2] * F::from(4_u32); - let d = z_i[3] * F::from(40_u32); - let e = z_i[4] + F::from(100_u32); - - Ok(vec![a, b, c, d, e]) - } - /// generates the constraints for the step of F for the given z_i fn generate_step_constraints( &self, cs: ConstraintSystemRef, _i: usize, z_i: Vec>, - _external_inputs: Vec>, + _external_inputs: Self::ExternalInputsVar, ) -> Result>, SynthesisError> { let four = FpVar::::new_constant(cs.clone(), F::from(4u32))?; let forty = FpVar::::new_constant(cs.clone(), F::from(40u32))?; @@ -86,12 +67,22 @@ pub mod tests { use ark_r1cs_std::{alloc::AllocVar, R1CSVar}; use ark_relations::r1cs::ConstraintSystem; + fn multi_inputs_step_native(z_i: Vec) -> Vec { + let a = z_i[0] + F::from(4_u32); + let b = z_i[1] + F::from(40_u32); + let c = z_i[2] * F::from(4_u32); + let d = z_i[3] * F::from(40_u32); + let e = z_i[4] + F::from(100_u32); + + vec![a, b, c, d, e] + } + // test to check that the MultiInputsFCircuit computes the same values inside and outside the circuit #[test] - fn test_f_circuit() { + fn test_f_circuit() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); - let circuit = MultiInputsFCircuit::::new(()).unwrap(); + let circuit = MultiInputsFCircuit::::new(())?; let z_i = vec![ Fr::from(1_u32), Fr::from(1_u32), @@ -100,18 +91,18 @@ pub mod tests { Fr::from(1_u32), ]; - let z_i1 = circuit.step_native(0, z_i.clone(), vec![]).unwrap(); + let z_i1 = multi_inputs_step_native(z_i.clone()); - let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i)).unwrap(); - let computed_z_i1Var = circuit - .generate_step_constraints(cs.clone(), 0, z_iVar.clone(), vec![]) - .unwrap(); - assert_eq!(computed_z_i1Var.value().unwrap(), z_i1); + let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i))?; + let computed_z_i1Var = + circuit.generate_step_constraints(cs.clone(), 0, z_iVar.clone(), ())?; + assert_eq!(computed_z_i1Var.value()?, z_i1); + Ok(()) } } /// cargo run --release --example multi_inputs -fn main() { +fn main() -> Result<(), Error> { let num_steps = 10; let initial_state = vec![ Fr::from(1_u32), @@ -121,7 +112,7 @@ fn main() { Fr::from(1_u32), ]; - let F_circuit = MultiInputsFCircuit::::new(()).unwrap(); + let F_circuit = MultiInputsFCircuit::::new(())?; let poseidon_config = poseidon_canonical_config::(); let mut rng = rand::rngs::OsRng; @@ -131,9 +122,7 @@ fn main() { /// trait, and the rest of our code would be working without needing to be updated. 
type N = Nova< Projective, - GVar, Projective2, - GVar2, MultiInputsFCircuit, KZG<'static, Bn254>, Pedersen, @@ -142,15 +131,15 @@ fn main() { println!("Prepare Nova ProverParams & VerifierParams"); let nova_preprocess_params = PreprocessorParam::new(poseidon_config, F_circuit); - let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params)?; println!("Initialize FoldingScheme"); - let mut folding_scheme = N::init(&nova_params, F_circuit, initial_state.clone()).unwrap(); + let mut folding_scheme = N::init(&nova_params, F_circuit, initial_state.clone())?; // compute a step of the IVC for i in 0..num_steps { let start = Instant::now(); - folding_scheme.prove_step(rng, vec![], None).unwrap(); + folding_scheme.prove_step(rng, (), None)?; println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } @@ -159,6 +148,6 @@ fn main() { N::verify( nova_params.1, // Nova's verifier params ivc_proof, - ) - .unwrap(); + )?; + Ok(()) } diff --git a/examples/noir_full_flow.rs b/examples/noir_full_flow.rs index 4e144838..80d18a48 100644 --- a/examples/noir_full_flow.rs +++ b/examples/noir_full_flow.rs @@ -9,11 +9,12 @@ /// - generate the Solidity contract that verifies the proof /// - verify the proof in the EVM /// -use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as G1}; +use ark_bn254::{Bn254, Fr, G1Projective as G1}; use ark_groth16::Groth16; -use ark_grumpkin::{constraints::GVar as GVar2, Projective as G2}; +use ark_grumpkin::Projective as G2; +use experimental_frontends::{noir::NoirFCircuit, utils::VecF}; use folding_schemes::{ commitment::{kzg::KZG, pedersen::Pedersen}, folding::{ @@ -25,10 +26,9 @@ use folding_schemes::{ }, frontend::FCircuit, transcript::poseidon::poseidon_canonical_config, - Decider, FoldingScheme, + Decider, Error, FoldingScheme, }; -use frontends::noir::{load_noir_circuit, NoirFCircuit}; -use std::time::Instant; +use std::{path::Path, time::Instant}; use solidity_verifiers::{ evm::{compile_solidity, Evm}, @@ -37,27 +37,23 @@ use solidity_verifiers::{ NovaCycleFoldVerifierKey, }; -fn main() { +fn main() -> Result<(), Error> { // set the initial state let z_0 = vec![Fr::from(1)]; // initialize the noir fcircuit - let circuit_path = format!("./frontends/src/noir/test_folder/test_mimc/target/test_mimc.json",); - - let circuit = load_noir_circuit(circuit_path).unwrap(); - let f_circuit = NoirFCircuit { - circuit, - state_len: 1, - external_inputs_len: 0, - }; - - pub type N = Nova, KZG<'static, Bn254>, Pedersen>; + const EXT_INP_LEN: usize = 0; + let f_circuit = NoirFCircuit::::new(( + Path::new("./experimental-frontends/src/noir/test_folder/test_mimc/target/test_mimc.json") + .into(), + 1, + ))?; + + pub type N = Nova, KZG<'static, Bn254>, Pedersen>; pub type D = DeciderEth< G1, - GVar, G2, - GVar2, - NoirFCircuit, + NoirFCircuit, KZG<'static, Bn254>, Pedersen, Groth16, @@ -65,23 +61,23 @@ fn main() { >; let poseidon_config = poseidon_canonical_config::(); - let mut rng = rand::rngs::OsRng; + let mut rng = ark_std::rand::rngs::OsRng; // prepare the Nova prover & verifier params let nova_preprocess_params = PreprocessorParam::new(poseidon_config, f_circuit.clone()); - let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); - - // initialize the folding scheme engine, in our case we use Nova - let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap(); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params)?; // prepare the Decider prover & 
verifier params let (decider_pp, decider_vp) = - D::preprocess(&mut rng, nova_params.clone(), nova.clone()).unwrap(); + D::preprocess(&mut rng, (nova_params.clone(), f_circuit.state_len()))?; + + // initialize the folding scheme engine, in our case we use Nova + let mut nova = N::init(&nova_params, f_circuit.clone(), z_0)?; // run n steps of the folding iteration for i in 0..5 { let start = Instant::now(); - nova.prove_step(rng, vec![], None).unwrap(); + nova.prove_step(rng, VecF(vec![]), None)?; println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } // verify the last IVC proof @@ -89,11 +85,10 @@ fn main() { N::verify( nova_params.1, // Nova's verifier params ivc_proof, - ) - .unwrap(); + )?; let start = Instant::now(); - let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); + let proof = D::prove(rng, decider_pp, nova.clone())?; println!("generated Decider proof: {:?}", start.elapsed()); let verified = D::verify( @@ -104,8 +99,7 @@ fn main() { &nova.U_i.get_commitments(), &nova.u_i.get_commitments(), &proof, - ) - .unwrap(); + )?; assert!(verified); println!("Decider proof verification: {}", verified); @@ -121,8 +115,7 @@ fn main() { &nova.U_i, &nova.u_i, proof, - ) - .unwrap(); + )?; // prepare the setup params for the solidity verifier let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((decider_vp, f_circuit.state_len())); @@ -143,9 +136,9 @@ fn main() { fs::write( "./examples/nova-verifier.sol", decider_solidity_code.clone(), - ) - .unwrap(); - fs::write("./examples/solidity-calldata.calldata", calldata.clone()).unwrap(); + )?; + fs::write("./examples/solidity-calldata.calldata", calldata.clone())?; let s = solidity_verifiers::utils::get_formatted_calldata(calldata.clone()); fs::write("./examples/solidity-calldata.inputs", s.join(",\n")).expect(""); + Ok(()) } diff --git a/examples/noname_full_flow.rs b/examples/noname_full_flow.rs index f4e14d84..500ea113 100644 --- a/examples/noname_full_flow.rs +++ b/examples/noname_full_flow.rs @@ -9,12 +9,13 @@ /// - generate the Solidity contract that verifies the proof /// - verify the proof in the EVM /// -use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as G1}; +use ark_bn254::{Bn254, Fr, G1Projective as G1}; use noname::backends::r1cs::R1csBn254Field; use ark_groth16::Groth16; -use ark_grumpkin::{constraints::GVar as GVar2, Projective as G2}; +use ark_grumpkin::Projective as G2; +use experimental_frontends::{noname::NonameFCircuit, utils::VecF}; use folding_schemes::{ commitment::{kzg::KZG, pedersen::Pedersen}, folding::{ @@ -26,9 +27,8 @@ use folding_schemes::{ }, frontend::FCircuit, transcript::poseidon::poseidon_canonical_config, - Decider, FoldingScheme, + Decider, Error, FoldingScheme, }; -use frontends::noname::NonameFCircuit; use std::time::Instant; use solidity_verifiers::{ @@ -38,7 +38,7 @@ use solidity_verifiers::{ NovaCycleFoldVerifierKey, }; -fn main() { +fn main() -> Result<(), Error> { const NONAME_CIRCUIT_EXTERNAL_INPUTS: &str = "fn main(pub ivc_inputs: [Field; 2], external_inputs: [Field; 2]) -> [Field; 2] { let xx = external_inputs[0] + ivc_inputs[0]; @@ -58,24 +58,21 @@ fn main() { ]; // initialize the noname circuit - let f_circuit_params = (NONAME_CIRCUIT_EXTERNAL_INPUTS.to_owned(), 2, 2); - let f_circuit = NonameFCircuit::::new(f_circuit_params).unwrap(); + let f_circuit_params = (NONAME_CIRCUIT_EXTERNAL_INPUTS.to_owned(), 2); // state len = 2 + const EXT_INP_LEN: usize = 2; + let f_circuit = NonameFCircuit::::new(f_circuit_params)?; pub type N = Nova< G1, - GVar, G2, - GVar2, - 
NonameFCircuit, + NonameFCircuit, KZG<'static, Bn254>, Pedersen, >; pub type D = DeciderEth< G1, - GVar, G2, - GVar2, - NonameFCircuit, + NonameFCircuit, KZG<'static, Bn254>, Pedersen, Groth16, @@ -83,24 +80,23 @@ fn main() { >; let poseidon_config = poseidon_canonical_config::(); - let mut rng = rand::rngs::OsRng; + let mut rng = ark_std::rand::rngs::OsRng; // prepare the Nova prover & verifier params let nova_preprocess_params = PreprocessorParam::new(poseidon_config, f_circuit.clone()); - let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); - - // initialize the folding scheme engine, in our case we use Nova - let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap(); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params)?; // prepare the Decider prover & verifier params let (decider_pp, decider_vp) = - D::preprocess(&mut rng, nova_params.clone(), nova.clone()).unwrap(); + D::preprocess(&mut rng, (nova_params.clone(), f_circuit.state_len()))?; + + // initialize the folding scheme engine, in our case we use Nova + let mut nova = N::init(&nova_params, f_circuit.clone(), z_0)?; // run n steps of the folding iteration for (i, external_inputs_at_step) in external_inputs.iter().enumerate() { let start = Instant::now(); - nova.prove_step(rng, external_inputs_at_step.clone(), None) - .unwrap(); + nova.prove_step(rng, VecF(external_inputs_at_step.clone()), None)?; println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } @@ -109,11 +105,10 @@ fn main() { N::verify( nova_params.1, // Nova's verifier params ivc_proof, - ) - .unwrap(); + )?; let start = Instant::now(); - let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); + let proof = D::prove(rng, decider_pp, nova.clone())?; println!("generated Decider proof: {:?}", start.elapsed()); let verified = D::verify( @@ -124,8 +119,7 @@ fn main() { &nova.U_i.get_commitments(), &nova.u_i.get_commitments(), &proof, - ) - .unwrap(); + )?; assert!(verified); println!("Decider proof verification: {}", verified); @@ -141,8 +135,7 @@ fn main() { &nova.U_i, &nova.u_i, proof, - ) - .unwrap(); + )?; // prepare the setup params for the solidity verifier let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((decider_vp, f_circuit.state_len())); @@ -163,9 +156,9 @@ fn main() { fs::write( "./examples/nova-verifier.sol", decider_solidity_code.clone(), - ) - .unwrap(); - fs::write("./examples/solidity-calldata.calldata", calldata.clone()).unwrap(); + )?; + fs::write("./examples/solidity-calldata.calldata", calldata.clone())?; let s = solidity_verifiers::utils::get_formatted_calldata(calldata.clone()); fs::write("./examples/solidity-calldata.inputs", s.join(",\n")).expect(""); + Ok(()) } diff --git a/examples/sha256.rs b/examples/sha256.rs index d974d650..22a5a104 100644 --- a/examples/sha256.rs +++ b/examples/sha256.rs @@ -4,20 +4,20 @@ #![allow(clippy::upper_case_acronyms)] use ark_crypto_primitives::crh::{ - sha256::{ - constraints::{Sha256Gadget, UnitVar}, - Sha256, - }, - CRHScheme, CRHSchemeGadget, + sha256::constraints::{Sha256Gadget, UnitVar}, + CRHSchemeGadget, +}; +use ark_ff::PrimeField; +use ark_r1cs_std::{ + convert::{ToBytesGadget, ToConstraintFieldGadget}, + fields::fp::FpVar, }; -use ark_ff::{BigInteger, PrimeField, ToConstraintField}; -use ark_r1cs_std::{fields::fp::FpVar, ToBytesGadget, ToConstraintFieldGadget}; use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; use core::marker::PhantomData; use std::time::Instant; -use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as 
Projective}; -use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; +use ark_bn254::{Bn254, Fr, G1Projective as Projective}; +use ark_grumpkin::Projective as Projective2; use folding_schemes::commitment::{kzg::KZG, pedersen::Pedersen}; use folding_schemes::folding::nova::{Nova, PreprocessorParam}; @@ -36,6 +36,8 @@ pub struct Sha256FCircuit { } impl FCircuit for Sha256FCircuit { type Params = (); + type ExternalInputs = (); + type ExternalInputsVar = (); fn new(_params: Self::Params) -> Result { Ok(Self { _f: PhantomData }) @@ -43,34 +45,16 @@ impl FCircuit for Sha256FCircuit { fn state_len(&self) -> usize { 1 } - fn external_inputs_len(&self) -> usize { - 0 - } - - /// computes the next state values in place, assigning z_{i+1} into z_i, and computing the new - /// z_{i+1} - fn step_native( - &self, - _i: usize, - z_i: Vec, - _external_inputs: Vec, - ) -> Result, Error> { - let out_bytes = Sha256::evaluate(&(), z_i[0].into_bigint().to_bytes_le()).unwrap(); - let out: Vec = out_bytes.to_field_elements().unwrap(); - - Ok(vec![out[0]]) - } - /// generates the constraints for the step of F for the given z_i fn generate_step_constraints( &self, _cs: ConstraintSystemRef, _i: usize, z_i: Vec>, - _external_inputs: Vec>, + _external_inputs: Self::ExternalInputsVar, ) -> Result>, SynthesisError> { let unit_var = UnitVar::default(); - let out_bytes = Sha256Gadget::evaluate(&unit_var, &z_i[0].to_bytes()?)?; + let out_bytes = Sha256Gadget::evaluate(&unit_var, &z_i[0].to_bytes_le()?)?; let out = out_bytes.0.to_constraint_field()?; Ok(vec![out[0].clone()]) } @@ -80,42 +64,49 @@ impl FCircuit for Sha256FCircuit { #[cfg(test)] pub mod tests { use super::*; + use ark_crypto_primitives::crh::{sha256::Sha256, CRHScheme}; + use ark_ff::{BigInteger, ToConstraintField}; use ark_r1cs_std::{alloc::AllocVar, R1CSVar}; use ark_relations::r1cs::ConstraintSystem; + fn sha256_step_native(z_i: Vec) -> Vec { + let out_bytes = Sha256::evaluate(&(), z_i[0].into_bigint().to_bytes_le()).unwrap(); + let out: Vec = out_bytes.to_field_elements().unwrap(); + + vec![out[0]] + } + // test to check that the Sha256FCircuit computes the same values inside and outside the circuit #[test] - fn test_f_circuit() { + fn test_f_circuit() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); - let circuit = Sha256FCircuit::::new(()).unwrap(); + let circuit = Sha256FCircuit::::new(())?; let z_i = vec![Fr::from(1_u32)]; - let z_i1 = circuit.step_native(0, z_i.clone(), vec![]).unwrap(); + let z_i1 = sha256_step_native(z_i.clone()); - let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i)).unwrap(); - let computed_z_i1Var = circuit - .generate_step_constraints(cs.clone(), 0, z_iVar.clone(), vec![]) - .unwrap(); - assert_eq!(computed_z_i1Var.value().unwrap(), z_i1); + let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i))?; + let computed_z_i1Var = + circuit.generate_step_constraints(cs.clone(), 0, z_iVar.clone(), ())?; + assert_eq!(computed_z_i1Var.value()?, z_i1); + Ok(()) } } /// cargo run --release --example sha256 -fn main() { +fn main() -> Result<(), Error> { let num_steps = 10; let initial_state = vec![Fr::from(1_u32)]; - let F_circuit = Sha256FCircuit::::new(()).unwrap(); + let F_circuit = Sha256FCircuit::::new(())?; /// The idea here is that eventually we could replace the next line chunk that defines the /// `type N = Nova<...>` by using another folding scheme that fulfills the `FoldingScheme` /// trait, and the rest of our code would be working without needing to be updated. 
type N = Nova< Projective, - GVar, Projective2, - GVar2, Sha256FCircuit, KZG<'static, Bn254>, Pedersen, @@ -127,14 +118,14 @@ fn main() { println!("Prepare Nova ProverParams & VerifierParams"); let nova_preprocess_params = PreprocessorParam::new(poseidon_config, F_circuit); - let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params)?; println!("Initialize FoldingScheme"); - let mut folding_scheme = N::init(&nova_params, F_circuit, initial_state.clone()).unwrap(); + let mut folding_scheme = N::init(&nova_params, F_circuit, initial_state.clone())?; // compute a step of the IVC for i in 0..num_steps { let start = Instant::now(); - folding_scheme.prove_step(rng, vec![], None).unwrap(); + folding_scheme.prove_step(rng, (), None)?; println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } @@ -143,6 +134,6 @@ fn main() { N::verify( nova_params.1, // Nova's verifier params ivc_proof, - ) - .unwrap(); + )?; + Ok(()) } diff --git a/experimental-frontends/Cargo.toml b/experimental-frontends/Cargo.toml new file mode 100644 index 00000000..eadf7a3f --- /dev/null +++ b/experimental-frontends/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "experimental-frontends" +version = "0.1.0" +edition = "2021" + +[dependencies] +ark-ff = { version = "^0.5.0", default-features = false, features = ["parallel", "asm"] } +ark-std = { version = "^0.5.0", default-features = false, features = ["parallel"] } +ark-relations = { version = "^0.5.0", default-features = false } +ark-r1cs-std = { version = "^0.5.0", default-features = false, features = ["parallel"] } +ark-serialize = { version = "^0.5.0", default-features = false } +ark-circom = { git = "https://github.com/winderica/circom-compat", branch = "arkworks-next", default-features = false } +num-bigint = "0.4" +noname = { git = "https://github.com/dmpierre/noname" } +acvm = { git = "https://github.com/winderica/noir", branch = "arkworks-next", default-features = false } +folding-schemes = { path = "../folding-schemes/"} +serde = { version = "^1.0.0", features = ["derive"] } +serde_json = "^1.0.0" + +[dev-dependencies] +ark-bn254 = { version="^0.5.0", features=["r1cs"]} + +# This allows the crate to be built when targeting WASM. +# See more at: https://docs.rs/getrandom/#webassembly-support +[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] +getrandom = { version = "0.2", features = ["js"] } + +[features] +default = ["ark-circom/default", "parallel"] +parallel = [] +wasm = ["ark-circom/wasm"] diff --git a/frontends/README.md b/experimental-frontends/README.md similarity index 97% rename from frontends/README.md rename to experimental-frontends/README.md index e43cfa18..dedc9e77 100644 --- a/frontends/README.md +++ b/experimental-frontends/README.md @@ -1,9 +1,8 @@ -# frontends +# experimental-frontends This crate contains *experimental frontends* for Sonobe. The recommended frontend is to directly use [arkworks](https://github.com/arkworks-rs) to define the FCircuit, just following the [`FCircuit` trait](https://github.com/privacy-scaling-explorations/sonobe/blob/main/folding-schemes/src/frontend/mod.rs). -## Experimental frontends > Warning: the following frontends are experimental and some computational and time overhead is expected when using them compared to directly using the [arkworks frontend](https://github.com/privacy-scaling-explorations/sonobe/blob/main/folding-schemes/src/frontend/mod.rs). 
Available experimental frontends: diff --git a/frontends/src/circom/mod.rs b/experimental-frontends/src/circom/mod.rs similarity index 56% rename from frontends/src/circom/mod.rs rename to experimental-frontends/src/circom/mod.rs index 313a5602..ed7a76a7 100644 --- a/frontends/src/circom/mod.rs +++ b/experimental-frontends/src/circom/mod.rs @@ -8,138 +8,59 @@ use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisE use ark_std::fmt::Debug; use folding_schemes::{frontend::FCircuit, utils::PathOrBin, Error}; use num_bigint::BigInt; -use std::fmt; -use std::rc::Rc; pub mod utils; +use crate::utils::{VecF, VecFpVar}; use utils::CircomWrapper; -type ClosurePointer = Rc, Vec) -> Result, Error>>; - -#[derive(Clone)] -struct CustomStepNative { - func: ClosurePointer, -} - -impl fmt::Debug for CustomStepNative { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "Function pointer: {:?}", - std::any::type_name::, Vec) -> Result, Error>>() - ) - } -} - -/// Define CircomFCircuit +/// Define CircomFCircuit. The parameter `L` indicates the length of the ExternalInputs vector of +/// field elements. #[derive(Clone, Debug)] -pub struct CircomFCircuit { +pub struct CircomFCircuit { circom_wrapper: CircomWrapper, pub state_len: usize, - pub external_inputs_len: usize, r1cs: CircomR1CS, - custom_step_native_code: Option>, } -impl CircomFCircuit { - pub fn set_custom_step_native(&mut self, func: ClosurePointer) { - self.custom_step_native_code = Some(CustomStepNative:: { func }); - } - - pub fn execute_custom_step_native( - &self, - _i: usize, - z_i: Vec, - external_inputs: Vec, - ) -> Result, Error> { - if let Some(code) = &self.custom_step_native_code { - (code.func)(_i, z_i, external_inputs) - } else { - #[cfg(test)] - assert_eq!(z_i.len(), self.state_len()); - #[cfg(test)] - assert_eq!(external_inputs.len(), self.external_inputs_len()); - - let inputs_bi = z_i - .iter() - .map(|val| self.circom_wrapper.ark_primefield_to_num_bigint(*val)) - .collect::>(); - let mut inputs_map = vec![("ivc_input".to_string(), inputs_bi)]; - - if self.external_inputs_len() > 0 { - let external_inputs_bi = external_inputs - .iter() - .map(|val| self.circom_wrapper.ark_primefield_to_num_bigint(*val)) - .collect::>(); - inputs_map.push(("external_inputs".to_string(), external_inputs_bi)); - } - - // Computes witness - let witness = self - .circom_wrapper - .extract_witness(&inputs_map) - .map_err(|e| { - Error::WitnessCalculationError(format!("Failed to calculate witness: {}", e)) - })?; - - // Extracts the z_i1(next state) from the witness vector. 
- let z_i1 = witness[1..1 + self.state_len()].to_vec(); - Ok(z_i1) - } - } -} - -impl FCircuit for CircomFCircuit { - /// (r1cs_path, wasm_path, state_len, external_inputs_len) - type Params = (PathOrBin, PathOrBin, usize, usize); +impl FCircuit for CircomFCircuit { + /// (r1cs_path, wasm_path, state_len) + type Params = (PathOrBin, PathOrBin, usize); + type ExternalInputs = VecF; + type ExternalInputsVar = VecFpVar; fn new(params: Self::Params) -> Result { - let (r1cs_path, wasm_path, state_len, external_inputs_len) = params; + let (r1cs_path, wasm_path, state_len) = params; let circom_wrapper = CircomWrapper::new(r1cs_path, wasm_path)?; let r1cs = circom_wrapper.extract_r1cs()?; Ok(Self { circom_wrapper, state_len, - external_inputs_len, r1cs, - custom_step_native_code: None, }) } fn state_len(&self) -> usize { self.state_len } - fn external_inputs_len(&self) -> usize { - self.external_inputs_len - } - - fn step_native( - &self, - _i: usize, - z_i: Vec, - external_inputs: Vec, - ) -> Result, Error> { - self.execute_custom_step_native(_i, z_i, external_inputs) - } fn generate_step_constraints( &self, cs: ConstraintSystemRef, _i: usize, z_i: Vec>, - external_inputs: Vec>, + external_inputs: Self::ExternalInputsVar, ) -> Result>, SynthesisError> { #[cfg(test)] assert_eq!(z_i.len(), self.state_len()); #[cfg(test)] - assert_eq!(external_inputs.len(), self.external_inputs_len()); + assert_eq!(external_inputs.0.len(), L); let input_values = self.fpvars_to_bigints(&z_i)?; let mut inputs_map = vec![("ivc_input".to_string(), input_values)]; - if self.external_inputs_len() > 0 { - let external_inputs_bi = self.fpvars_to_bigints(&external_inputs)?; + if L > 0 { + let external_inputs_bi = self.fpvars_to_bigints(&external_inputs.0)?; inputs_map.push(("external_inputs".to_string(), external_inputs_bi)); } @@ -184,7 +105,7 @@ impl FCircuit for CircomFCircuit { } } -impl CircomFCircuit { +impl CircomFCircuit { fn fpvars_to_bigints(&self, fpvars: &[FpVar]) -> Result, SynthesisError> { let mut input_values = Vec::new(); // converts each FpVar to PrimeField value, then to num_bigint::BigInt. @@ -208,173 +129,188 @@ pub mod tests { use ark_relations::r1cs::ConstraintSystem; use std::path::PathBuf; - // Tests the step_native function of CircomFCircuit. - #[test] - fn test_circom_step_native() { - let r1cs_path = PathBuf::from("./src/circom/test_folder/cubic_circuit.r1cs"); - let wasm_path = - PathBuf::from("./src/circom/test_folder/cubic_circuit_js/cubic_circuit.wasm"); + /// Native implementation of `src/circom/test_folder/cubic_circuit.r1cs` + fn cubic_step_native(z_i: Vec) -> Vec { + let z = z_i[0]; + vec![z * z * z + z + F::from(5)] + } - let circom_fcircuit = - CircomFCircuit::::new((r1cs_path.into(), wasm_path.into(), 1, 0)).unwrap(); // state_len:1, external_inputs_len:0 + /// Native implementation of `src/circom/test_folder/with_external_inputs.r1cs` + fn external_inputs_step_native(z_i: Vec, external_inputs: Vec) -> Vec { + let temp1 = z_i[0] * z_i[0]; + let temp2 = z_i[0] * external_inputs[0]; + vec![temp1 * z_i[0] + temp2 + external_inputs[1]] + } + + /// Native implementation of `src/circom/test_folder/no_external_inputs.r1cs` + fn no_external_inputs_step_native(z_i: Vec) -> Vec { + let temp1 = z_i[0] * z_i[1]; + let temp2 = temp1 * z_i[2]; + vec![ + temp1 * z_i[0], + temp1 * z_i[1] + temp1, + temp1 * z_i[2] + temp2, + ] + } + // Tests the step_native function of CircomFCircuit. 
+ #[test] + fn test_circom_step_native() -> Result<(), Error> { let z_i = vec![Fr::from(3u32)]; - let z_i1 = circom_fcircuit.step_native(1, z_i, vec![]).unwrap(); + let z_i1 = cubic_step_native(z_i); assert_eq!(z_i1, vec![Fr::from(35u32)]); + Ok(()) } // Tests the generate_step_constraints function of CircomFCircuit. #[test] - fn test_circom_step_constraints() { + fn test_circom_step_constraints() -> Result<(), Error> { let r1cs_path = PathBuf::from("./src/circom/test_folder/cubic_circuit.r1cs"); let wasm_path = PathBuf::from("./src/circom/test_folder/cubic_circuit_js/cubic_circuit.wasm"); let circom_fcircuit = - CircomFCircuit::::new((r1cs_path.into(), wasm_path.into(), 1, 0)).unwrap(); // state_len:1, external_inputs_len:0 + CircomFCircuit::::new((r1cs_path.into(), wasm_path.into(), 1))?; // state_len:1, external_inputs_len:0 let cs = ConstraintSystem::::new_ref(); let z_i = vec![Fr::from(3u32)]; - let z_i_var = Vec::>::new_witness(cs.clone(), || Ok(z_i)).unwrap(); - let z_i1_var = circom_fcircuit - .generate_step_constraints(cs.clone(), 1, z_i_var, vec![]) - .unwrap(); - assert_eq!(z_i1_var.value().unwrap(), vec![Fr::from(35u32)]); + let z_i_var = Vec::>::new_witness(cs.clone(), || Ok(z_i))?; + let z_i1_var = + circom_fcircuit.generate_step_constraints(cs.clone(), 1, z_i_var, VecFpVar(vec![]))?; + assert_eq!(z_i1_var.value()?, vec![Fr::from(35u32)]); + Ok(()) } // Tests the WrapperCircuit with CircomFCircuit. #[test] - fn test_wrapper_circomtofcircuit() { + fn test_wrapper_circomtofcircuit() -> Result<(), Error> { let r1cs_path = PathBuf::from("./src/circom/test_folder/cubic_circuit.r1cs"); let wasm_path = PathBuf::from("./src/circom/test_folder/cubic_circuit_js/cubic_circuit.wasm"); let circom_fcircuit = - CircomFCircuit::::new((r1cs_path.into(), wasm_path.into(), 1, 0)).unwrap(); // state_len:1, external_inputs_len:0 + CircomFCircuit::::new((r1cs_path.into(), wasm_path.into(), 1))?; // state_len:1, external_inputs_len:0 // Allocates z_i1 by using step_native function. 
let z_i = vec![Fr::from(3_u32)]; let wrapper_circuit = folding_schemes::frontend::utils::WrapperCircuit { FC: circom_fcircuit.clone(), z_i: Some(z_i.clone()), - z_i1: Some(circom_fcircuit.step_native(0, z_i.clone(), vec![]).unwrap()), + z_i1: Some(cubic_step_native(z_i)), }; let cs = ConstraintSystem::::new_ref(); - wrapper_circuit.generate_constraints(cs.clone()).unwrap(); - assert!( - cs.is_satisfied().unwrap(), - "Constraint system is not satisfied" - ); + wrapper_circuit.generate_constraints(cs.clone())?; + assert!(cs.is_satisfied()?, "Constraint system is not satisfied"); + Ok(()) } #[test] - fn test_circom_external_inputs() { + fn test_circom_external_inputs() -> Result<(), Error> { let r1cs_path = PathBuf::from("./src/circom/test_folder/with_external_inputs.r1cs"); let wasm_path = PathBuf::from( "./src/circom/test_folder/with_external_inputs_js/with_external_inputs.wasm", ); let circom_fcircuit = - CircomFCircuit::::new((r1cs_path.into(), wasm_path.into(), 1, 2)).unwrap(); // state_len:1, external_inputs_len:2 + CircomFCircuit::::new((r1cs_path.into(), wasm_path.into(), 1))?; // state_len:1, external_inputs_len:2 let cs = ConstraintSystem::::new_ref(); let z_i = vec![Fr::from(3u32)]; let external_inputs = vec![Fr::from(6u32), Fr::from(7u32)]; // run native step - let z_i1_native = circom_fcircuit - .step_native(1, z_i.clone(), external_inputs.clone()) - .unwrap(); + let z_i1_native = external_inputs_step_native(z_i.clone(), external_inputs.clone()); // run gadget step - let z_i_var = Vec::>::new_witness(cs.clone(), || Ok(z_i)).unwrap(); + let z_i_var = Vec::>::new_witness(cs.clone(), || Ok(z_i))?; let external_inputs_var = - Vec::>::new_witness(cs.clone(), || Ok(external_inputs.clone())).unwrap(); - let z_i1_var = circom_fcircuit - .generate_step_constraints(cs.clone(), 1, z_i_var, external_inputs_var) - .unwrap(); + Vec::>::new_witness(cs.clone(), || Ok(external_inputs.clone()))?; + let z_i1_var = circom_fcircuit.generate_step_constraints( + cs.clone(), + 1, + z_i_var, + VecFpVar(external_inputs_var), + )?; - assert_eq!(z_i1_var.value().unwrap(), z_i1_native); + assert_eq!(z_i1_var.value()?, z_i1_native); // re-init cs and run gadget step with wrong ivc inputs (first ivc should not be zero) let cs = ConstraintSystem::::new_ref(); let wrong_z_i = vec![Fr::from(0)]; - let wrong_z_i_var = Vec::>::new_witness(cs.clone(), || Ok(wrong_z_i)).unwrap(); + let wrong_z_i_var = Vec::>::new_witness(cs.clone(), || Ok(wrong_z_i))?; let external_inputs_var = - Vec::>::new_witness(cs.clone(), || Ok(external_inputs)).unwrap(); + Vec::>::new_witness(cs.clone(), || Ok(external_inputs))?; let _z_i1_var = circom_fcircuit.generate_step_constraints( cs.clone(), 1, wrong_z_i_var, - external_inputs_var, + VecFpVar(external_inputs_var), ); // TODO:: https://github.com/privacy-scaling-explorations/sonobe/issues/104 // Disable check for now // assert!(z_i1_var.is_err()); + Ok(()) } #[test] - fn test_circom_no_external_inputs() { + fn test_circom_no_external_inputs() -> Result<(), Error> { let r1cs_path = PathBuf::from("./src/circom/test_folder/no_external_inputs.r1cs"); let wasm_path = PathBuf::from("./src/circom/test_folder/no_external_inputs_js/no_external_inputs.wasm"); let circom_fcircuit = - CircomFCircuit::::new((r1cs_path.into(), wasm_path.into(), 3, 0)).unwrap(); + CircomFCircuit::::new((r1cs_path.into(), wasm_path.into(), 3))?; let cs = ConstraintSystem::::new_ref(); let z_i = vec![Fr::from(3u32), Fr::from(4u32), Fr::from(5u32)]; - let z_i_var = Vec::>::new_witness(cs.clone(), || 
Ok(z_i.clone())).unwrap(); + let z_i_var = Vec::>::new_witness(cs.clone(), || Ok(z_i.clone()))?; // run native step - let z_i1_native = circom_fcircuit.step_native(1, z_i.clone(), vec![]).unwrap(); + let z_i1_native = no_external_inputs_step_native(z_i.clone()); // run gadget step - let z_i1_var = circom_fcircuit - .generate_step_constraints(cs.clone(), 1, z_i_var, vec![]) - .unwrap(); + let z_i1_var = + circom_fcircuit.generate_step_constraints(cs.clone(), 1, z_i_var, VecFpVar(vec![]))?; - assert_eq!(z_i1_var.value().unwrap(), z_i1_native); + assert_eq!(z_i1_var.value()?, z_i1_native); // re-init cs and run gadget step with wrong ivc inputs (first ivc input should not be zero) let cs = ConstraintSystem::::new_ref(); let wrong_z_i = vec![Fr::from(0u32), Fr::from(4u32), Fr::from(5u32)]; - let wrong_z_i_var = Vec::>::new_witness(cs.clone(), || Ok(wrong_z_i)).unwrap(); - let _z_i1_var = - circom_fcircuit.generate_step_constraints(cs.clone(), 1, wrong_z_i_var, vec![]); + let wrong_z_i_var = Vec::>::new_witness(cs.clone(), || Ok(wrong_z_i))?; + let _z_i1_var = circom_fcircuit.generate_step_constraints( + cs.clone(), + 1, + wrong_z_i_var, + VecFpVar(vec![]), + ); // TODO:: https://github.com/privacy-scaling-explorations/sonobe/issues/104 // Disable check for now // assert!(z_i1_var.is_err()) + Ok(()) } #[test] - fn test_custom_code() { + fn test_custom_code() -> Result<(), Error> { let r1cs_path = PathBuf::from("./src/circom/test_folder/cubic_circuit.r1cs"); let wasm_path = PathBuf::from("./src/circom/test_folder/cubic_circuit_js/cubic_circuit.wasm"); - let mut circom_fcircuit = - CircomFCircuit::::new((r1cs_path.into(), wasm_path.into(), 1, 0)).unwrap(); // state_len:1, external_inputs_len:0 - - circom_fcircuit.set_custom_step_native(Rc::new(|_i, z_i, _external| { - let z = z_i[0]; - Ok(vec![z * z * z + z + Fr::from(5)]) - })); + let circom_fcircuit = + CircomFCircuit::::new((r1cs_path.into(), wasm_path.into(), 1))?; // state_len:1, external_inputs_len:0 // Allocates z_i1 by using step_native function. 
let z_i = vec![Fr::from(3_u32)]; let wrapper_circuit = folding_schemes::frontend::utils::WrapperCircuit { FC: circom_fcircuit.clone(), z_i: Some(z_i.clone()), - z_i1: Some(circom_fcircuit.step_native(0, z_i.clone(), vec![]).unwrap()), + z_i1: Some(cubic_step_native(z_i)), }; let cs = ConstraintSystem::::new_ref(); - wrapper_circuit.generate_constraints(cs.clone()).unwrap(); - assert!( - cs.is_satisfied().unwrap(), - "Constraint system is not satisfied" - ); + wrapper_circuit.generate_constraints(cs.clone())?; + assert!(cs.is_satisfied()?, "Constraint system is not satisfied"); + Ok(()) } } diff --git a/frontends/src/circom/test_folder/circuits/is_zero.circom b/experimental-frontends/src/circom/test_folder/circuits/is_zero.circom similarity index 100% rename from frontends/src/circom/test_folder/circuits/is_zero.circom rename to experimental-frontends/src/circom/test_folder/circuits/is_zero.circom diff --git a/experimental-frontends/src/circom/test_folder/compile.sh b/experimental-frontends/src/circom/test_folder/compile.sh new file mode 100755 index 00000000..1993e3ce --- /dev/null +++ b/experimental-frontends/src/circom/test_folder/compile.sh @@ -0,0 +1,4 @@ +#!/bin/bash +circom ./experimental-frontends/src/circom/test_folder/cubic_circuit.circom --r1cs --sym --wasm --prime bn128 --output ./experimental-frontends/src/circom/test_folder/ +circom ./experimental-frontends/src/circom/test_folder/with_external_inputs.circom --r1cs --sym --wasm --prime bn128 --output ./experimental-frontends/src/circom/test_folder/ +circom ./experimental-frontends/src/circom/test_folder/no_external_inputs.circom --r1cs --sym --wasm --prime bn128 --output ./experimental-frontends/src/circom/test_folder/ diff --git a/frontends/src/circom/test_folder/cubic_circuit.circom b/experimental-frontends/src/circom/test_folder/cubic_circuit.circom similarity index 100% rename from frontends/src/circom/test_folder/cubic_circuit.circom rename to experimental-frontends/src/circom/test_folder/cubic_circuit.circom diff --git a/frontends/src/circom/test_folder/no_external_inputs.circom b/experimental-frontends/src/circom/test_folder/no_external_inputs.circom similarity index 100% rename from frontends/src/circom/test_folder/no_external_inputs.circom rename to experimental-frontends/src/circom/test_folder/no_external_inputs.circom diff --git a/frontends/src/circom/test_folder/with_external_inputs.circom b/experimental-frontends/src/circom/test_folder/with_external_inputs.circom similarity index 100% rename from frontends/src/circom/test_folder/with_external_inputs.circom rename to experimental-frontends/src/circom/test_folder/with_external_inputs.circom diff --git a/frontends/src/circom/utils.rs b/experimental-frontends/src/circom/utils.rs similarity index 94% rename from frontends/src/circom/utils.rs rename to experimental-frontends/src/circom/utils.rs index 0050be5b..34448be0 100644 --- a/frontends/src/circom/utils.rs +++ b/experimental-frontends/src/circom/utils.rs @@ -145,7 +145,7 @@ mod tests { // Test the satisfication by using the CircomBuilder of circom-compat #[test] - fn test_circombuilder_satisfied() { + fn test_circombuilder_satisfied() -> Result<(), Error> { let cfg = CircomConfig::::new( "./src/circom/test_folder/cubic_circuit_js/cubic_circuit.wasm", "./src/circom/test_folder/cubic_circuit.r1cs", @@ -156,21 +156,22 @@ mod tests { let circom = builder.build().unwrap(); let cs = ConstraintSystem::::new_ref(); - circom.generate_constraints(cs.clone()).unwrap(); - assert!(cs.is_satisfied().unwrap()); + 
circom.generate_constraints(cs.clone())?; + assert!(cs.is_satisfied()?); + Ok(()) } // Test the satisfaction by using the CircomWrapper #[test] - fn test_extract_r1cs_and_witness() { + fn test_extract_r1cs_and_witness() -> Result<(), Error> { let r1cs_path = PathBuf::from("./src/circom/test_folder/cubic_circuit.r1cs"); let wasm_path = PathBuf::from("./src/circom/test_folder/cubic_circuit_js/cubic_circuit.wasm"); let inputs = vec![("ivc_input".to_string(), vec![BigInt::from(3)])]; - let wrapper = CircomWrapper::::new(r1cs_path.into(), wasm_path.into()).unwrap(); + let wrapper = CircomWrapper::::new(r1cs_path.into(), wasm_path.into())?; - let (r1cs, witness) = wrapper.extract_r1cs_and_witness(&inputs).unwrap(); + let (r1cs, witness) = wrapper.extract_r1cs_and_witness(&inputs)?; let cs = ConstraintSystem::::new_ref(); @@ -181,7 +182,8 @@ mod tests { allocate_inputs_as_witnesses: false, }; - circom_circuit.generate_constraints(cs.clone()).unwrap(); - assert!(cs.is_satisfied().unwrap()); + circom_circuit.generate_constraints(cs.clone())?; + assert!(cs.is_satisfied()?); + Ok(()) } } diff --git a/frontends/src/lib.rs b/experimental-frontends/src/lib.rs similarity index 75% rename from frontends/src/lib.rs rename to experimental-frontends/src/lib.rs index f6b56f35..da1a1786 100644 --- a/frontends/src/lib.rs +++ b/experimental-frontends/src/lib.rs @@ -1,3 +1,4 @@ pub mod circom; pub mod noir; pub mod noname; +pub mod utils; diff --git a/experimental-frontends/src/noir/bridge.rs b/experimental-frontends/src/noir/bridge.rs new file mode 100644 index 00000000..33d95e37 --- /dev/null +++ b/experimental-frontends/src/noir/bridge.rs @@ -0,0 +1,154 @@ +// From https://github.com/dmpierre/arkworks_backend/tree/feat/sonobe-integration +use std::collections::{BTreeMap, HashMap}; +use std::convert::TryInto; + +use acvm::acir::{ + acir_field::GenericFieldElement, + circuit::{Circuit, Opcode, PublicInputs}, + native_types::{Expression, Witness, WitnessMap}, +}; +use ark_ff::{Field, PrimeField}; +use ark_r1cs_std::alloc::AllocVar; +use ark_r1cs_std::fields::fp::FpVar; +use ark_relations::{ + lc, + r1cs::{ + ConstraintSynthesizer, ConstraintSystemRef, LinearCombination, SynthesisError, Variable, + }, +}; + +// AcirCircuit and AcirArithGate are structs that arkworks can synthesise. +// +// The difference between these structures and the ACIR structure that the compiler uses is the following: +// - The compiler's ACIR struct is currently fixed to bn254 +// - These structures only support arithmetic gates, while the compiler has other +// gate types. These can be added later once the backend knows how to deal with things like XOR +// or once ACIR is taught how to convert these black box functions to Arithmetic gates. +// +// XXX: Ideally we want to implement `ConstraintSynthesizer` on ACIR however +// this does not seem possible since ACIR is just a description of the constraint system and the API asks for prover values also.
+// +// Perfect API would look like: +// - index(srs, circ) +// - prove(index_pk, prover_values, rng) +// - verify(index_vk, verifier, rng) +#[derive(Clone)] +pub struct AcirCircuitSonobe<'a, F: Field + PrimeField> { + pub(crate) gates: Vec>>, + pub(crate) public_inputs: PublicInputs, + pub(crate) values: BTreeMap, + pub already_assigned_witnesses: HashMap>, +} + +impl<'a, ConstraintF: Field + PrimeField> ConstraintSynthesizer + for AcirCircuitSonobe<'a, ConstraintF> +{ + fn generate_constraints( + self, + cs: ConstraintSystemRef, + ) -> Result<(), SynthesisError> { + let mut variables = Vec::with_capacity(self.values.len()); + + // First create all of the witness indices by adding the values into the constraint system + for (i, val) in self.values.iter() { + let var = if self.already_assigned_witnesses.contains_key(i) { + let var = self.already_assigned_witnesses.get(i).unwrap(); + if let FpVar::Var(allocated) = var { + allocated.variable + } else { + return Err(SynthesisError::Unsatisfiable); + } + } else if self.public_inputs.contains(i.0.try_into().unwrap()) { + cs.new_witness_variable(|| Ok(*val))? + } else { + cs.new_witness_variable(|| Ok(*val))? + }; + variables.push(var); + } + + // Now iterate each gate and add it to the constraint system + for gate in self.gates { + let mut arith_gate = LinearCombination::::new(); + + // Process mul terms + for mul_term in gate.mul_terms { + let coeff = mul_term.0; + let left_val = self.values[&mul_term.1]; + let right_val = self.values[&mul_term.2]; + + let out_val = left_val * right_val; + let out_var = FpVar::::new_witness(cs.clone(), || Ok(out_val))?; + // out var can't be a type different from FpVar::Var + if let FpVar::Var(allocated) = out_var { + arith_gate += (coeff.into_repr(), allocated.variable); + } + } + + // Process Add terms + for add_term in gate.linear_combinations { + let coeff = add_term.0; + let add_var = &variables[add_term.1.as_usize()]; + arith_gate += (coeff.into_repr(), *add_var); + } + + // Process constant term + arith_gate += (gate.q_c.into_repr(), Variable::One); + + cs.enforce_constraint(lc!() + Variable::One, arith_gate, lc!())?; + } + + Ok(()) + } +} + +impl<'a, F: PrimeField> + From<( + &Circuit>, + WitnessMap>, + )> for AcirCircuitSonobe<'a, F> +{ + fn from( + circ_val: ( + &Circuit>, + WitnessMap>, + ), + ) -> AcirCircuitSonobe<'a, F> { + // Currently non-arithmetic gates are not supported + // so we extract all of the arithmetic gates only + let (circuit, witness_map) = circ_val; + + let public_inputs = circuit.public_inputs(); + let arith_gates: Vec<_> = circuit + .opcodes + .iter() + .filter_map(|opcode| { + if let Opcode::AssertZero(code) = opcode { + Some(code.clone()) + } else { + None + } + }) + .collect(); + + let num_variables: usize = circuit.num_vars().try_into().unwrap(); + + let values: BTreeMap = (0..num_variables) + .map(|witness_index| { + // Get the value if it exists. 
If i does not, then we fill it with the zero value + let witness = Witness(witness_index as u32); + let value = witness_map + .get(&witness) + .map_or(F::zero(), |field| field.into_repr()); + + (witness, value) + }) + .collect(); + + AcirCircuitSonobe { + gates: arith_gates, + values, + public_inputs, + already_assigned_witnesses: HashMap::new(), + } + } +} diff --git a/experimental-frontends/src/noir/mod.rs b/experimental-frontends/src/noir/mod.rs new file mode 100644 index 00000000..83228c8e --- /dev/null +++ b/experimental-frontends/src/noir/mod.rs @@ -0,0 +1,241 @@ +use acvm::{ + acir::{ + acir_field::GenericFieldElement, + circuit::{Circuit, Program}, + native_types::{Witness as AcvmWitness, WitnessMap}, + }, + blackbox_solver::StubbedBlackBoxSolver, + pwg::ACVM, +}; +use ark_ff::PrimeField; +use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar}; +use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; +use serde::{self, Deserialize, Serialize}; +use std::collections::HashMap; + +use self::bridge::AcirCircuitSonobe; +use crate::utils::{VecF, VecFpVar}; +use folding_schemes::{frontend::FCircuit, utils::PathOrBin, Error}; + +mod bridge; + +#[derive(Clone, Debug)] +pub struct NoirFCircuit { + pub circuit: Circuit>, + pub state_len: usize, +} + +#[derive(Clone, Serialize, Deserialize, Debug)] +pub struct ProgramArtifactGeneric { + #[serde( + serialize_with = "Program::serialize_program_base64", + deserialize_with = "Program::deserialize_program_base64" + )] + pub bytecode: Program>, +} + +impl FCircuit for NoirFCircuit { + type Params = (PathOrBin, usize); + type ExternalInputs = VecF; + type ExternalInputsVar = VecFpVar; + + fn new(params: Self::Params) -> Result { + let (source, state_len) = params; + let input_string = match source { + PathOrBin::Path(path) => { + let file_path = path.with_extension("json"); + std::fs::read(&file_path).map_err(|_| Error::Other(format!("{} is not a valid path\nRun either `nargo compile` to generate missing build artifacts or `nargo prove` to construct a proof", file_path.display())))? 
+ } + PathOrBin::Bin(bin) => bin, + }; + let program: ProgramArtifactGeneric = serde_json::from_slice(&input_string) + .map_err(|err| Error::JSONSerdeError(err.to_string()))?; + let circuit: Circuit> = program.bytecode.functions[0].clone(); + let ivc_input_length = circuit.public_parameters.0.len(); + let ivc_return_length = circuit.return_values.0.len(); + + if ivc_input_length != ivc_return_length { + return Err(Error::NotSameLength( + "IVC input: ".to_string(), + ivc_input_length, + "IVC output: ".to_string(), + ivc_return_length, + )); + } + + Ok(NoirFCircuit { circuit, state_len }) + } + + fn state_len(&self) -> usize { + self.state_len + } + + fn generate_step_constraints( + &self, + cs: ConstraintSystemRef, + _i: usize, + z_i: Vec>, + external_inputs: Self::ExternalInputsVar, // inputs that are not part of the state + ) -> Result>, SynthesisError> { + let mut acvm = ACVM::new( + &StubbedBlackBoxSolver, + &self.circuit.opcodes, + WitnessMap::new(), + &[], + &[], + ); + + let mut already_assigned_witness_values = HashMap::new(); + + self.circuit + .public_parameters + .0 + .iter() + .map(|witness| { + let idx: usize = witness.as_usize(); + let witness = AcvmWitness(witness.witness_index()); + already_assigned_witness_values.insert(witness, &z_i[idx]); + let val = z_i[idx].value()?; + let value = if val == F::zero() { + "0".to_string() + } else { + val.to_string() + }; + + let f = GenericFieldElement::::try_from_str(&value) + .ok_or(SynthesisError::Unsatisfiable)?; + acvm.overwrite_witness(witness, f); + Ok(()) + }) + .collect::, SynthesisError>>()?; + + // write witness values for external_inputs + self.circuit + .private_parameters + .iter() + .map(|witness| { + let idx = witness.as_usize() - z_i.len(); + let witness = AcvmWitness(witness.witness_index()); + already_assigned_witness_values.insert(witness, &external_inputs.0[idx]); + + let val = external_inputs.0[idx].value()?; + let value = if val == F::zero() { + "0".to_string() + } else { + val.to_string() + }; + + let f = GenericFieldElement::::try_from_str(&value) + .ok_or(SynthesisError::Unsatisfiable)?; + acvm.overwrite_witness(witness, f); + Ok(()) + }) + .collect::, SynthesisError>>()?; + + // computes the witness + let _ = acvm.solve(); + let witness_map = acvm.finalize(); + + // get the z_{i+1} output state + let assigned_z_i1 = self + .circuit + .return_values + .0 + .iter() + .map(|witness| { + let noir_field_element = witness_map + .get(witness) + .ok_or(SynthesisError::AssignmentMissing)?; + FpVar::::new_witness(cs.clone(), || Ok(noir_field_element.into_repr())) + }) + .collect::>, SynthesisError>>()?; + + // initialize circuit and set already assigned values + let mut acir_circuit = AcirCircuitSonobe::from((&self.circuit, witness_map)); + acir_circuit.already_assigned_witnesses = already_assigned_witness_values; + + acir_circuit.generate_constraints(cs.clone())?; + + Ok(assigned_z_i1) + } +} + +#[cfg(test)] +mod tests { + use ark_bn254::Fr; + use ark_ff::PrimeField; + use ark_r1cs_std::R1CSVar; + use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar}; + use ark_relations::r1cs::ConstraintSystem; + use folding_schemes::{frontend::FCircuit, Error}; + use std::env; + + use crate::noir::NoirFCircuit; + use crate::utils::VecFpVar; + + /// Native implementation of `src/noir/test_folder/test_circuit` + fn external_inputs_step_native(z_i: Vec, external_inputs: Vec) -> Vec { + let xx = external_inputs[0] * z_i[0]; + let yy = external_inputs[1] * z_i[1]; + vec![xx, yy] + } + + #[test] + fn test_step_native() -> Result<(), Error> 
{ + let inputs = vec![Fr::from(2), Fr::from(5)]; + let res = external_inputs_step_native(inputs.clone(), inputs); + assert_eq!(res, vec![Fr::from(4), Fr::from(25)]); + Ok(()) + } + + #[test] + fn test_step_constraints() -> Result<(), Error> { + let cs = ConstraintSystem::::new_ref(); + let cur_path = env::current_dir()?; + // external inputs length: 2, state length: 2 + let noirfcircuit = NoirFCircuit::::new(( + cur_path + .join("src/noir/test_folder/test_circuit/target/test_circuit.json") + .into(), + 2, + ))?; + let inputs = vec![Fr::from(2), Fr::from(5)]; + let z_i = Vec::>::new_witness(cs.clone(), || Ok(inputs.clone()))?; + let external_inputs = Vec::>::new_witness(cs.clone(), || Ok(inputs))?; + let output = noirfcircuit.generate_step_constraints( + cs.clone(), + 0, + z_i, + VecFpVar(external_inputs), + )?; + assert_eq!(output[0].value()?, Fr::from(4)); + assert_eq!(output[1].value()?, Fr::from(25)); + Ok(()) + } + + #[test] + fn test_step_constraints_no_external_inputs() -> Result<(), Error> { + let cs = ConstraintSystem::::new_ref(); + let cur_path = env::current_dir()?; + // external inputs length: 0, state length: 2 + let noirfcircuit = NoirFCircuit::::new(( + cur_path + .join("src/noir/test_folder/test_no_external_inputs/target/test_no_external_inputs.json") + .into(), + 2, + )) + ?; + let inputs = vec![Fr::from(2), Fr::from(5)]; + let z_i = Vec::>::new_witness(cs.clone(), || Ok(inputs.clone()))?; + let external_inputs = vec![]; + let output = noirfcircuit.generate_step_constraints( + cs.clone(), + 0, + z_i, + VecFpVar(external_inputs), + )?; + assert_eq!(output[0].value()?, Fr::from(4)); + assert_eq!(output[1].value()?, Fr::from(25)); + Ok(()) + } +} diff --git a/frontends/src/noir/test_folder/compile.sh b/experimental-frontends/src/noir/test_folder/compile.sh similarity index 73% rename from frontends/src/noir/test_folder/compile.sh rename to experimental-frontends/src/noir/test_folder/compile.sh index ea408eb4..598a7087 100755 --- a/frontends/src/noir/test_folder/compile.sh +++ b/experimental-frontends/src/noir/test_folder/compile.sh @@ -1,6 +1,6 @@ #!/bin/bash CUR_DIR=$(pwd) -TEST_PATH="${CUR_DIR}/frontends/src/noir/test_folder/" +TEST_PATH="${CUR_DIR}/experimental-frontends/src/noir/test_folder/" for test_path in test_circuit test_mimc test_no_external_inputs; do FOLDER="${TEST_PATH}${test_path}/" cd ${FOLDER} && nargo compile && cd ${TEST_PATH} diff --git a/frontends/src/noir/test_folder/test_circuit/Nargo.toml b/experimental-frontends/src/noir/test_folder/test_circuit/Nargo.toml similarity index 100% rename from frontends/src/noir/test_folder/test_circuit/Nargo.toml rename to experimental-frontends/src/noir/test_folder/test_circuit/Nargo.toml diff --git a/frontends/src/noir/test_folder/test_circuit/src/main.nr b/experimental-frontends/src/noir/test_folder/test_circuit/src/main.nr similarity index 100% rename from frontends/src/noir/test_folder/test_circuit/src/main.nr rename to experimental-frontends/src/noir/test_folder/test_circuit/src/main.nr diff --git a/frontends/src/noir/test_folder/test_mimc/Nargo.toml b/experimental-frontends/src/noir/test_folder/test_mimc/Nargo.toml similarity index 100% rename from frontends/src/noir/test_folder/test_mimc/Nargo.toml rename to experimental-frontends/src/noir/test_folder/test_mimc/Nargo.toml diff --git a/frontends/src/noir/test_folder/test_mimc/src/main.nr b/experimental-frontends/src/noir/test_folder/test_mimc/src/main.nr similarity index 100% rename from frontends/src/noir/test_folder/test_mimc/src/main.nr rename to 
experimental-frontends/src/noir/test_folder/test_mimc/src/main.nr diff --git a/frontends/src/noir/test_folder/test_no_external_inputs/Nargo.toml b/experimental-frontends/src/noir/test_folder/test_no_external_inputs/Nargo.toml similarity index 100% rename from frontends/src/noir/test_folder/test_no_external_inputs/Nargo.toml rename to experimental-frontends/src/noir/test_folder/test_no_external_inputs/Nargo.toml diff --git a/frontends/src/noir/test_folder/test_no_external_inputs/src/main.nr b/experimental-frontends/src/noir/test_folder/test_no_external_inputs/src/main.nr similarity index 100% rename from frontends/src/noir/test_folder/test_no_external_inputs/src/main.nr rename to experimental-frontends/src/noir/test_folder/test_no_external_inputs/src/main.nr diff --git a/experimental-frontends/src/noname/bridge.rs b/experimental-frontends/src/noname/bridge.rs new file mode 100644 index 00000000..9d285fa9 --- /dev/null +++ b/experimental-frontends/src/noname/bridge.rs @@ -0,0 +1,124 @@ +// From https://github.com/dmpierre/ark-noname/tree/feat/sonobe-integration +use std::collections::HashMap; + +use ark_ff::PrimeField; +use ark_r1cs_std::fields::fp::FpVar; +use ark_relations::r1cs::{ + ConstraintSynthesizer, ConstraintSystemRef, LinearCombination, SynthesisError, Variable, +}; +use noname::backends::{ + r1cs::{GeneratedWitness, LinearCombination as NoNameLinearCombination, R1CS}, + BackendField, +}; +use noname::witness::CompiledCircuit; +use num_bigint::BigUint; + +pub struct NonameSonobeCircuit<'a, 'b, 'c, F: PrimeField, BF: BackendField> { + pub compiled_circuit: CompiledCircuit>, + pub witness: GeneratedWitness, + pub assigned_z_i: &'a Vec>, + pub assigned_external_inputs: &'b Vec>, + pub assigned_z_i1: &'c Vec>, +} + +impl<'a, 'b, 'c, F: PrimeField, BF: BackendField> ConstraintSynthesizer + for NonameSonobeCircuit<'a, 'b, 'c, F, BF> +{ + fn generate_constraints(self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { + let public_io_length = self.assigned_z_i.len() * 2; + let external_inputs_len = self.assigned_external_inputs.len(); + + // we need to map noname r1cs indexes with sonobe + let mut idx_to_var = HashMap::new(); + + // for both the z_i, z_i1 vectors, we assume that they have been assigned in the order + // with which it will appear in the witness + let mut z_i_pointer = 0; + let mut z_i1_pointer = 0; + let mut external_inputs_pointer = 0; + + // arkworks assigns by default the 1 constant + // assumes witness is: [1, public_outputs, public_inputs, private_inputs, aux] + let witness_size = self.witness.witness.len(); + for idx in 1..witness_size { + if idx <= public_io_length { + if idx <= self.assigned_z_i.len() { + // in noname public outputs come first + // we are in the case of public outputs (z_i1 vector) + // those have already been assigned at specific indexes by sonobe + let var = match &self.assigned_z_i1[z_i1_pointer] { + FpVar::Var(allocated_fp) => allocated_fp.variable, + _ => return Err(SynthesisError::Unsatisfiable), + }; + idx_to_var.insert(idx, var); + z_i1_pointer += 1; + } else { + // we are in the case of public inputs (z_i values) + // those have already been assigned at specific indexes by sonobe + let var = match &self.assigned_z_i[z_i_pointer] { + FpVar::Var(allocated_fp) => allocated_fp.variable, + _ => return Err(SynthesisError::Unsatisfiable), + }; + idx_to_var.insert(idx, var); + z_i_pointer += 1; + } + } else if idx <= public_io_length + external_inputs_len { + // we are in the case of external inputs + // those have already been assigned at 
specific indexes + let var = match &self.assigned_external_inputs[external_inputs_pointer] { + FpVar::Var(allocated_fp) => allocated_fp.variable, + _ => return Err(SynthesisError::Unsatisfiable), + }; + idx_to_var.insert(idx, var); + external_inputs_pointer += 1; + } else { + // we are in the case of auxiliary private inputs + // we need to assign those + let value: BigUint = Into::into(self.witness.witness[idx]); + let field_element = F::from(value); + let var = cs.new_witness_variable(|| Ok(field_element))?; + idx_to_var.insert(idx, var); + } + } + + if (z_i_pointer != self.assigned_z_i.len()) + || (external_inputs_pointer != self.assigned_external_inputs.len()) + { + return Err(SynthesisError::AssignmentMissing); + } + let make_index = |index: usize| match index == 0 { + true => Ok(Variable::One), + false => { + let var = idx_to_var + .get(&index) + .ok_or(SynthesisError::AssignmentMissing)?; + Ok(var.to_owned()) + } + }; + + let make_lc = |lc_data: NoNameLinearCombination| { + let mut lc = LinearCombination::::zero(); + for (cellvar, coeff) in lc_data.terms.into_iter() { + let idx = make_index(cellvar.index)?; + let coeff = F::from(Into::::into(coeff)); + + lc += (coeff, idx) + } + + // add constant + let constant = F::from(Into::::into(lc_data.constant)); + lc += (constant, make_index(0)?); + Ok(lc) + }; + + for constraint in self.compiled_circuit.circuit.backend.constraints { + cs.enforce_constraint( + make_lc(constraint.a)?, + make_lc(constraint.b)?, + make_lc(constraint.c)?, + )?; + } + + Ok(()) + } +} diff --git a/frontends/src/noname/mod.rs b/experimental-frontends/src/noname/mod.rs similarity index 57% rename from frontends/src/noname/mod.rs rename to experimental-frontends/src/noname/mod.rs index 0de41ebf..897b49b0 100644 --- a/frontends/src/noname/mod.rs +++ b/experimental-frontends/src/noname/mod.rs @@ -1,37 +1,41 @@ -use ark_noname::sonobe::NonameSonobeCircuit; +use ark_ff::PrimeField; use ark_r1cs_std::alloc::AllocVar; use ark_r1cs_std::fields::fp::FpVar; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; +use noname::backends::{r1cs::R1CS as R1CSNoname, BackendField}; +use noname::witness::CompiledCircuit; use num_bigint::BigUint; use std::marker::PhantomData; -use self::utils::NonameInputs; - -use ark_ff::PrimeField; -use ark_noname::utils::compile_source_code; use folding_schemes::{frontend::FCircuit, Error}; -use noname::backends::{r1cs::R1CS as R1CSNoname, BackendField}; -use noname::witness::CompiledCircuit; + +pub mod bridge; pub mod utils; +use crate::utils::{VecF, VecFpVar}; + +use self::bridge::NonameSonobeCircuit; +use self::utils::{compile_source_code, NonameInputs}; + +// `L` indicates the length of the ExternalInputs vector of field elements. 
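// Illustrative usage, mirroring the tests at the end of this file (the generic
// parameters below are an assumption based on the `L` const generic described above):
//
//     // state length 2, two external inputs (`L = 2`)
//     let params = (NONAME_CIRCUIT_EXTERNAL_INPUTS.to_owned(), 2);
//     let circuit = NonameFCircuit::<Fr, R1csBn254Field, 2>::new(params)?;
//     // a circuit with no external inputs uses `L = 0` and passes `VecFpVar(vec![])`
//     // as its `ExternalInputsVar` to `generate_step_constraints`.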
#[derive(Debug, Clone)] -pub struct NonameFCircuit { +pub struct NonameFCircuit { pub state_len: usize, - pub external_inputs_len: usize, pub circuit: CompiledCircuit>, _f: PhantomData, } -impl FCircuit for NonameFCircuit { - type Params = (String, usize, usize); +impl FCircuit for NonameFCircuit { + type Params = (String, usize); + type ExternalInputs = VecF; + type ExternalInputsVar = VecFpVar; fn new(params: Self::Params) -> Result { - let (code, state_len, external_inputs_len) = params; + let (code, state_len) = params; let compiled_circuit = compile_source_code::(&code).map_err(|_| { Error::Other("Encountered an error while compiling a noname circuit".to_owned()) })?; Ok(NonameFCircuit { state_len, - external_inputs_len, circuit: compiled_circuit, _f: PhantomData, }) @@ -41,45 +45,15 @@ impl FCircuit for NonameFCircuit { self.state_len } - fn external_inputs_len(&self) -> usize { - self.external_inputs_len - } - - fn step_native( - &self, - _i: usize, - z_i: Vec, - external_inputs: Vec, - ) -> Result, Error> { - let wtns_external_inputs = - NonameInputs::from((&external_inputs, "external_inputs".to_string())); - let wtns_ivc_inputs = NonameInputs::from((&z_i, "ivc_inputs".to_string())); - - let noname_witness = self - .circuit - .generate_witness(wtns_ivc_inputs.0, wtns_external_inputs.0) - .map_err(|e| Error::WitnessCalculationError(e.to_string()))?; - - let z_i1_end_index = z_i.len() + 1; - let assigned_z_i1 = (1..z_i1_end_index) - .map(|idx| { - let value: BigUint = Into::into(noname_witness.witness[idx]); - F::from(value) - }) - .collect(); - - Ok(assigned_z_i1) - } - fn generate_step_constraints( &self, cs: ConstraintSystemRef, _i: usize, z_i: Vec>, - external_inputs: Vec>, + external_inputs: Self::ExternalInputsVar, ) -> Result>, SynthesisError> { let wtns_external_inputs = - NonameInputs::from_fpvars((&external_inputs, "external_inputs".to_string()))?; + NonameInputs::from_fpvars((&external_inputs.0, "external_inputs".to_string()))?; let wtns_ivc_inputs = NonameInputs::from_fpvars((&z_i, "ivc_inputs".to_string()))?; let noname_witness = self .circuit @@ -102,7 +76,7 @@ impl FCircuit for NonameFCircuit { compiled_circuit: self.circuit.clone(), witness: noname_witness, assigned_z_i: &z_i, - assigned_external_inputs: &external_inputs, + assigned_external_inputs: &external_inputs.0, assigned_z_i1: &assigned_z_i1, }; noname_circuit.generate_constraints(cs.clone())?; @@ -113,15 +87,24 @@ impl FCircuit for NonameFCircuit { #[cfg(test)] mod tests { - use ark_bn254::Fr; + use ark_ff::PrimeField; use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar}; + use ark_relations::r1cs::ConstraintSystem; use noname::backends::r1cs::R1csBn254Field; - use folding_schemes::frontend::FCircuit; + use folding_schemes::{frontend::FCircuit, Error}; use super::NonameFCircuit; - use ark_relations::r1cs::ConstraintSystem; + use crate::utils::VecFpVar; + + /// Native implementation of `NONAME_CIRCUIT_EXTERNAL_INPUTS` + fn external_inputs_step_native(z_i: Vec, external_inputs: Vec) -> Vec { + let xx = external_inputs[0] + z_i[0]; + let yy = external_inputs[1] * z_i[1]; + assert_eq!(yy, xx); + vec![xx, yy] + } const NONAME_CIRCUIT_EXTERNAL_INPUTS: &str = "fn main(pub ivc_inputs: [Field; 2], external_inputs: [Field; 2]) -> [Field; 2] { @@ -138,63 +121,68 @@ mod tests { }"; #[test] - fn test_step_native() { + fn test_step_native() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); - let params = (NONAME_CIRCUIT_EXTERNAL_INPUTS.to_owned(), 2, 2); - let circuit = 
NonameFCircuit::::new(params).unwrap(); + // state length = 2, external inputs length= 2 + let params = (NONAME_CIRCUIT_EXTERNAL_INPUTS.to_owned(), 2); + let circuit = NonameFCircuit::::new(params)?; let inputs_public = vec![Fr::from(2), Fr::from(5)]; let inputs_private = vec![Fr::from(8), Fr::from(2)]; let ivc_inputs_var = - Vec::>::new_witness(cs.clone(), || Ok(inputs_public.clone())).unwrap(); + Vec::>::new_witness(cs.clone(), || Ok(inputs_public.clone()))?; let external_inputs_var = - Vec::>::new_witness(cs.clone(), || Ok(inputs_private.clone())).unwrap(); - - let z_i1 = circuit - .generate_step_constraints(cs.clone(), 0, ivc_inputs_var, external_inputs_var) - .unwrap(); - let z_i1_native = circuit - .step_native(0, inputs_public, inputs_private) - .unwrap(); - - assert_eq!(z_i1[0].value().unwrap(), z_i1_native[0]); - assert_eq!(z_i1[1].value().unwrap(), z_i1_native[1]); + Vec::>::new_witness(cs.clone(), || Ok(inputs_private.clone()))?; + + let z_i1 = circuit.generate_step_constraints( + cs.clone(), + 0, + ivc_inputs_var, + VecFpVar(external_inputs_var), + )?; + let z_i1_native = external_inputs_step_native(inputs_public, inputs_private); + + assert_eq!(z_i1[0].value()?, z_i1_native[0]); + assert_eq!(z_i1[1].value()?, z_i1_native[1]); + Ok(()) } #[test] - fn test_step_constraints() { + fn test_step_constraints() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); - let params = (NONAME_CIRCUIT_EXTERNAL_INPUTS.to_owned(), 2, 2); - let circuit = NonameFCircuit::::new(params).unwrap(); + // external inputs length= 2 + let params = (NONAME_CIRCUIT_EXTERNAL_INPUTS.to_owned(), 2); + let circuit = NonameFCircuit::::new(params)?; let inputs_public = vec![Fr::from(2), Fr::from(5)]; let inputs_private = vec![Fr::from(8), Fr::from(2)]; - let ivc_inputs_var = - Vec::>::new_witness(cs.clone(), || Ok(inputs_public)).unwrap(); - let external_inputs_var = - Vec::>::new_witness(cs.clone(), || Ok(inputs_private)).unwrap(); - - let z_i1 = circuit - .generate_step_constraints(cs.clone(), 0, ivc_inputs_var, external_inputs_var) - .unwrap(); - assert!(cs.is_satisfied().unwrap()); - assert_eq!(z_i1[0].value().unwrap(), Fr::from(10_u8)); - assert_eq!(z_i1[1].value().unwrap(), Fr::from(10_u8)); + let ivc_inputs_var = Vec::>::new_witness(cs.clone(), || Ok(inputs_public))?; + let external_inputs_var = Vec::>::new_witness(cs.clone(), || Ok(inputs_private))?; + + let z_i1 = circuit.generate_step_constraints( + cs.clone(), + 0, + ivc_inputs_var, + VecFpVar(external_inputs_var), + )?; + assert!(cs.is_satisfied()?); + assert_eq!(z_i1[0].value()?, Fr::from(10_u8)); + assert_eq!(z_i1[1].value()?, Fr::from(10_u8)); + Ok(()) } #[test] - fn test_generate_constraints_no_external_inputs() { + fn test_generate_constraints_no_external_inputs() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); - let params = (NONAME_CIRCUIT_NO_EXTERNAL_INPUTS.to_owned(), 2, 0); + let params = (NONAME_CIRCUIT_NO_EXTERNAL_INPUTS.to_owned(), 2); // state length = 2 let inputs_public = vec![Fr::from(2), Fr::from(5)]; - let ivc_inputs_var = - Vec::>::new_witness(cs.clone(), || Ok(inputs_public)).unwrap(); + let ivc_inputs_var = Vec::>::new_witness(cs.clone(), || Ok(inputs_public))?; - let f_circuit = NonameFCircuit::::new(params).unwrap(); - f_circuit - .generate_step_constraints(cs.clone(), 0, ivc_inputs_var, vec![]) - .unwrap(); - assert!(cs.is_satisfied().unwrap()); + // external inputs length = 0 + let f_circuit = NonameFCircuit::::new(params)?; + f_circuit.generate_step_constraints(cs.clone(), 0, ivc_inputs_var, 
VecFpVar(vec![]))?; + assert!(cs.is_satisfied()?); + Ok(()) } } diff --git a/frontends/src/noname/utils.rs b/experimental-frontends/src/noname/utils.rs similarity index 66% rename from frontends/src/noname/utils.rs rename to experimental-frontends/src/noname/utils.rs index fdd92817..fc5761cf 100644 --- a/frontends/src/noname/utils.rs +++ b/experimental-frontends/src/noname/utils.rs @@ -3,7 +3,14 @@ use std::collections::HashMap; use ark_ff::PrimeField; use ark_r1cs_std::{fields::fp::FpVar, R1CSVar}; use ark_relations::r1cs::SynthesisError; -use noname::inputs::JsonInputs; +use noname::{ + backends::{r1cs::R1CS, BackendField}, + circuit_writer::CircuitWriter, + compiler::{typecheck_next_file, Sources}, + inputs::JsonInputs, + type_checker::TypeChecker, + witness::CompiledCircuit, +}; use serde_json::json; pub struct NonameInputs(pub JsonInputs); @@ -56,3 +63,26 @@ impl NonameInputs { } } } + +// from: https://github.com/zksecurity/noname/blob/main/src/tests/modules.rs +// TODO: this will not work in the case where we are using libraries +pub fn compile_source_code( + code: &str, +) -> Result>, noname::error::Error> { + let mut sources = Sources::new(); + + // parse the transitive dependency + let mut checker = TypeChecker::>::new(); + let _ = typecheck_next_file( + &mut checker, + None, + &mut sources, + "main.no".to_string(), + code.to_string(), + 0, + ) + .unwrap(); + let r1cs = R1CS::::new(); + // compile + CircuitWriter::generate_circuit(checker, r1cs) +} diff --git a/experimental-frontends/src/utils.rs b/experimental-frontends/src/utils.rs new file mode 100644 index 00000000..769c5ae7 --- /dev/null +++ b/experimental-frontends/src/utils.rs @@ -0,0 +1,38 @@ +use ark_ff::PrimeField; +use ark_r1cs_std::{ + alloc::{AllocVar, AllocationMode}, + fields::fp::FpVar, +}; +use ark_relations::r1cs::{Namespace, SynthesisError}; +use ark_std::fmt::Debug; +use core::borrow::Borrow; + +#[derive(Clone, Debug)] +pub struct VecF(pub Vec); +impl Default for VecF { + fn default() -> Self { + VecF(vec![F::zero(); L]) + } +} +#[derive(Clone, Debug)] +pub struct VecFpVar(pub Vec>); +impl AllocVar, F> for VecFpVar { + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + f().and_then(|val| { + let cs = cs.into(); + + let v = Vec::>::new_variable(cs.clone(), || Ok(val.borrow().0.clone()), mode)?; + + Ok(VecFpVar(v)) + }) + } +} +impl Default for VecFpVar { + fn default() -> Self { + VecFpVar(vec![FpVar::::Constant(F::zero()); L]) + } +} diff --git a/folding-schemes/Cargo.toml b/folding-schemes/Cargo.toml index 22b074de..ccdc9b10 100644 --- a/folding-schemes/Cargo.toml +++ b/folding-schemes/Cargo.toml @@ -4,20 +4,19 @@ version = "0.1.0" edition = "2021" [dependencies] -ark-ec = { version = "^0.4.0", default-features = false, features = ["parallel"] } -ark-ff = { version = "^0.4.0", default-features = false, features = ["parallel", "asm"] } -ark-poly = { version = "^0.4.0", default-features = false, features = ["parallel"] } -ark-std = { version = "^0.4.0", default-features = false, features = ["parallel"] } -ark-crypto-primitives = { version = "^0.4.0", default-features = false, features = ["r1cs", "sponge", "crh", "parallel"] } -ark-poly-commit = { version = "^0.4.0", default-features = false, features = ["parallel"] } -ark-relations = { version = "^0.4.0", default-features = false } -# ark-r1cs-std is patched at the workspace level -ark-r1cs-std = { version = "0.4.0", default-features = false, features = ["parallel"] } -ark-snark = { version = "^0.4.0", 
default-features = false } -ark-serialize = { version = "^0.4.0", default-features = false } -ark-groth16 = { version = "^0.4.0", default-features = false, features = ["parallel"]} -ark-bn254 = { version = "^0.4.0", default-features = false } -ark-grumpkin = { version = "0.4.0", default-features = false } +ark-ec = { version = "^0.5.0", default-features = false, features = ["parallel"] } +ark-ff = { version = "^0.5.0", default-features = false, features = ["parallel", "asm"] } +ark-poly = { version = "^0.5.0", default-features = false, features = ["parallel"] } +ark-std = { version = "^0.5.0", default-features = false, features = ["parallel"] } +ark-crypto-primitives = { version = "^0.5.0", default-features = false, features = ["r1cs", "sponge", "crh", "parallel"] } +ark-poly-commit = { version = "^0.5.0", default-features = false, features = ["parallel"] } +ark-relations = { version = "^0.5.0", default-features = false } +ark-r1cs-std = { version = "^0.5.0", default-features = false, features = ["parallel"] } +ark-snark = { version = "^0.5.0", default-features = false } +ark-serialize = { version = "^0.5.0", default-features = false } +ark-groth16 = { version = "^0.5.0", default-features = false, features = ["parallel"]} +ark-bn254 = { version = "^0.5.0", default-features = false } +ark-grumpkin = { version = "^0.5.0", default-features = false } thiserror = "1.0" rayon = "1" num-bigint = "0.4" @@ -25,23 +24,24 @@ num-integer = "0.1" sha3 = "0.10" log = "0.4" -# tmp import for espresso's sumcheck -espresso_subroutines = {git="https://github.com/EspressoSystems/hyperplonk", package="subroutines"} - [dev-dependencies] -ark-pallas = {version="0.4.0", features=["r1cs"]} -ark-vesta = {version="0.4.0", features=["r1cs"]} -ark-bn254 = {version="0.4.0", features=["r1cs"]} -ark-grumpkin = {version="0.4.0", features=["r1cs"]} +ark-pallas = {version="^0.5.0", features=["r1cs"]} +ark-vesta = {version="^0.5.0", features=["r1cs"]} +ark-bn254 = {version="^0.5.0", features=["r1cs"]} +ark-grumpkin = {version="^0.5.0", features=["r1cs"]} # Note: do not use the MNTx_298 curves in practice due security reasons, here # we only use them in the tests. -ark-mnt4-298 = {version="0.4.0", features=["r1cs"]} -ark-mnt6-298 = {version="0.4.0", features=["r1cs"]} +ark-mnt4-298 = {version="^0.5.0", features=["r1cs"]} +ark-mnt6-298 = {version="^0.5.0", features=["r1cs"]} rand = "0.8.5" num-bigint = {version = "0.4", features = ["rand"]} tracing = { version = "0.1", default-features = false, features = [ "attributes" ] } tracing-subscriber = { version = "0.2" } +# for benchmarks +criterion = "0.5" +pprof = { version = "0.13", features = ["criterion", "flamegraph"] } + # This allows the crate to be built when targeting WASM. 
# See more at: https://docs.rs/getrandom/#webassembly-support [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] @@ -53,6 +53,21 @@ parallel = [] light-test = [] +[[bench]] +name = "nova" +path = "../benches/nova.rs" +harness = false + +[[bench]] +name = "hypernova" +path = "../benches/hypernova.rs" +harness = false + +[[bench]] +name = "protogalaxy" +path = "../benches/protogalaxy.rs" +harness = false + [[example]] name = "sha256" path = "../examples/sha256.rs" diff --git a/folding-schemes/src/arith/ccs/circuits.rs b/folding-schemes/src/arith/ccs/circuits.rs index 01d7f3dd..5a2e99ab 100644 --- a/folding-schemes/src/arith/ccs/circuits.rs +++ b/folding-schemes/src/arith/ccs/circuits.rs @@ -7,8 +7,8 @@ use ark_r1cs_std::{ }; use ark_relations::r1cs::{Namespace, SynthesisError}; use ark_std::borrow::Borrow; + /// CCSMatricesVar contains the matrices 'M' of the CCS without the rest of CCS parameters. -/// #[derive(Debug, Clone)] pub struct CCSMatricesVar { // we only need native representation, so the constraint field==F diff --git a/folding-schemes/src/arith/ccs/mod.rs b/folding-schemes/src/arith/ccs/mod.rs index eba51c4a..dda2434d 100644 --- a/folding-schemes/src/arith/ccs/mod.rs +++ b/folding-schemes/src/arith/ccs/mod.rs @@ -6,8 +6,8 @@ use crate::utils::vec::{ }; use crate::Error; -use super::ArithSerializer; -use super::{r1cs::R1CS, Arith}; +use super::{r1cs::R1CS, ArithRelation}; +use super::{Arith, ArithSerializer}; pub mod circuits; @@ -16,21 +16,19 @@ pub mod circuits; #[derive(Debug, Clone, Eq, PartialEq)] pub struct CCS { /// m: number of rows in M_i (such that M_i \in F^{m, n}) - pub m: usize, + m: usize, /// n = |z|, number of cols in M_i - pub n: usize, + n: usize, /// l = |io|, size of public input/output - pub l: usize, + l: usize, /// t = |M|, number of matrices pub t: usize, /// q = |c| = |S|, number of multisets - pub q: usize, + q: usize, /// d: max degree in each variable - pub d: usize, + d: usize, /// s = log(m), dimension of x pub s: usize, - /// s_prime = log(n), dimension of y - pub s_prime: usize, /// vector of matrices pub M: Vec>, @@ -64,14 +62,40 @@ impl CCS { Ok(result) } +} + +impl Arith for CCS { + #[inline] + fn degree(&self) -> usize { + self.d + } + + #[inline] + fn n_constraints(&self) -> usize { + self.m + } + + #[inline] + fn n_variables(&self) -> usize { + self.n + } + + #[inline] + fn n_public_inputs(&self) -> usize { + self.l + } + + #[inline] + fn n_witnesses(&self) -> usize { + self.n_variables() - self.n_public_inputs() - 1 + } - /// returns a tuple containing (w, x) (witness and public inputs respectively) - pub fn split_z(&self, z: &[F]) -> (Vec, Vec) { + fn split_z(&self, z: &[P]) -> (Vec
<P>, Vec<P>
) { (z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec()) } } -impl, U: AsRef<[F]>> Arith for CCS { +impl, U: AsRef<[F]>> ArithRelation for CCS { type Evaluation = Vec; fn eval_relation(&self, w: &W, u: &U) -> Result { @@ -99,17 +123,16 @@ impl ArithSerializer for CCS { impl From> for CCS { fn from(r1cs: R1CS) -> Self { - let m = r1cs.num_constraints(); - let n = r1cs.num_variables(); + let m = r1cs.n_constraints(); + let n = r1cs.n_variables(); CCS { m, n, - l: r1cs.num_public_inputs(), + l: r1cs.n_public_inputs(), s: log2(m) as usize, - s_prime: log2(n) as usize, t: 3, q: 2, - d: 2, + d: r1cs.degree(), S: vec![vec![0, 1], vec![2]], c: vec![F::one(), F::one().neg()], @@ -135,24 +158,26 @@ pub mod tests { } #[test] - fn test_eval_ccs_relation() { + fn test_eval_ccs_relation() -> Result<(), Error> { let ccs = get_test_ccs::(); let (_, x, mut w) = get_test_z_split(3); - let f_w = ccs.eval_relation(&w, &x).unwrap(); + let f_w = ccs.eval_relation(&w, &x)?; assert!(is_zero_vec(&f_w)); w[1] = Fr::from(111); - let f_w = ccs.eval_relation(&w, &x).unwrap(); + let f_w = ccs.eval_relation(&w, &x)?; assert!(!is_zero_vec(&f_w)); + Ok(()) } /// Test that a basic CCS relation can be satisfied #[test] - fn test_check_ccs_relation() { + fn test_check_ccs_relation() -> Result<(), Error> { let ccs = get_test_ccs::(); let (_, x, w) = get_test_z_split(3); - ccs.check_relation(&w, &x).unwrap(); + ccs.check_relation(&w, &x)?; + Ok(()) } } diff --git a/folding-schemes/src/arith/mod.rs b/folding-schemes/src/arith/mod.rs index b5ebd7b8..a280422f 100644 --- a/folding-schemes/src/arith/mod.rs +++ b/folding-schemes/src/arith/mod.rs @@ -1,17 +1,39 @@ -use ark_ec::CurveGroup; +use ark_ff::PrimeField; use ark_relations::r1cs::SynthesisError; use ark_std::rand::RngCore; -use crate::{commitment::CommitmentScheme, folding::traits::Dummy, Error}; +use crate::{commitment::CommitmentScheme, folding::traits::Dummy, Curve, Error}; pub mod ccs; pub mod r1cs; -/// `Arith` defines the operations that a constraint system (e.g., R1CS, CCS, -/// etc.) should support. +/// [`Arith`] is a trait about constraint systems (R1CS, CCS, etc.), where we +/// define methods for getting information about the constraint system. +pub trait Arith: Clone { + /// Returns the degree of the constraint system + fn degree(&self) -> usize; + + /// Returns the number of constraints in the constraint system + fn n_constraints(&self) -> usize; + + /// Returns the number of variables in the constraint system + fn n_variables(&self) -> usize; + + /// Returns the number of public inputs / public IO / instances / statements + /// in the constraint system + fn n_public_inputs(&self) -> usize; + + /// Returns the number of witnesses / secret inputs in the constraint system + fn n_witnesses(&self) -> usize; + + /// Returns a tuple containing (w, x) (witness and public inputs respectively) + fn split_z(&self, z: &[F]) -> (Vec, Vec); +} + +/// `ArithRelation` *treats a constraint system as a relation* between a witness +/// of type `W` and a statement / public input / public IO / instance of type +/// `U`, and in this trait, we define the necessary operations on the relation. /// -/// Here, `W` is the type of witness, and `U` is the type of statement / public -/// input / public IO / instance. /// Note that the same constraint system may support different types of `W` and /// `U`, and the satisfiability check may vary. 
/// @@ -41,7 +63,7 @@ pub mod r1cs; /// This is also the case of CCS, where `W` and `U` may be vectors of field /// elements, [`crate::folding::hypernova::Witness`] and [`crate::folding::hypernova::lcccs::LCCCS`], /// or [`crate::folding::hypernova::Witness`] and [`crate::folding::hypernova::cccs::CCCS`]. -pub trait Arith: Clone { +pub trait ArithRelation: Arith { type Evaluation; /// Returns a dummy witness and instance @@ -123,7 +145,7 @@ pub trait ArithSerializer { /// in a plain R1CS. /// /// [HyperNova]: https://eprint.iacr.org/2023/573.pdf -pub trait ArithSampler: Arith { +pub trait ArithSampler: ArithRelation { /// Samples a random witness and instance that satisfy the constraint system. fn sample_witness_instance>( &self, @@ -132,9 +154,9 @@ pub trait ArithSampler: Arith { ) -> Result<(W, U), Error>; } -/// `ArithGadget` defines the in-circuit counterparts of operations specified in -/// `Arith` on constraint systems. -pub trait ArithGadget { +/// `ArithRelationGadget` defines the in-circuit counterparts of operations +/// specified in `ArithRelation` on constraint systems. +pub trait ArithRelationGadget { type Evaluation; /// Evaluates the constraint system `self` at witness `w` and instance `u`. diff --git a/folding-schemes/src/arith/r1cs/circuits.rs b/folding-schemes/src/arith/r1cs/circuits.rs index e951ab87..eaf4d2c9 100644 --- a/folding-schemes/src/arith/r1cs/circuits.rs +++ b/folding-schemes/src/arith/r1cs/circuits.rs @@ -1,5 +1,5 @@ use crate::{ - arith::ArithGadget, + arith::ArithRelationGadget, utils::gadgets::{EquivalenceGadget, MatrixGadget, SparseMatrixVar, VectorGadget}, }; use ark_ff::PrimeField; @@ -59,7 +59,7 @@ where } } -impl, UVar: AsRef<[FVar]>> ArithGadget +impl, UVar: AsRef<[FVar]>> ArithRelationGadget for R1CSMatricesVar where SparseMatrixVar: MatrixGadget, @@ -86,8 +86,6 @@ where #[cfg(test)] pub mod tests { - use std::cmp::max; - use ark_crypto_primitives::crh::{ sha256::{ constraints::{Sha256Gadget, UnitVar}, @@ -95,16 +93,17 @@ pub mod tests { }, CRHScheme, CRHSchemeGadget, }; - use ark_ec::CurveGroup; + use ark_ff::BigInteger; use ark_pallas::{Fq, Fr, Projective}; - use ark_r1cs_std::{bits::uint8::UInt8, eq::EqGadget, fields::fp::FpVar}; + use ark_r1cs_std::{eq::EqGadget, fields::fp::FpVar, uint8::UInt8}; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef}; use ark_std::{ + cmp::max, rand::{thread_rng, Rng}, One, UniformRand, }; - use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_vesta::Projective as Projective2; use super::*; use crate::arith::{ @@ -112,7 +111,7 @@ pub mod tests { extract_r1cs, extract_w_x, tests::{get_test_r1cs, get_test_z}, }, - Arith, + Arith, ArithRelation, }; use crate::commitment::{pedersen::Pedersen, CommitmentScheme}; use crate::folding::{ @@ -126,92 +125,95 @@ pub mod tests { }, }; use crate::frontend::{ - utils::{CubicFCircuit, CustomFCircuit, WrapperCircuit}, + utils::{ + cubic_step_native, custom_step_native, CubicFCircuit, CustomFCircuit, WrapperCircuit, + }, FCircuit, }; + use crate::{Curve, Error}; - pub fn prepare_instances, R: Rng>( + fn prepare_instances, R: Rng>( mut rng: R, r1cs: &R1CS, z: &[C::ScalarField], - ) -> (Witness, CommittedInstance) { + ) -> Result<(Witness, CommittedInstance), Error> { let (w, x) = r1cs.split_z(z); - let (cs_pp, _) = CS::setup(&mut rng, max(w.len(), r1cs.A.n_rows)).unwrap(); + let (cs_pp, _) = CS::setup(&mut rng, max(w.len(), r1cs.A.n_rows))?; let mut w = Witness::new::(w, r1cs.A.n_rows, &mut rng); - w.E = 
r1cs.eval_at_z(z).unwrap(); - let mut u = w.commit::(&cs_pp, x).unwrap(); + w.E = r1cs.eval_at_z(z)?; + let mut u = w.commit::(&cs_pp, x)?; u.u = z[0]; - (w, u) + Ok((w, u)) } #[test] - fn test_relaxed_r1cs_small_gadget_handcrafted() { + fn test_relaxed_r1cs_small_gadget_handcrafted() -> Result<(), Error> { let rng = &mut thread_rng(); let r1cs: R1CS = get_test_r1cs(); let mut z = get_test_z(3); z[0] = Fr::rand(rng); - let (w, u) = prepare_instances::<_, Pedersen, _>(rng, &r1cs, &z); + let (w, u) = prepare_instances::<_, Pedersen, _>(rng, &r1cs, &z)?; let cs = ConstraintSystem::::new_ref(); - let wVar = WitnessVar::new_witness(cs.clone(), || Ok(w)).unwrap(); - let uVar = CommittedInstanceVar::new_witness(cs.clone(), || Ok(u)).unwrap(); - let r1csVar = - R1CSMatricesVar::>::new_witness(cs.clone(), || Ok(r1cs)).unwrap(); + let wVar = WitnessVar::new_witness(cs.clone(), || Ok(w))?; + let uVar = CommittedInstanceVar::new_witness(cs.clone(), || Ok(u))?; + let r1csVar = R1CSMatricesVar::>::new_witness(cs.clone(), || Ok(r1cs))?; - r1csVar.enforce_relation(&wVar, &uVar).unwrap(); - assert!(cs.is_satisfied().unwrap()); + r1csVar.enforce_relation(&wVar, &uVar)?; + assert!(cs.is_satisfied()?); + Ok(()) } // gets as input a circuit that implements the ConstraintSynthesizer trait, and that has been // initialized. - fn test_relaxed_r1cs_gadget>(circuit: CS) { + fn test_relaxed_r1cs_gadget>(circuit: CS) -> Result<(), Error> { let rng = &mut thread_rng(); let cs = ConstraintSystem::::new_ref(); - circuit.generate_constraints(cs.clone()).unwrap(); + circuit.generate_constraints(cs.clone())?; cs.finalize(); - assert!(cs.is_satisfied().unwrap()); + assert!(cs.is_satisfied()?); - let cs = cs.into_inner().unwrap(); + let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; - let r1cs = extract_r1cs::(&cs).unwrap(); + let r1cs = extract_r1cs::(&cs)?; let (w, x) = extract_w_x::(&cs); - r1cs.check_relation(&w, &x).unwrap(); + r1cs.check_relation(&w, &x)?; let mut z = [vec![Fr::one()], x, w].concat(); z[0] = Fr::rand(rng); - let (w, u) = prepare_instances::<_, Pedersen, _>(rng, &r1cs, &z); - r1cs.check_relation(&w, &u).unwrap(); + let (w, u) = prepare_instances::<_, Pedersen, _>(rng, &r1cs, &z)?; + r1cs.check_relation(&w, &u)?; // set new CS for the circuit that checks the RelaxedR1CS of our original circuit let cs = ConstraintSystem::::new_ref(); // prepare the inputs for our circuit - let wVar = WitnessVar::new_witness(cs.clone(), || Ok(w)).unwrap(); - let uVar = CommittedInstanceVar::new_witness(cs.clone(), || Ok(u)).unwrap(); - let r1csVar = - R1CSMatricesVar::>::new_witness(cs.clone(), || Ok(r1cs)).unwrap(); + let wVar = WitnessVar::new_witness(cs.clone(), || Ok(w))?; + let uVar = CommittedInstanceVar::new_witness(cs.clone(), || Ok(u))?; + let r1csVar = R1CSMatricesVar::>::new_witness(cs.clone(), || Ok(r1cs))?; - r1csVar.enforce_relation(&wVar, &uVar).unwrap(); - assert!(cs.is_satisfied().unwrap()); + r1csVar.enforce_relation(&wVar, &uVar)?; + assert!(cs.is_satisfied()?); + Ok(()) } #[test] - fn test_relaxed_r1cs_small_gadget_arkworks() { + fn test_relaxed_r1cs_small_gadget_arkworks() -> Result<(), Error> { let z_i = vec![Fr::from(3_u32)]; - let cubic_circuit = CubicFCircuit::::new(()).unwrap(); + let cubic_circuit = CubicFCircuit::::new(())?; let circuit = WrapperCircuit::> { FC: cubic_circuit, z_i: Some(z_i.clone()), - z_i1: Some(cubic_circuit.step_native(0, z_i, vec![]).unwrap()), + z_i1: Some(cubic_step_native(z_i)), }; - test_relaxed_r1cs_gadget(circuit); + test_relaxed_r1cs_gadget(circuit) } 
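// Illustrative sketch (not part of this patch): the relation exercised by the tests in
// this module is the relaxed R1CS check Az ∘ Bz == u·Cz + E over z = [u, x, W], which is
// why `prepare_instances` sets `w.E = r1cs.eval_at_z(z)` and `u.u = z[0]`. A minimal
// native version over dense matrices could look as follows; the crate itself uses
// `SparseMatrix` and the `ArithRelation` trait, and `is_relaxed_r1cs_satisfied` is a
// hypothetical helper, not an API of this crate.
fn is_relaxed_r1cs_satisfied<F: ark_ff::PrimeField>(
    a: &[Vec<F>],
    b: &[Vec<F>],
    c: &[Vec<F>],
    z: &[F], // z = [u, x, W]
    e: &[F], // error vector E of the relaxed instance
) -> bool {
    let u = z[0];
    // dense matrix-vector product
    let mat_vec = |m: &[Vec<F>]| -> Vec<F> {
        m.iter()
            .map(|row| row.iter().zip(z).map(|(m_ij, z_j)| *m_ij * z_j).sum::<F>())
            .collect()
    };
    let (az, bz, cz) = (mat_vec(a), mat_vec(b), mat_vec(c));
    // entry-wise check of Az ∘ Bz == u·Cz + E
    az.iter()
        .zip(&bz)
        .zip(cz.iter().zip(e))
        .all(|((az_i, bz_i), (cz_i, e_i))| *az_i * bz_i == u * cz_i + e_i)
}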
struct Sha256TestCircuit { @@ -231,71 +233,71 @@ pub mod tests { } } #[test] - fn test_relaxed_r1cs_medium_gadget_arkworks() { + fn test_relaxed_r1cs_medium_gadget_arkworks() -> Result<(), Error> { let x = Fr::from(5_u32).into_bigint().to_bytes_le(); - let y = ::evaluate(&(), x.clone()).unwrap(); + let y = + ::evaluate(&(), x.clone()).map_err(|_| Error::EvaluationFail)?; let circuit = Sha256TestCircuit:: { _f: PhantomData, x, y, }; - test_relaxed_r1cs_gadget(circuit); + test_relaxed_r1cs_gadget(circuit) } #[test] - fn test_relaxed_r1cs_custom_circuit() { + fn test_relaxed_r1cs_custom_circuit() -> Result<(), Error> { let n_constraints = 10_000; - let custom_circuit = CustomFCircuit::::new(n_constraints).unwrap(); + let custom_circuit = CustomFCircuit::::new(n_constraints)?; let z_i = vec![Fr::from(5_u32)]; let circuit = WrapperCircuit::> { FC: custom_circuit, z_i: Some(z_i.clone()), - z_i1: Some(custom_circuit.step_native(0, z_i, vec![]).unwrap()), + z_i1: Some(custom_step_native(z_i, n_constraints)), }; - test_relaxed_r1cs_gadget(circuit); + test_relaxed_r1cs_gadget(circuit) } #[test] - fn test_relaxed_r1cs_nonnative_circuit() { + fn test_relaxed_r1cs_nonnative_circuit() -> Result<(), Error> { + let n_constraints = 10; let rng = &mut thread_rng(); let cs = ConstraintSystem::::new_ref(); // in practice we would use CycleFoldCircuit, but is a very big circuit (when computed // non-natively inside the RelaxedR1CS circuit), so in order to have a short test we use a // custom circuit. - let custom_circuit = CustomFCircuit::::new(10).unwrap(); + let custom_circuit = CustomFCircuit::::new(n_constraints)?; let z_i = vec![Fq::from(5_u32)]; let circuit = WrapperCircuit::> { FC: custom_circuit, z_i: Some(z_i.clone()), - z_i1: Some(custom_circuit.step_native(0, z_i, vec![]).unwrap()), + z_i1: Some(custom_step_native(z_i, n_constraints)), }; - circuit.generate_constraints(cs.clone()).unwrap(); + circuit.generate_constraints(cs.clone())?; cs.finalize(); - let cs = cs.into_inner().unwrap(); - let r1cs = extract_r1cs::(&cs).unwrap(); + let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; + let r1cs = extract_r1cs::(&cs)?; let (w, x) = extract_w_x::(&cs); let z = [vec![Fq::rand(rng)], x, w].concat(); - let (w, u) = prepare_instances::<_, Pedersen, _>(rng, &r1cs, &z); + let (w, u) = prepare_instances::<_, Pedersen, _>(rng, &r1cs, &z)?; // natively let cs = ConstraintSystem::::new_ref(); - let wVar = WitnessVar::new_witness(cs.clone(), || Ok(&w)).unwrap(); - let uVar = CommittedInstanceVar::new_witness(cs.clone(), || Ok(&u)).unwrap(); - let r1csVar = - R1CSMatricesVar::>::new_witness(cs.clone(), || Ok(&r1cs)).unwrap(); - r1csVar.enforce_relation(&wVar, &uVar).unwrap(); + let wVar = WitnessVar::new_witness(cs.clone(), || Ok(&w))?; + let uVar = CommittedInstanceVar::new_witness(cs.clone(), || Ok(&u))?; + let r1csVar = R1CSMatricesVar::>::new_witness(cs.clone(), || Ok(&r1cs))?; + r1csVar.enforce_relation(&wVar, &uVar)?; // non-natively let cs = ConstraintSystem::::new_ref(); - let wVar = CycleFoldWitnessVar::new_witness(cs.clone(), || Ok(w)).unwrap(); - let uVar = - CycleFoldCommittedInstanceVar::<_, GVar2>::new_witness(cs.clone(), || Ok(u)).unwrap(); + let wVar = CycleFoldWitnessVar::new_witness(cs.clone(), || Ok(w))?; + let uVar = CycleFoldCommittedInstanceVar::new_witness(cs.clone(), || Ok(u))?; let r1csVar = - R1CSMatricesVar::>::new_witness(cs.clone(), || Ok(r1cs)) - .unwrap(); - r1csVar.enforce_relation(&wVar, &uVar).unwrap(); + R1CSMatricesVar::>::new_witness(cs.clone(), || Ok(r1cs))?; + 
r1csVar.enforce_relation(&wVar, &uVar)?; + Ok(()) } } diff --git a/folding-schemes/src/arith/r1cs/mod.rs b/folding-schemes/src/arith/r1cs/mod.rs index 362567de..f6baf8de 100644 --- a/folding-schemes/src/arith/r1cs/mod.rs +++ b/folding-schemes/src/arith/r1cs/mod.rs @@ -4,7 +4,8 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::rand::Rng; use super::ccs::CCS; -use super::{Arith, ArithSerializer}; +use super::{Arith, ArithRelation, ArithSerializer}; +use crate::folding::traits::Dummy; use crate::utils::vec::{ hadamard, is_zero_vec, mat_vec_mul, vec_scalar_mul, vec_sub, SparseMatrix, }; @@ -14,14 +15,14 @@ pub mod circuits; #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] pub struct R1CS { - pub l: usize, // io len + l: usize, // io len pub A: SparseMatrix, pub B: SparseMatrix, pub C: SparseMatrix, } impl R1CS { - /// Evaluates the CCS relation at a given vector of variables `z` + /// Evaluates the R1CS relation at a given vector of variables `z` pub fn eval_at_z(&self, z: &[F]) -> Result, Error> { if z.len() != self.A.n_cols { return Err(Error::NotSameLength( @@ -43,7 +44,38 @@ impl R1CS { } } -impl, U: AsRef<[F]>> Arith for R1CS { +impl Arith for R1CS { + #[inline] + fn degree(&self) -> usize { + 2 + } + + #[inline] + fn n_constraints(&self) -> usize { + self.A.n_rows + } + + #[inline] + fn n_variables(&self) -> usize { + self.A.n_cols + } + + #[inline] + fn n_public_inputs(&self) -> usize { + self.l + } + + #[inline] + fn n_witnesses(&self) -> usize { + self.n_variables() - self.n_public_inputs() - 1 + } + + fn split_z(&self, z: &[P]) -> (Vec

<P>, Vec<P>

) { + (z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec()) + } +} + +impl, U: AsRef<[F]>> ArithRelation for R1CS { type Evaluation = Vec; fn eval_relation(&self, w: &W, u: &U) -> Result { @@ -66,14 +98,20 @@ impl ArithSerializer for R1CS { } } +impl Dummy<(usize, usize, usize)> for R1CS { + fn dummy((n_constraints, n_variables, n_public_inputs): (usize, usize, usize)) -> Self { + Self { + l: n_public_inputs, + A: SparseMatrix::dummy((n_constraints, n_variables)), + B: SparseMatrix::dummy((n_constraints, n_variables)), + C: SparseMatrix::dummy((n_constraints, n_variables)), + } + } +} + impl R1CS { pub fn empty() -> Self { - R1CS { - l: 0, - A: SparseMatrix::empty(), - B: SparseMatrix::empty(), - C: SparseMatrix::empty(), - } + Self::dummy((0, 0, 0)) } pub fn rand(rng: &mut R, n_rows: usize, n_cols: usize) -> Self { Self { @@ -83,37 +121,12 @@ impl R1CS { C: SparseMatrix::rand(rng, n_rows, n_cols), } } - - #[inline] - pub fn num_constraints(&self) -> usize { - self.A.n_rows - } - - #[inline] - pub fn num_public_inputs(&self) -> usize { - self.l - } - - #[inline] - pub fn num_variables(&self) -> usize { - self.A.n_cols - } - - #[inline] - pub fn num_witnesses(&self) -> usize { - self.num_variables() - self.num_public_inputs() - 1 - } - - /// returns a tuple containing (w, x) (witness and public inputs respectively) - pub fn split_z(&self, z: &[F]) -> (Vec, Vec) { - (z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec()) - } } impl From> for R1CS { fn from(ccs: CCS) -> Self { R1CS:: { - l: ccs.l, + l: ccs.n_public_inputs(), A: ccs.M[0].clone(), B: ccs.M[1].clone(), C: ccs.M[2].clone(), @@ -232,23 +245,25 @@ pub mod tests { } #[test] - fn test_eval_r1cs_relation() { + fn test_eval_r1cs_relation() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let r1cs = get_test_r1cs::(); let (_, x, mut w) = get_test_z_split::(rng.gen::() as usize); - let f_w = r1cs.eval_relation(&w, &x).unwrap(); + let f_w = r1cs.eval_relation(&w, &x)?; assert!(is_zero_vec(&f_w)); w[1] = Fr::from(111); - let f_w = r1cs.eval_relation(&w, &x).unwrap(); + let f_w = r1cs.eval_relation(&w, &x)?; assert!(!is_zero_vec(&f_w)); + Ok(()) } #[test] - fn test_check_r1cs_relation() { + fn test_check_r1cs_relation() -> Result<(), Error> { let r1cs = get_test_r1cs::(); let (_, x, w) = get_test_z_split(5); - r1cs.check_relation(&w, &x).unwrap(); + r1cs.check_relation(&w, &x)?; + Ok(()) } } diff --git a/folding-schemes/src/commitment/ipa.rs b/folding-schemes/src/commitment/ipa.rs index 8a6a9893..b0597616 100644 --- a/folding-schemes/src/commitment/ipa.rs +++ b/folding-schemes/src/commitment/ipa.rs @@ -7,14 +7,15 @@ /// i. computation is done in log time following a modification of the equation 3 in section /// 3.2 from the paper. /// ii. s computation is done in 2^{k+1}-2 instead of k*2^k. 
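// Illustrative sketch (not part of this patch): optimization (i) above refers to the
// inner product <s, b>, which can be evaluated with one factor per IPA round challenge
// u_i instead of materializing the length-2^k vectors s and b, via
//   g(x, u_1, ..., u_k) = \prod_{i=1}^{k} (u_i * x^{2^{i-1}} + u_i^{-1}).
// A minimal native sketch mirroring `s_b_inner` below (`s_b_inner_sketch` is a
// hypothetical name, not an API of this crate); each challenge u_i must be invertible:
fn s_b_inner_sketch<F: ark_ff::PrimeField>(u: &[F], x: &F) -> Option<F> {
    let mut c = F::one();
    let mut x_pow = *x; // x^{2^i}, starting at x^{2^0} = x
    for u_i in u {
        c *= *u_i * x_pow + u_i.inverse()?;
        x_pow.square_in_place();
    }
    Some(c)
}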
-use ark_ec::{AffineRepr, CurveGroup}; +use ark_ec::AffineRepr; use ark_ff::{Field, PrimeField}; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, boolean::Boolean, - fields::{nonnative::NonNativeFieldVar, FieldVar}, + convert::ToBitsGadget, + eq::EqGadget, + fields::{emulated_fp::EmulatedFpVar, FieldVar}, prelude::CurveVar, - ToBitsGadget, }; use ark_relations::r1cs::{Namespace, SynthesisError}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -23,15 +24,16 @@ use core::{borrow::Borrow, marker::PhantomData}; use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}; use super::{pedersen::Params as PedersenParams, CommitmentScheme}; +use crate::folding::circuits::CF2; use crate::transcript::Transcript; use crate::utils::{ powers_of, vec::{vec_add, vec_scalar_mul}, }; -use crate::Error; +use crate::{Curve, Error}; #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Proof { +pub struct Proof { a: C::ScalarField, l: Vec, r: Vec, @@ -42,12 +44,12 @@ pub struct Proof { /// IPA implements the Inner Product Argument protocol following the CommitmentScheme trait. The /// `H` parameter indicates if to use the commitment in hiding mode or not. #[derive(Debug, Clone, Eq, PartialEq)] -pub struct IPA { +pub struct IPA { _c: PhantomData, } /// Implements the CommitmentScheme trait for IPA -impl CommitmentScheme for IPA { +impl CommitmentScheme for IPA { type ProverParams = PedersenParams; type VerifierParams = PedersenParams; type Proof = (Proof, C::ScalarField, C::ScalarField); // (proof, v=p(x), r=blinding factor) @@ -363,12 +365,12 @@ fn build_s(u: &[F], u_invs: &[F], k: usize) -> Result, Err /// taking 2^{k+1}-2. /// src: https://github.com/zcash/halo2/blob/81729eca91ba4755e247f49c3a72a4232864ec9e/halo2_proofs/src/poly/commitment/verifier.rs#L156 fn build_s_gadget( - u: &[NonNativeFieldVar], - u_invs: &[NonNativeFieldVar], + u: &[EmulatedFpVar], + u_invs: &[EmulatedFpVar], k: usize, -) -> Result>, SynthesisError> { +) -> Result>, SynthesisError> { let d: usize = 2_u64.pow(k as u32) as usize; - let mut s: Vec> = vec![NonNativeFieldVar::one(); d]; + let mut s: Vec> = vec![EmulatedFpVar::one(); d]; for (len, (u_j, u_j_inv)) in u .iter() .zip(u_invs) @@ -422,10 +424,10 @@ fn s_b_inner(u: &[F], x: &F) -> Result { // g(x, u_1, u_2, ..., u_k) = , naively takes linear, but can compute in log time through // g(x, u_1, u_2, ..., u_k) = \Prod u_i x^{2^i} + u_i^-1 fn s_b_inner_gadget( - u: &[NonNativeFieldVar], - x: &NonNativeFieldVar, -) -> Result, SynthesisError> { - let mut c: NonNativeFieldVar = NonNativeFieldVar::::one(); + u: &[EmulatedFpVar], + x: &EmulatedFpVar, +) -> Result, SynthesisError> { + let mut c: EmulatedFpVar = EmulatedFpVar::::one(); let mut x_2_i = x.clone(); // x_2_i is x^{2^i}, starting from x^{2^0}=x for u_i in u.iter() { c *= u_i.clone() * x_2_i.clone() + u_i.inverse()?; @@ -434,40 +436,35 @@ fn s_b_inner_gadget( Ok(c) } -pub type CF = <::BaseField as Field>::BasePrimeField; - -pub struct ProofVar>> { - a: NonNativeFieldVar>, - l: Vec>>, - r: Vec>>, - L: Vec, - R: Vec, +pub struct ProofVar { + a: EmulatedFpVar>, + l: Vec>>, + r: Vec>>, + L: Vec, + R: Vec, } -impl AllocVar, CF> for ProofVar -where - C: CurveGroup, - GC: CurveVar>, - ::BaseField: PrimeField, -{ +impl AllocVar, CF2> for ProofVar { fn new_variable>>( - cs: impl Into>>, + cs: impl Into>>, f: impl FnOnce() -> Result, mode: AllocationMode, ) -> Result { f().and_then(|val| { let cs = cs.into(); - let a = 
NonNativeFieldVar::>::new_variable( + let a = EmulatedFpVar::>::new_variable( cs.clone(), || Ok(val.borrow().a), mode, )?; - let l: Vec>> = + let l: Vec>> = Vec::new_variable(cs.clone(), || Ok(val.borrow().l.clone()), mode)?; - let r: Vec>> = + let r: Vec>> = Vec::new_variable(cs.clone(), || Ok(val.borrow().r.clone()), mode)?; - let L: Vec = Vec::new_variable(cs.clone(), || Ok(val.borrow().L.clone()), mode)?; - let R: Vec = Vec::new_variable(cs.clone(), || Ok(val.borrow().R.clone()), mode)?; + let L: Vec = + Vec::new_variable(cs.clone(), || Ok(val.borrow().L.clone()), mode)?; + let R: Vec = + Vec::new_variable(cs.clone(), || Ok(val.borrow().R.clone()), mode)?; Ok(Self { a, l, r, L, R }) }) @@ -477,36 +474,26 @@ where /// IPAGadget implements the circuit that verifies an IPA Proof. The `H` parameter indicates if to /// use the commitment in hiding mode or not, reducing a bit the number of constraints needed in /// the later case. -pub struct IPAGadget -where - C: CurveGroup, - GC: CurveVar>, -{ - _cf: PhantomData>, +pub struct IPAGadget { _c: PhantomData, - _gc: PhantomData, } -impl IPAGadget -where - C: CurveGroup, - GC: CurveVar>, -{ +impl IPAGadget { /// Verify the IPA opening proof, K=log2(d), where d is the degree of the committed polynomial, /// and H indicates if the commitment is in hiding mode and thus uses blinding factors, if not, /// there are some constraints saved. #[allow(clippy::too_many_arguments)] pub fn verify( - g: &[GC], // params.generators - h: &GC, // params.h - x: &NonNativeFieldVar>, // evaluation point, challenge - v: &NonNativeFieldVar>, // value at evaluation point - P: &GC, // commitment - p: &ProofVar, - r: &NonNativeFieldVar>, // blinding factor - u: &[NonNativeFieldVar>; K], // challenges - U: &GC, // challenge - ) -> Result>, SynthesisError> { + g: &[C::Var], // params.generators + h: &C::Var, // params.h + x: &EmulatedFpVar>, // evaluation point, challenge + v: &EmulatedFpVar>, // value at evaluation point + P: &C::Var, // commitment + p: &ProofVar, + r: &EmulatedFpVar>, // blinding factor + u: &[EmulatedFpVar>; K], // challenges + U: &C::Var, // challenge + ) -> Result>, SynthesisError> { if p.L.len() != K || p.R.len() != K { return Err(SynthesisError::Unsatisfiable); } @@ -516,7 +503,7 @@ where let mut r = r.clone(); // compute u[i]^-1 once - let mut u_invs = vec![NonNativeFieldVar::>::zero(); u.len()]; + let mut u_invs = vec![EmulatedFpVar::>::zero(); u.len()]; for (j, u_j) in u.iter().enumerate() { u_invs[j] = u_j.inverse()?; } @@ -531,9 +518,23 @@ where } // msm: G= - let mut G = GC::zero(); - for (i, s_i) in s.iter().enumerate() { - G += g[i].scalar_mul_le(s_i.to_bits_le()?.iter())?; + let mut G = C::Var::zero(); + let n = s.len(); + if n % 2 == 1 { + G += g[n - 1].scalar_mul_le(s[n - 1].to_bits_le()?.iter())?; + } else { + G += g[n - 1].joint_scalar_mul_be( + &g[n - 2], + s[n - 1].to_bits_le()?.iter(), + s[n - 2].to_bits_le()?.iter(), + )?; + } + for i in (1..n - 2).step_by(2) { + G += g[i - 1].joint_scalar_mul_be( + &g[i], + s[i - 1].to_bits_le()?.iter(), + s[i].to_bits_le()?.iter(), + )?; } for (j, u_j) in u.iter().enumerate() { @@ -550,11 +551,17 @@ where let q_1 = if H { G.scalar_mul_le(p.a.to_bits_le()?.iter())? - + h.scalar_mul_le(r.to_bits_le()?.iter())? - + U.scalar_mul_le((p.a.clone() * b).to_bits_le()?.iter())? + + h.joint_scalar_mul_be( + &U, + r.to_bits_le()?.iter(), + (p.a.clone() * b).to_bits_le()?.iter(), + )? } else { - G.scalar_mul_le(p.a.to_bits_le()?.iter())? - + U.scalar_mul_le((p.a.clone() * b).to_bits_le()?.iter())? 
+ G.joint_scalar_mul_be( + &U, + p.a.to_bits_le()?.iter(), + (p.a.clone() * b).to_bits_le()?.iter(), + )? }; // q_0 == q_1 q_0.is_eq(&q_1) @@ -564,28 +571,28 @@ where #[cfg(test)] mod tests { use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge}; - use ark_ec::Group; + use ark_ec::PrimeGroup; use ark_pallas::{constraints::GVar, Fq, Fr, Projective}; use ark_r1cs_std::eq::EqGadget; use ark_relations::r1cs::ConstraintSystem; - use std::ops::Mul; use super::*; use crate::transcript::poseidon::poseidon_canonical_config; #[test] - fn test_ipa() { - test_ipa_opt::(); - test_ipa_opt::(); + fn test_ipa() -> Result<(), Error> { + let _ = test_ipa_opt::()?; + let _ = test_ipa_opt::()?; + Ok(()) } - fn test_ipa_opt() { + fn test_ipa_opt() -> Result<(), Error> { let mut rng = ark_std::test_rng(); const k: usize = 4; const d: usize = 2_u64.pow(k as u32) as usize; // setup params - let (params, _) = IPA::::setup(&mut rng, d).unwrap(); + let (params, _) = IPA::::setup(&mut rng, d)?; let poseidon_config = poseidon_canonical_config::(); // init Prover's transcript @@ -602,7 +609,7 @@ mod tests { } else { Fr::zero() }; - let cm = IPA::::commit(¶ms, &a, &r_blind).unwrap(); + let cm = IPA::::commit(¶ms, &a, &r_blind)?; let proof = IPA::::prove( ¶ms, @@ -611,25 +618,26 @@ mod tests { &a, &r_blind, Some(&mut rng), - ) - .unwrap(); + )?; - IPA::::verify(¶ms, &mut transcript_v, &cm, &proof).unwrap(); + IPA::::verify(¶ms, &mut transcript_v, &cm, &proof)?; + Ok(()) } #[test] - fn test_ipa_gadget() { - test_ipa_gadget_opt::(); - test_ipa_gadget_opt::(); + fn test_ipa_gadget() -> Result<(), Error> { + let _ = test_ipa_gadget_opt::()?; + let _ = test_ipa_gadget_opt::()?; + Ok(()) } - fn test_ipa_gadget_opt() { + fn test_ipa_gadget_opt() -> Result<(), Error> { let mut rng = ark_std::test_rng(); const k: usize = 3; const d: usize = 2_u64.pow(k as u32) as usize; // setup params - let (params, _) = IPA::::setup(&mut rng, d).unwrap(); + let (params, _) = IPA::::setup(&mut rng, d)?; let poseidon_config = poseidon_canonical_config::(); // init Prover's transcript @@ -646,7 +654,7 @@ mod tests { } else { Fr::zero() }; - let cm = IPA::::commit(¶ms, &a, &r_blind).unwrap(); + let cm = IPA::::commit(¶ms, &a, &r_blind)?; let proof = IPA::::prove( ¶ms, @@ -655,10 +663,9 @@ mod tests { &a, &r_blind, Some(&mut rng), - ) - .unwrap(); + )?; - IPA::::verify(¶ms, &mut transcript_v, &cm, &proof).unwrap(); + IPA::::verify(¶ms, &mut transcript_v, &cm, &proof)?; // circuit let cs = ConstraintSystem::::new_ref(); @@ -667,7 +674,7 @@ mod tests { transcript_v.absorb_nonnative(&cm); let challenge = transcript_v.get_challenge(); // challenge value at which we evaluate let s = transcript_v.get_challenge(); - let U = Projective::generator().mul(s); + let U = Projective::generator() * s; let mut u: Vec = vec![Fr::zero(); k]; for i in (0..k).rev() { transcript_v.absorb_nonnative(&proof.0.L[i]); @@ -676,21 +683,24 @@ mod tests { } // prepare inputs - let gVar = Vec::::new_constant(cs.clone(), params.generators).unwrap(); - let hVar = GVar::new_constant(cs.clone(), params.h).unwrap(); - let challengeVar = - NonNativeFieldVar::::new_witness(cs.clone(), || Ok(challenge)).unwrap(); - let vVar = NonNativeFieldVar::::new_witness(cs.clone(), || Ok(proof.1)).unwrap(); - let cmVar = GVar::new_witness(cs.clone(), || Ok(cm)).unwrap(); - let proofVar = - ProofVar::::new_witness(cs.clone(), || Ok(proof.0)).unwrap(); - let r_blindVar = - NonNativeFieldVar::::new_witness(cs.clone(), || Ok(r_blind)).unwrap(); - let uVar_vec = 
Vec::>::new_witness(cs.clone(), || Ok(u)).unwrap(); - let uVar: [NonNativeFieldVar; k] = uVar_vec.try_into().unwrap(); - let UVar = GVar::new_witness(cs.clone(), || Ok(U)).unwrap(); - - let v = IPAGadget::::verify::( + let gVar = Vec::::new_constant(cs.clone(), params.generators)?; + let hVar = GVar::new_constant(cs.clone(), params.h)?; + let challengeVar = EmulatedFpVar::::new_witness(cs.clone(), || Ok(challenge))?; + let vVar = EmulatedFpVar::::new_witness(cs.clone(), || Ok(proof.1))?; + let cmVar = GVar::new_witness(cs.clone(), || Ok(cm))?; + let proofVar = ProofVar::::new_witness(cs.clone(), || Ok(proof.0))?; + let r_blindVar = EmulatedFpVar::::new_witness(cs.clone(), || Ok(r_blind))?; + let uVar_vec = Vec::>::new_witness(cs.clone(), || Ok(u))?; + let uVar: [EmulatedFpVar; k] = uVar_vec.try_into().map_err(|_| { + Error::ConversionError( + "Vec<_>".to_string(), + "[_; 1]".to_string(), + "variable name: uVar".to_string(), + ) + })?; + let UVar = GVar::new_witness(cs.clone(), || Ok(U))?; + + let v = IPAGadget::::verify::( &gVar, &hVar, &challengeVar, @@ -700,9 +710,9 @@ mod tests { &r_blindVar, &uVar, &UVar, - ) - .unwrap(); - v.enforce_equal(&Boolean::TRUE).unwrap(); - assert!(cs.is_satisfied().unwrap()); + )?; + v.enforce_equal(&Boolean::TRUE)?; + assert!(cs.is_satisfied()?); + Ok(()) } } diff --git a/folding-schemes/src/commitment/kzg.rs b/folding-schemes/src/commitment/kzg.rs index 5bfce046..eec15bb3 100644 --- a/folding-schemes/src/commitment/kzg.rs +++ b/folding-schemes/src/commitment/kzg.rs @@ -24,17 +24,17 @@ use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use super::CommitmentScheme; use crate::transcript::Transcript; use crate::utils::vec::poly_from_vec; -use crate::Error; +use crate::{Curve, Error}; /// ProverKey defines a similar struct as in ark_poly_commit::kzg10::Powers, but instead of -/// depending on the Pairing trait it depends on the CurveGroup trait. +/// depending on the Pairing trait it depends on the SonobeCurve trait. #[derive(Debug, Clone, Default, Eq, PartialEq)] -pub struct ProverKey<'a, C: CurveGroup> { +pub struct ProverKey<'a, C: Curve> { /// Group elements of the form `β^i G`, for different values of `i`. 
pub powers_of_g: Cow<'a, [C::Affine]>, } -impl<'a, C: CurveGroup> CanonicalSerialize for ProverKey<'a, C> { +impl<'a, C: Curve> CanonicalSerialize for ProverKey<'a, C> { fn serialize_with_mode( &self, mut writer: W, @@ -48,7 +48,7 @@ impl<'a, C: CurveGroup> CanonicalSerialize for ProverKey<'a, C> { } } -impl<'a, C: CurveGroup> CanonicalDeserialize for ProverKey<'a, C> { +impl<'a, C: Curve> CanonicalDeserialize for ProverKey<'a, C> { fn deserialize_with_mode( reader: R, compress: ark_serialize::Compress, @@ -61,7 +61,7 @@ impl<'a, C: CurveGroup> CanonicalDeserialize for ProverKey<'a, C> { } } -impl<'a, C: CurveGroup> Valid for ProverKey<'a, C> { +impl<'a, C: Curve> Valid for ProverKey<'a, C> { fn check(&self) -> Result<(), ark_serialize::SerializationError> { match self.powers_of_g.clone() { Cow::Borrowed(powers) => powers.to_vec().check(), @@ -71,7 +71,7 @@ impl<'a, C: CurveGroup> Valid for ProverKey<'a, C> { } #[derive(Debug, Clone, Default, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Proof { +pub struct Proof { pub eval: C::ScalarField, pub proof: C, } @@ -83,10 +83,7 @@ pub struct KZG<'a, E: Pairing, const H: bool = false> { _e: PhantomData, } -impl<'a, E, const H: bool> CommitmentScheme for KZG<'a, E, H> -where - E: Pairing, -{ +impl<'a, E: Pairing, const H: bool> CommitmentScheme for KZG<'a, E, H> { type ProverParams = ProverKey<'a, E::G1>; type VerifierParams = VerifierKey; type Proof = Proof; @@ -293,22 +290,22 @@ mod tests { use crate::transcript::poseidon::poseidon_canonical_config; #[test] - fn test_kzg_commitment_scheme() { + fn test_kzg_commitment_scheme() -> Result<(), Error> { let mut rng = &mut test_rng(); let poseidon_config = poseidon_canonical_config::(); let transcript_p = &mut PoseidonSponge::::new(&poseidon_config); let transcript_v = &mut PoseidonSponge::::new(&poseidon_config); let n = 10; - let (pk, vk): (ProverKey, VerifierKey) = - KZG::::setup(&mut rng, n).unwrap(); + let (pk, vk): (ProverKey, VerifierKey) = KZG::::setup(&mut rng, n)?; let v: Vec = std::iter::repeat_with(|| Fr::rand(rng)).take(n).collect(); - let cm = KZG::::commit(&pk, &v, &Fr::zero()).unwrap(); + let cm = KZG::::commit(&pk, &v, &Fr::zero())?; - let proof = KZG::::prove(&pk, transcript_p, &cm, &v, &Fr::zero(), None).unwrap(); + let proof = KZG::::prove(&pk, transcript_p, &cm, &v, &Fr::zero(), None)?; // verify the proof: - KZG::::verify(&vk, transcript_v, &cm, &proof).unwrap(); + KZG::::verify(&vk, transcript_v, &cm, &proof)?; + Ok(()) } } diff --git a/folding-schemes/src/commitment/mod.rs b/folding-schemes/src/commitment/mod.rs index a76ba15e..0c9301d6 100644 --- a/folding-schemes/src/commitment/mod.rs +++ b/folding-schemes/src/commitment/mod.rs @@ -1,10 +1,9 @@ -use ark_ec::CurveGroup; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::fmt::Debug; use ark_std::rand::RngCore; use crate::transcript::Transcript; -use crate::Error; +use crate::{Curve, Error}; pub mod ipa; pub mod kzg; @@ -12,7 +11,7 @@ pub mod pedersen; /// CommitmentScheme defines the vector commitment scheme trait. Where `H` indicates if to use the /// commitment in hiding mode or not. 
-pub trait CommitmentScheme: Clone + Debug { +pub trait CommitmentScheme: Clone + Debug { type ProverParams: Clone + Debug + CanonicalSerialize + CanonicalDeserialize; type VerifierParams: Clone + Debug + CanonicalSerialize + CanonicalDeserialize; type Proof: Clone + Debug + CanonicalSerialize + CanonicalDeserialize; @@ -74,7 +73,7 @@ mod tests { use ark_bn254::{Bn254, Fr, G1Projective as G1}; use ark_crypto_primitives::sponge::{ poseidon::{PoseidonConfig, PoseidonSponge}, - Absorb, CryptographicSponge, + CryptographicSponge, }; use ark_poly_commit::kzg10::VerifierKey; use ark_std::Zero; @@ -86,7 +85,7 @@ mod tests { use crate::transcript::poseidon::poseidon_canonical_config; #[test] - fn test_homomorphic_property_using_Commitment_trait() { + fn test_homomorphic_property_using_Commitment_trait() -> Result<(), Error> { let mut rng = &mut test_rng(); let poseidon_config = poseidon_canonical_config::(); let n: usize = 128; @@ -98,55 +97,50 @@ mod tests { let r = Fr::rand(rng); // setup params for Pedersen & KZG - let (pedersen_params, _) = Pedersen::::setup(&mut rng, n).unwrap(); - let (kzg_pk, kzg_vk): (ProverKey, VerifierKey) = - KZG::::setup(rng, n).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, n)?; + let (kzg_pk, kzg_vk): (ProverKey, VerifierKey) = KZG::::setup(rng, n)?; // test with Pedersen - test_homomorphic_property_using_Commitment_trait_opt::>( + let _ = test_homomorphic_property_using_Commitment_trait_opt::>( &poseidon_config, &pedersen_params, &pedersen_params, r, &v_1, &v_2, - ); + )?; // test with IPA - test_homomorphic_property_using_Commitment_trait_opt::>( + let _ = test_homomorphic_property_using_Commitment_trait_opt::>( &poseidon_config, &pedersen_params, &pedersen_params, r, &v_1, &v_2, - ); + )?; // test with KZG - test_homomorphic_property_using_Commitment_trait_opt::>( + let _ = test_homomorphic_property_using_Commitment_trait_opt::>( &poseidon_config, &kzg_pk, &kzg_vk, r, &v_1, &v_2, - ); + )?; + Ok(()) } - fn test_homomorphic_property_using_Commitment_trait_opt< - C: CurveGroup, - CS: CommitmentScheme, - >( + fn test_homomorphic_property_using_Commitment_trait_opt>( poseidon_config: &PoseidonConfig, prover_params: &CS::ProverParams, verifier_params: &CS::VerifierParams, r: C::ScalarField, v_1: &[C::ScalarField], v_2: &[C::ScalarField], - ) where - ::ScalarField: Absorb, - { + ) -> Result<(), Error> { // compute the commitment of the two vectors using the given CommitmentScheme - let cm_1 = CS::commit(prover_params, v_1, &C::ScalarField::zero()).unwrap(); - let cm_2 = CS::commit(prover_params, v_2, &C::ScalarField::zero()).unwrap(); + let cm_1 = CS::commit(prover_params, v_1, &C::ScalarField::zero())?; + let cm_2 = CS::commit(prover_params, v_2, &C::ScalarField::zero())?; // random linear combination of the commitments and their witnesses (vectors v_i) let cm_3 = cm_1 + cm_2.mul(r); @@ -161,11 +155,11 @@ mod tests { &v_3, &C::ScalarField::zero(), None, - ) - .unwrap(); + )?; // verify the opening proof let transcript_v = &mut PoseidonSponge::::new(poseidon_config); - CS::verify(verifier_params, transcript_v, &cm_3, &proof).unwrap(); + CS::verify(verifier_params, transcript_v, &cm_3, &proof)?; + Ok(()) } } diff --git a/folding-schemes/src/commitment/pedersen.rs b/folding-schemes/src/commitment/pedersen.rs index 6aac49af..77e16ce5 100644 --- a/folding-schemes/src/commitment/pedersen.rs +++ b/folding-schemes/src/commitment/pedersen.rs @@ -1,37 +1,34 @@ -use ark_ec::CurveGroup; -use ark_ff::Field; -use ark_r1cs_std::{boolean::Boolean, 
prelude::CurveVar}; +use ark_r1cs_std::{boolean::Boolean, convert::ToBitsGadget, prelude::CurveVar}; use ark_relations::r1cs::SynthesisError; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use ark_std::Zero; -use ark_std::{rand::RngCore, UniformRand}; -use core::marker::PhantomData; +use ark_std::{marker::PhantomData, rand::RngCore, UniformRand, Zero}; use super::CommitmentScheme; +use crate::folding::circuits::CF2; use crate::transcript::Transcript; use crate::utils::vec::{vec_add, vec_scalar_mul}; -use crate::Error; +use crate::{Curve, Error}; #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Proof { +pub struct Proof { pub R: C, pub u: Vec, pub r_u: C::ScalarField, // blind } #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Params { +pub struct Params { pub h: C, pub generators: Vec, } #[derive(Debug, Clone, Eq, PartialEq)] -pub struct Pedersen { +pub struct Pedersen { _c: PhantomData, } /// Implements the CommitmentScheme trait for Pedersen commitments -impl CommitmentScheme for Pedersen { +impl CommitmentScheme for Pedersen { type ProverParams = Params; type VerifierParams = Params; type Proof = Proof; @@ -177,36 +174,37 @@ impl CommitmentScheme for Pedersen { } } -pub type CF = <::BaseField as Field>::BasePrimeField; - -pub struct PedersenGadget -where - C: CurveGroup, - GC: CurveVar>, -{ - _cf: PhantomData>, +pub struct PedersenGadget { _c: PhantomData, - _gc: PhantomData, } -use ark_r1cs_std::ToBitsGadget; -impl PedersenGadget -where - C: CurveGroup, - GC: CurveVar>, -{ +impl PedersenGadget { pub fn commit( - h: &GC, - g: &[GC], - v: &[Vec>>], - r: &[Boolean>], - ) -> Result { - let mut res = GC::zero(); + h: &C::Var, + g: &[C::Var], + v: &[Vec>>], + r: &[Boolean>], + ) -> Result { + let mut res = C::Var::zero(); if H { res += h.scalar_mul_le(r.iter())?; } - for (i, v_i) in v.iter().enumerate() { - res += g[i].scalar_mul_le(v_i.to_bits_le()?.iter())?; + let n = v.len(); + if n % 2 == 1 { + res += g[n - 1].scalar_mul_le(v[n - 1].to_bits_le()?.iter())?; + } else { + res += g[n - 1].joint_scalar_mul_be( + &g[n - 2], + v[n - 1].to_bits_le()?.iter(), + v[n - 2].to_bits_le()?.iter(), + )?; + } + for i in (1..n - 2).step_by(2) { + res += g[i - 1].joint_scalar_mul_be( + &g[i], + v[i - 1].to_bits_le()?.iter(), + v[i].to_bits_le()?.iter(), + )?; } Ok(res) } @@ -224,16 +222,17 @@ mod tests { use crate::transcript::poseidon::poseidon_canonical_config; #[test] - fn test_pedersen() { - test_pedersen_opt::(); - test_pedersen_opt::(); + fn test_pedersen() -> Result<(), Error> { + let _ = test_pedersen_opt::()?; + let _ = test_pedersen_opt::()?; + Ok(()) } - fn test_pedersen_opt() { + fn test_pedersen_opt() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let n: usize = 10; // setup params - let (params, _) = Pedersen::::setup(&mut rng, n).unwrap(); + let (params, _) = Pedersen::::setup(&mut rng, n)?; let poseidon_config = poseidon_canonical_config::(); // init Prover's transcript @@ -250,30 +249,25 @@ mod tests { } else { Fr::zero() }; - let cm = Pedersen::::commit(¶ms, &v, &r).unwrap(); + let cm = Pedersen::::commit(¶ms, &v, &r)?; let proof = - Pedersen::::prove(¶ms, &mut transcript_p, &cm, &v, &r, None) - .unwrap(); - Pedersen::::verify(¶ms, &mut transcript_v, &cm, &proof).unwrap(); + Pedersen::::prove(¶ms, &mut transcript_p, &cm, &v, &r, None)?; + Pedersen::::verify(¶ms, &mut transcript_v, &cm, &proof)?; + Ok(()) } - /// To run this test: - /// > cargo test --release 
test_pedersen_circuit -- --nocapture #[test] - fn test_pedersen_circuit() { - test_pedersen_circuit_opt::(); - // test_pedersen_circuit_opt::(); + fn test_pedersen_circuit() -> Result<(), Error> { + let _ = test_pedersen_circuit_opt::()?; + let _ = test_pedersen_circuit_opt::()?; + Ok(()) } - fn test_pedersen_circuit_opt() { + fn test_pedersen_circuit_opt() -> Result<(), Error> { let mut rng = ark_std::test_rng(); - // toy value: - // let n: usize = 8; - // real CycleFold value: - let n: usize = 1355; - + let n: usize = 8; // setup params - let (params, _) = Pedersen::::setup(&mut rng, n).unwrap(); + let (params, _) = Pedersen::::setup(&mut rng, n)?; let v: Vec = std::iter::repeat_with(|| Fr::rand(&mut rng)) .take(n) @@ -284,7 +278,7 @@ mod tests { } else { Fr::zero() }; - let cm = Pedersen::::commit(¶ms, &v, &r).unwrap(); + let cm = Pedersen::::commit(¶ms, &v, &r)?; let v_bits: Vec> = v.iter().map(|val| val.into_bigint().to_bits_le()).collect(); let r_bits: Vec = r.into_bigint().to_bits_le(); @@ -295,24 +289,16 @@ mod tests { // prepare inputs let vVar: Vec>> = v_bits .iter() - .map(|val_bits| { - Vec::>::new_witness(cs.clone(), || Ok(val_bits.clone())).unwrap() - }) - .collect(); - let rVar = Vec::>::new_witness(cs.clone(), || Ok(r_bits)).unwrap(); - let gVar = Vec::::new_witness(cs.clone(), || Ok(params.generators)).unwrap(); - let hVar = GVar::new_witness(cs.clone(), || Ok(params.h)).unwrap(); - let expected_cmVar = GVar::new_witness(cs.clone(), || Ok(cm)).unwrap(); + .map(|val_bits| Vec::>::new_witness(cs.clone(), || Ok(val_bits.clone()))) + .collect::>()?; + let rVar = Vec::>::new_witness(cs.clone(), || Ok(r_bits))?; + let gVar = Vec::::new_witness(cs.clone(), || Ok(params.generators))?; + let hVar = GVar::new_witness(cs.clone(), || Ok(params.h))?; + let expected_cmVar = GVar::new_witness(cs.clone(), || Ok(cm))?; // use the gadget - // THIS is the method that takes ~3.5M r1cs constraints (and in the actual circuit we do - // this circuit 2 times): - let cmVar = - PedersenGadget::::commit(&hVar, &gVar, &vVar, &rVar).unwrap(); - cmVar.enforce_equal(&expected_cmVar).unwrap(); - println!( - "num_constraints Pedersen check natively: {}", - cs.num_constraints() - ); + let cmVar = PedersenGadget::::commit(&hVar, &gVar, &vVar, &rVar)?; + cmVar.enforce_equal(&expected_cmVar)?; + Ok(()) } } diff --git a/folding-schemes/src/folding/circuits/cyclefold.rs b/folding-schemes/src/folding/circuits/cyclefold.rs index ca71efc6..1511221e 100644 --- a/folding-schemes/src/folding/circuits/cyclefold.rs +++ b/folding-schemes/src/folding/circuits/cyclefold.rs @@ -1,39 +1,39 @@ /// Contains [CycleFold](https://eprint.iacr.org/2023/1192.pdf) related circuits and functions that /// are shared across the different folding schemes -use ark_crypto_primitives::sponge::{Absorb, CryptographicSponge}; -use ark_ec::{AffineRepr, CurveGroup, Group}; -use ark_ff::{BigInteger, Field, PrimeField}; +use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, Absorb, CryptographicSponge}; +use ark_ec::AffineRepr; +use ark_ff::{BigInteger, PrimeField}; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, boolean::Boolean, + convert::ToConstraintFieldGadget, eq::EqGadget, fields::fp::FpVar, prelude::CurveVar, - ToConstraintFieldGadget, + R1CSVar, }; use ark_relations::r1cs::{ ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, Namespace, SynthesisError, }; use ark_std::fmt::Debug; use ark_std::rand::RngCore; -use ark_std::{One, Zero}; +use ark_std::Zero; use core::{borrow::Borrow, marker::PhantomData}; use 
super::{nonnative::uint::NonNativeUintVar, CF1, CF2}; +use crate::arith::{ + r1cs::{circuits::R1CSMatricesVar, extract_w_x, R1CS}, + Arith, ArithRelationGadget, +}; use crate::commitment::CommitmentScheme; use crate::constants::NOVA_N_BITS_RO; -use crate::folding::nova::nifs::{nova::NIFS, NIFSTrait}; +use crate::folding::{ + nova::nifs::{nova::NIFS, NIFSTrait}, + traits::InputizeNonNative, +}; use crate::transcript::{AbsorbNonNative, AbsorbNonNativeGadget, Transcript, TranscriptVar}; use crate::utils::gadgets::{EquivalenceGadget, VectorGadget}; -use crate::Error; -use crate::{ - arith::{ - r1cs::{circuits::R1CSMatricesVar, extract_w_x, R1CS}, - ArithGadget, - }, - folding::traits::Inputize, -}; -use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; +use crate::{Curve, Error}; /// Re-export the Nova committed instance as `CycleFoldCommittedInstance` and /// witness as `CycleFoldWitness`, for clarity and consistency @@ -41,48 +41,31 @@ pub use crate::folding::nova::{ CommittedInstance as CycleFoldCommittedInstance, Witness as CycleFoldWitness, }; -impl>> Inputize, CycleFoldCommittedInstanceVar> - for CycleFoldCommittedInstance -{ - fn inputize(&self) -> Vec> { - let zero = (&C::BaseField::zero(), &C::BaseField::zero()); - let cmE = self.cmE.into_affine(); - let cmW = self.cmW.into_affine(); - let (cmE_x, cmE_y) = cmE.xy().unwrap_or(zero); - let (cmW_x, cmW_y) = cmW.xy().unwrap_or(zero); - self.u - .inputize() - .into_iter() - .chain(self.x.iter().flat_map(|x| x.inputize())) - .chain( - [ - *cmE_x, - *cmE_y, - C::BaseField::one(), - *cmW_x, - *cmW_y, - C::BaseField::one(), - ] - .into_iter() - .flat_map(|x| x.to_base_prime_field_elements()), - ) - .collect() +impl InputizeNonNative> for CycleFoldCommittedInstance { + /// Returns the internal representation in the same order as how the value + /// is allocated in `CycleFoldCommittedInstanceVar::new_input`. 
+ fn inputize_nonnative(&self) -> Vec> { + [ + self.u.inputize_nonnative(), + self.x.inputize_nonnative(), + self.cmE.inputize(), + self.cmW.inputize(), + ] + .concat() } } /// CycleFoldCommittedInstanceVar is the CycleFold CommittedInstance represented /// in folding verifier circuit #[derive(Debug, Clone)] -pub struct CycleFoldCommittedInstanceVar>> { - pub cmE: GC, +pub struct CycleFoldCommittedInstanceVar { + pub cmE: C::Var, pub u: NonNativeUintVar>, - pub cmW: GC, + pub cmW: C::Var, pub x: Vec>>, } -impl AllocVar, CF2> for CycleFoldCommittedInstanceVar -where - C: CurveGroup, - GC: CurveVar>, +impl AllocVar, CF2> + for CycleFoldCommittedInstanceVar { fn new_variable>>( cs: impl Into>>, @@ -96,30 +79,21 @@ where NonNativeUintVar::>::new_variable(cs.clone(), || Ok(val.borrow().u), mode)?; let x: Vec>> = Vec::new_variable(cs.clone(), || Ok(val.borrow().x.clone()), mode)?; - let cmE = GC::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?; - let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?; + let cmE = C::Var::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?; + let cmW = C::Var::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?; Ok(Self { cmE, u, cmW, x }) }) } } -impl AbsorbNonNative for CycleFoldCommittedInstance -where - C::BaseField: PrimeField + Absorb, -{ +impl AbsorbNonNative for CycleFoldCommittedInstance { // Compatible with the in-circuit `CycleFoldCommittedInstanceVar::to_native_sponge_field_elements` - fn to_native_sponge_field_elements(&self, dest: &mut Vec) { - [self.u].to_native_sponge_field_elements(dest); + fn to_native_sponge_field_elements(&self, dest: &mut Vec) { + self.u.to_native_sponge_field_elements(dest); self.x.to_native_sponge_field_elements(dest); - let (cmE_x, cmE_y) = match self.cmE.into_affine().xy() { - Some((&x, &y)) => (x, y), - None => (C::BaseField::zero(), C::BaseField::zero()), - }; - let (cmW_x, cmW_y) = match self.cmW.into_affine().xy() { - Some((&x, &y)) => (x, y), - None => (C::BaseField::zero(), C::BaseField::zero()), - }; + let (cmE_x, cmE_y) = self.cmE.into_affine().xy().unwrap_or_default(); + let (cmW_x, cmW_y) = self.cmW.into_affine().xy().unwrap_or_default(); cmE_x.to_sponge_field_elements(dest); cmE_y.to_sponge_field_elements(dest); cmW_x.to_sponge_field_elements(dest); @@ -127,12 +101,7 @@ where } } -impl AbsorbNonNativeGadget for CycleFoldCommittedInstanceVar -where - C: CurveGroup, - GC: CurveVar> + ToConstraintFieldGadget>, - C::BaseField: PrimeField + Absorb, -{ +impl AbsorbNonNativeGadget for CycleFoldCommittedInstanceVar { /// Extracts the underlying field elements from `CycleFoldCommittedInstanceVar`, in the order /// of `u`, `x`, `cmE.x`, `cmE.y`, `cmW.x`, `cmW.y`, `cmE.is_inf || cmW.is_inf` (|| is for /// concat). @@ -159,10 +128,7 @@ where } } -impl CycleFoldCommittedInstance -where - C::BaseField: PrimeField + Absorb, -{ +impl CycleFoldCommittedInstance { /// hash_cyclefold implements the committed instance hash compatible with the /// in-circuit implementation `CycleFoldCommittedInstanceVar::hash`. /// Returns `H(U_i)`, where `U_i` is a `CycleFoldCommittedInstance`. @@ -178,12 +144,7 @@ where } } -impl CycleFoldCommittedInstanceVar -where - C: CurveGroup, - GC: CurveVar> + ToConstraintFieldGadget>, - C::BaseField: PrimeField + Absorb, -{ +impl CycleFoldCommittedInstanceVar { /// hash implements the committed instance hash compatible with the native /// implementation `CycleFoldCommittedInstance::hash_cyclefold`. /// Returns `H(U_i)`, where `U` is a `CycleFoldCommittedInstanceVar`. 
@@ -209,59 +170,18 @@ where } } -/// CommittedInstanceInCycleFoldVar represents the Nova CommittedInstance in the CycleFold circuit, -/// where the commitments to E and W (cmW and cmW) from the CommittedInstance on the E2, -/// represented as native points, which are folded on the auxiliary curve constraints field (E2::Fr -/// = E1::Fq). -#[derive(Debug, Clone)] -pub struct CommittedInstanceInCycleFoldVar>> { - _c: PhantomData, - pub cmE: GC, - pub cmW: GC, -} - -impl AllocVar, CF2> - for CommittedInstanceInCycleFoldVar -where - C: CurveGroup, - GC: CurveVar>, -{ - fn new_variable>>( - cs: impl Into>>, - f: impl FnOnce() -> Result, - mode: AllocationMode, - ) -> Result { - f().and_then(|val| { - let cs = cs.into(); - - let cmE = GC::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?; - let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?; - - Ok(Self { - _c: PhantomData, - cmE, - cmW, - }) - }) - } -} - /// In-circuit representation of the Witness associated to the CommittedInstance, but with /// non-native representation, since it is used to represent the CycleFold witness. This struct is /// used in the Decider circuit. #[derive(Debug, Clone)] -pub struct CycleFoldWitnessVar { +pub struct CycleFoldWitnessVar { pub E: Vec>>, pub rE: NonNativeUintVar>, pub W: Vec>>, pub rW: NonNativeUintVar>, } -impl AllocVar, CF2> for CycleFoldWitnessVar -where - C: CurveGroup, - C::BaseField: PrimeField, -{ +impl AllocVar, CF2> for CycleFoldWitnessVar { fn new_variable>>( cs: impl Into>>, f: impl FnOnce() -> Result, @@ -284,24 +204,18 @@ where /// This is the gadget used in the AugmentedFCircuit to verify the CycleFold instances folding, /// which checks the correct RLC of u,x,cmE,cmW (hence the name containing 'Full', since it checks /// all the RLC values, not only the native ones). It assumes that ci2.cmE=0, ci2.u=1. 
-pub struct NIFSFullGadget>> { +pub struct NIFSFullGadget { _c: PhantomData, - _gc: PhantomData, } -impl>> NIFSFullGadget -where - C: CurveGroup, - GC: CurveVar>, - C::BaseField: PrimeField, -{ +impl NIFSFullGadget { pub fn fold_committed_instance( r_bits: Vec>>, - cmT: GC, - ci1: CycleFoldCommittedInstanceVar, + cmT: C::Var, + ci1: CycleFoldCommittedInstanceVar, // ci2 is assumed to be always with cmE=0, u=1 (checks done previous to this method) - ci2: CycleFoldCommittedInstanceVar, - ) -> Result, SynthesisError> { + ci2: CycleFoldCommittedInstanceVar, + ) -> Result, SynthesisError> { // r_nonnat is equal to r_bits just that in a different format let r_nonnat = { let mut bits = r_bits.clone(); @@ -327,11 +241,11 @@ where pub fn verify( // assumes that r_bits is equal to r_nonnat just that in a different format r_bits: Vec>>, - cmT: GC, - ci1: CycleFoldCommittedInstanceVar, + cmT: C::Var, + ci1: CycleFoldCommittedInstanceVar, // ci2 is assumed to be always with cmE=0, u=1 (checks done previous to this method) - ci2: CycleFoldCommittedInstanceVar, - ci3: CycleFoldCommittedInstanceVar, + ci2: CycleFoldCommittedInstanceVar, + ci3: CycleFoldCommittedInstanceVar, ) -> Result<(), SynthesisError> { let ci = Self::fold_committed_instance(r_bits, cmT, ci1, ci2)?; @@ -346,8 +260,7 @@ where } } -impl>> - ArithGadget, CycleFoldCommittedInstanceVar> +impl ArithRelationGadget, CycleFoldCommittedInstanceVar> for R1CSMatricesVar, NonNativeUintVar>> { type Evaluation = (Vec>>, Vec>>); @@ -355,14 +268,14 @@ impl>> fn eval_relation( &self, w: &CycleFoldWitnessVar, - u: &CycleFoldCommittedInstanceVar, + u: &CycleFoldCommittedInstanceVar, ) -> Result { self.eval_at_z(&[&[u.u.clone()][..], &u.x, &w.W].concat()) } fn enforce_evaluation( w: &CycleFoldWitnessVar, - _u: &CycleFoldCommittedInstanceVar, + _u: &CycleFoldCommittedInstanceVar, (AzBz, uCz): Self::Evaluation, ) -> Result<(), SynthesisError> { EquivalenceGadget::>::enforce_equivalent(&AzBz[..], &uCz.add(&w.E)?[..]) @@ -371,16 +284,10 @@ impl>> /// CycleFoldChallengeGadget computes the RO challenge used for the CycleFold instances NIFS, it contains a /// rust-native and a in-circuit compatible versions. -pub struct CycleFoldChallengeGadget>> { +pub struct CycleFoldChallengeGadget { _c: PhantomData, // Nova's Curve2, the one used for the CycleFold circuit - _gc: PhantomData, } -impl CycleFoldChallengeGadget -where - C: CurveGroup, - GC: CurveVar> + ToConstraintFieldGadget>, - C::BaseField: PrimeField + Absorb, -{ +impl CycleFoldChallengeGadget { pub fn get_challenge_native>( transcript: &mut T, pp_hash: C::BaseField, // public params hash @@ -400,8 +307,8 @@ where transcript: &mut T, pp_hash: FpVar, // public params hash U_i_vec: Vec>, - u_i: CycleFoldCommittedInstanceVar, - cmT: GC, + u_i: CycleFoldCommittedInstanceVar, + cmT: C::Var, ) -> Result>, SynthesisError> { transcript.absorb(&pp_hash)?; transcript.absorb(&U_i_vec)?; @@ -437,50 +344,42 @@ pub trait CycleFoldConfig { Self::RANDOMNESS_BIT_LENGTH.div_ceil(Self::FIELD_CAPACITY) + 2 * Self::N_INPUT_POINTS + 2 }; - type F: Field; - type C: CurveGroup; + type C: Curve; } /// CycleFoldCircuit contains the constraints that check the correct fold of the committed /// instances from Curve1. 
Namely, it checks the random linear combinations of the elliptic curve /// (Curve1) points of u_i, U_i leading to U_{i+1} #[derive(Debug, Clone)] -pub struct CycleFoldCircuit> { - pub _gc: PhantomData, +pub struct CycleFoldCircuit { /// r_bits is the bit representation of the r whose powers are used in the /// random-linear-combination inside the CycleFoldCircuit pub r_bits: Option>, /// points to be folded in the CycleFoldCircuit pub points: Option>, - /// public inputs (cf_u_{i+1}.x) - pub x: Option>, } -impl> CycleFoldCircuit { +impl CycleFoldCircuit { /// n_points indicates the number of points being folded in the CycleFoldCircuit pub fn empty() -> Self { Self { - _gc: PhantomData, r_bits: None, points: None, - x: None, } } } -impl> ConstraintSynthesizer - for CycleFoldCircuit -where - GC: ToConstraintFieldGadget, - CFG::F: PrimeField, -{ - fn generate_constraints(self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { - let r_bits = Vec::>::new_witness(cs.clone(), || { +impl ConstraintSynthesizer> for CycleFoldCircuit { + fn generate_constraints( + self, + cs: ConstraintSystemRef>, + ) -> Result<(), SynthesisError> { + let r_bits = Vec::>>::new_witness(cs.clone(), || { Ok(self .r_bits .unwrap_or(vec![false; CFG::RANDOMNESS_BIT_LENGTH])) })?; - let points = Vec::::new_witness(cs.clone(), || { + let points = Vec::<::Var>::new_witness(cs.clone(), || { Ok(self .points .unwrap_or(vec![CFG::C::zero(); CFG::N_INPUT_POINTS])) @@ -501,27 +400,21 @@ where // P_folded = p_0 + r * P_1 + r^2 * P_2 + r^3 * P_3 + ... + r^{n-2} * P_{n-2} + r^{n-1} * P_{n-1} // so in order to do it more efficiently (less constraints) we do // P_folded = (((P_{n-1} * r + P_{n-2}) * r + P_{n-3})... ) * r + P_0 - let mut p_folded: GC = points[CFG::N_INPUT_POINTS - 1].clone(); + let mut p_folded = points[CFG::N_INPUT_POINTS - 1].clone(); for i in (0..CFG::N_INPUT_POINTS - 1).rev() { p_folded = p_folded.scalar_mul_le(r_bits.iter())? + points[i].clone(); } - let x = Vec::>::new_input(cs.clone(), || { - Ok(self.x.unwrap_or(vec![CFG::F::zero(); CFG::IO_LEN])) - })?; - #[cfg(test)] - assert_eq!(x.len(), CFG::IO_LEN); // non-constrained sanity check - // Check that the points coordinates are placed as the public input x: // In Nova, this is: x == [r, p1, p2, p3] (wheere p3 is the p_folded). // In multifolding schemes such as HyperNova, this is: // computed_x = [r, p_0, p_1, p_2, ..., p_n, p_folded], // where each p_i is in fact p_i.to_constraint_field() let r_fp = r_bits - .chunks(CFG::F::MODULUS_BIT_SIZE as usize - 1) - .map(Boolean::le_bits_to_fp_var) + .chunks(CF2::::MODULUS_BIT_SIZE as usize - 1) + .map(Boolean::le_bits_to_fp) .collect::, _>>()?; - let points_aux: Vec> = points + let points_aux = points .iter() .map(|p_i| Ok(p_i.to_constraint_field()?[..2].to_vec())) .collect::, SynthesisError>>()? @@ -529,13 +422,25 @@ where .flatten() .collect(); - let computed_x: Vec> = [ + let x = [ r_fp, points_aux, p_folded.to_constraint_field()?[..2].to_vec(), ] .concat(); - computed_x.enforce_equal(&x)?; + #[cfg(test)] + assert_eq!(x.len(), CFG::IO_LEN); // non-constrained sanity check + + // This line "converts" `x` from a witness to a public input. + // Instead of directly modifying the constraint system, we explicitly + // allocate a public input and enforce that its value is indeed `x`. + // While comparing `x` with itself seems redundant, this is necessary + // because: + // - `.value()` allows an honest prover to extract public inputs without + // computing them outside the circuit. 
+ // - `.enforce_equal()` prevents a malicious prover from claiming wrong + // public inputs that are not the honest `x` computed in-circuit. + Vec::new_input(cs.clone(), || x.value())?.enforce_equal(&x)?; Ok(()) } @@ -548,31 +453,11 @@ where /// different fields than the main NIFS impls (Nova, Mova, Ova). Could be abstracted, but it's a /// tradeoff between overcomplexity at the NIFSTrait and the (not much) need of generalization at /// the CycleFoldNIFS. -pub struct CycleFoldNIFS< - C1: CurveGroup, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, - CS2: CommitmentScheme, - const H: bool = false, -> where - ::BaseField: PrimeField, - ::BaseField: PrimeField, -{ - _c1: PhantomData, +pub struct CycleFoldNIFS, const H: bool = false> { _c2: PhantomData, - _gc2: PhantomData, _cs: PhantomData, } -impl, const H: bool> - CycleFoldNIFS -where - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, -{ +impl, const H: bool> CycleFoldNIFS { fn prove( cf_r_Fq: C2::ScalarField, // C2::Fr==C1::Fq cf_W_i: &CycleFoldWitness, @@ -609,84 +494,76 @@ where /// scheme struct because it is used both by Nova & HyperNova's CycleFold. #[allow(clippy::type_complexity)] #[allow(clippy::too_many_arguments)] -pub fn fold_cyclefold_circuit( - transcript: &mut impl Transcript, +pub fn fold_cyclefold_circuit( + transcript: &mut impl Transcript>, cf_r1cs: R1CS, cf_cs_params: CS2::ProverParams, - pp_hash: C1::ScalarField, // public params hash + pp_hash: CF2, // public params hash cf_W_i: CycleFoldWitness, // witness of the running instance cf_U_i: CycleFoldCommittedInstance, // running instance - cf_u_i_x: Vec, - cf_circuit: CycleFoldCircuit, + cf_circuit: CycleFoldCircuit, mut rng: impl RngCore, ) -> Result< ( - CycleFoldWitness, CycleFoldCommittedInstance, // u_i CycleFoldWitness, // W_i1 CycleFoldCommittedInstance, // U_i1 C2, // cmT - C2::ScalarField, // r_Fq ), Error, > where - CFG: CycleFoldConfig>, - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + CFG: CycleFoldConfig, + C2: Curve, BaseField = CF1>, CS2: CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, { - let cs2 = ConstraintSystem::::new_ref(); + let cs2 = ConstraintSystem::new_ref(); cf_circuit.generate_constraints(cs2.clone())?; let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; - let (cf_w_i, cf_x_i) = extract_w_x::(&cs2); - if cf_x_i != cf_u_i_x { - return Err(Error::NotEqual); - } + let (cf_w_i, cf_x_i) = extract_w_x(&cs2); #[cfg(test)] assert_eq!(cf_x_i.len(), CFG::IO_LEN); // fold cyclefold instances - let cf_w_i = CycleFoldWitness::::new::(cf_w_i.clone(), cf_r1cs.A.n_rows, &mut rng); - let cf_u_i: CycleFoldCommittedInstance = - cf_w_i.commit::(&cf_cs_params, cf_x_i.clone())?; + let cf_w_i = + CycleFoldWitness::::new::(cf_w_i.clone(), cf_r1cs.n_constraints(), &mut rng); + let cf_u_i = cf_w_i.commit::(&cf_cs_params, cf_x_i.clone())?; // compute T* and cmT* for CycleFoldCircuit - let (cf_T, cf_cmT) = - NIFS::, H>::compute_cyclefold_cmT( - &cf_cs_params, - &cf_r1cs, - &cf_w_i, - &cf_u_i, - &cf_W_i, - &cf_U_i, - )?; + let (cf_T, cf_cmT) = NIFS::>, H>::compute_cyclefold_cmT( + &cf_cs_params, + &cf_r1cs, + &cf_w_i, + &cf_u_i, + &cf_W_i, + &cf_U_i, + )?; - let cf_r_bits = CycleFoldChallengeGadget::::get_challenge_native( + let cf_r_bits = 
CycleFoldChallengeGadget::get_challenge_native( transcript, pp_hash, cf_U_i.clone(), cf_u_i.clone(), cf_cmT, ); - let cf_r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&cf_r_bits)) + let cf_r_Fq = CF1::::from_bigint(BigInteger::from_bits_le(&cf_r_bits)) .expect("cf_r_bits out of bounds"); - let (cf_W_i1, cf_U_i1) = CycleFoldNIFS::::prove( + let (cf_W_i1, cf_U_i1) = CycleFoldNIFS::::prove( cf_r_Fq, &cf_W_i, &cf_U_i, &cf_w_i, &cf_u_i, &cf_T, cf_cmT, )?; - let cf_r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&cf_r_bits)) - .expect("cf_r_bits out of bounds"); - Ok((cf_w_i, cf_u_i, cf_W_i1, cf_U_i1, cf_cmT, cf_r_Fq)) + + #[cfg(test)] + { + use crate::{arith::ArithRelation, folding::traits::CommittedInstanceOps}; + cf_u_i.check_incoming()?; + cf_r1cs.check_relation(&cf_w_i, &cf_u_i)?; + cf_r1cs.check_relation(&cf_W_i1, &cf_U_i1)?; + } + + Ok((cf_u_i, cf_W_i1, cf_U_i1, cf_cmT)) } #[cfg(test)] @@ -705,41 +582,18 @@ pub mod tests { use crate::transcript::poseidon::poseidon_canonical_config; use crate::utils::get_cm_coordinates; - struct TestCycleFoldConfig { + struct TestCycleFoldConfig { _c: PhantomData, } - impl CycleFoldConfig for TestCycleFoldConfig { + impl CycleFoldConfig for TestCycleFoldConfig { const RANDOMNESS_BIT_LENGTH: usize = NOVA_N_BITS_RO; const N_INPUT_POINTS: usize = N; type C = C; - type F = C::BaseField; } #[test] - fn test_committed_instance_cyclefold_var() { - let mut rng = ark_std::test_rng(); - - let ci = CycleFoldCommittedInstance:: { - cmE: Projective::rand(&mut rng), - u: Fr::rand(&mut rng), - cmW: Projective::rand(&mut rng), - x: vec![Fr::rand(&mut rng); 1], - }; - - // check the instantiation of the CycleFold side: - let cs = ConstraintSystem::::new_ref(); - let ciVar = - CommittedInstanceInCycleFoldVar::::new_witness(cs.clone(), || { - Ok(ci.clone()) - }) - .unwrap(); - assert_eq!(ciVar.cmE.value().unwrap(), ci.cmE); - assert_eq!(ciVar.cmW.value().unwrap(), ci.cmW); - } - - #[test] - fn test_CycleFoldCircuit_n_points_constraints() { + fn test_CycleFoldCircuit_n_points_constraints() -> Result<(), Error> { const n: usize = 16; let mut rng = ark_std::test_rng(); @@ -751,8 +605,10 @@ pub mod tests { use std::ops::Mul; let rho_raw = Fq::rand(&mut rng); let rho_bits = rho_raw.into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec(); - let rho_Fq = Fq::from_bigint(BigInteger::from_bits_le(&rho_bits)).unwrap(); - let rho_Fr = Fr::from_bigint(BigInteger::from_bits_le(&rho_bits)).unwrap(); + let rho_Fq = + Fq::from_bigint(BigInteger::from_bits_le(&rho_bits)).ok_or(Error::OutOfBounds)?; + let rho_Fr = + Fr::from_bigint(BigInteger::from_bits_le(&rho_bits)).ok_or(Error::OutOfBounds)?; let mut res = Projective::zero(); use ark_std::One; let mut rho_i = Fr::one(); @@ -771,18 +627,19 @@ pub mod tests { get_cm_coordinates(&res), ] .concat(); - let cf_circuit = CycleFoldCircuit::, GVar> { - _gc: PhantomData, + let cf_circuit = CycleFoldCircuit::> { r_bits: Some(rho_bits), points: Some(points), - x: Some(x.clone()), }; - cf_circuit.generate_constraints(cs.clone()).unwrap(); - assert!(cs.is_satisfied().unwrap()); + cf_circuit.generate_constraints(cs.clone())?; + assert!(cs.is_satisfied()?); + // `instance_assignment[0]` is the constant term 1 + assert_eq!(&cs.borrow().unwrap().instance_assignment[1..], &x); + Ok(()) } #[test] - fn test_nifs_full_gadget() { + fn test_nifs_full_gadget() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); @@ -811,35 +668,28 @@ pub mod tests { &ci1, &ci2, &cmT, - ) - .unwrap(); + 
)?; let cs = ConstraintSystem::::new_ref(); - let r_bitsVar = Vec::>::new_witness(cs.clone(), || Ok(r_bits)).unwrap(); - let ci1Var = - CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { - Ok(ci1.clone()) - }) - .unwrap(); - let ci2Var = - CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { - Ok(ci2.clone()) - }) - .unwrap(); - let ci3Var = - CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { - Ok(ci3.clone()) - }) - .unwrap(); - let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT)).unwrap(); + let r_bitsVar = Vec::>::new_witness(cs.clone(), || Ok(r_bits))?; + let ci1Var = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(ci1.clone()) + })?; + let ci2Var = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(ci2.clone()) + })?; + let ci3Var = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(ci3.clone()) + })?; + let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT))?; - NIFSFullGadget::::verify(r_bitsVar, cmTVar, ci1Var, ci2Var, ci3Var) - .unwrap(); - assert!(cs.is_satisfied().unwrap()); + NIFSFullGadget::::verify(r_bitsVar, cmTVar, ci1Var, ci2Var, ci3Var)?; + assert!(cs.is_satisfied()?); + Ok(()) } #[test] - fn test_cyclefold_challenge_gadget() { + fn test_cyclefold_challenge_gadget() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); let mut transcript = PoseidonSponge::::new(&poseidon_config); @@ -864,7 +714,7 @@ pub mod tests { // compute the challenge natively let pp_hash = Fq::from(42u32); // only for test - let r_bits = CycleFoldChallengeGadget::::get_challenge_native( + let r_bits = CycleFoldChallengeGadget::::get_challenge_native( &mut transcript, pp_hash, U_i.clone(), @@ -873,40 +723,36 @@ pub mod tests { ); let cs = ConstraintSystem::::new_ref(); - let u_iVar = - CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { - Ok(u_i.clone()) - }) - .unwrap(); - let U_iVar = - CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { - Ok(U_i.clone()) - }) - .unwrap(); - let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT)).unwrap(); + let u_iVar = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(u_i.clone()) + })?; + let U_iVar = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(U_i.clone()) + })?; + let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT))?; let mut transcript_var = PoseidonSpongeVar::::new(ConstraintSystem::::new_ref(), &poseidon_config); - let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash)).unwrap(); - let r_bitsVar = CycleFoldChallengeGadget::::get_challenge_gadget( + let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash))?; + let r_bitsVar = CycleFoldChallengeGadget::::get_challenge_gadget( &mut transcript_var, pp_hashVar, - U_iVar.to_native_sponge_field_elements().unwrap(), + U_iVar.to_native_sponge_field_elements()?, u_iVar, cmTVar, - ) - .unwrap(); - assert!(cs.is_satisfied().unwrap()); + )?; + assert!(cs.is_satisfied()?); // check that the natively computed and in-circuit computed hashes match - let rVar = Boolean::le_bits_to_fp_var(&r_bitsVar).unwrap(); - let r = Fq::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); - assert_eq!(rVar.value().unwrap(), r); - assert_eq!(r_bitsVar.value().unwrap(), r_bits); + let rVar = Boolean::le_bits_to_fp(&r_bitsVar)?; + let r = Fq::from_bigint(BigInteger::from_bits_le(&r_bits)).ok_or(Error::OutOfBounds)?; + assert_eq!(rVar.value()?, r); + assert_eq!(r_bitsVar.value()?, r_bits); + Ok(()) } #[test] - fn 
test_cyclefold_hash_gadget() { + fn test_cyclefold_hash_gadget() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); let sponge = PoseidonSponge::::new(&poseidon_config); @@ -923,20 +769,16 @@ pub mod tests { let h = U_i.hash_cyclefold(&sponge, pp_hash); let cs = ConstraintSystem::::new_ref(); - let U_iVar = - CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { - Ok(U_i.clone()) - }) - .unwrap(); - let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash)).unwrap(); - let (hVar, _) = U_iVar - .hash( - &PoseidonSpongeVar::new(cs.clone(), &poseidon_config), - pp_hashVar, - ) - .unwrap(); - hVar.enforce_equal(&FpVar::new_witness(cs.clone(), || Ok(h)).unwrap()) - .unwrap(); - assert!(cs.is_satisfied().unwrap()); + let U_iVar = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(U_i.clone()) + })?; + let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash))?; + let (hVar, _) = U_iVar.hash( + &PoseidonSpongeVar::new(cs.clone(), &poseidon_config), + pp_hashVar, + )?; + hVar.enforce_equal(&FpVar::new_witness(cs.clone(), || Ok(h))?)?; + assert!(cs.is_satisfied()?); + Ok(()) } } diff --git a/folding-schemes/src/folding/circuits/decider/mod.rs b/folding-schemes/src/folding/circuits/decider/mod.rs index f2513a7d..758636c6 100644 --- a/folding-schemes/src/folding/circuits/decider/mod.rs +++ b/folding-schemes/src/folding/circuits/decider/mod.rs @@ -1,13 +1,11 @@ use ark_crypto_primitives::sponge::{ poseidon::constraints::PoseidonSpongeVar, CryptographicSponge, }; -use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_poly::Polynomial; use ark_r1cs_std::{ fields::{fp::FpVar, FieldVar}, poly::{domain::Radix2DomainVar, evaluations::univariate::EvaluationsVar}, - ToConstraintFieldGadget, }; use ark_relations::r1cs::SynthesisError; use ark_std::log2; @@ -15,8 +13,8 @@ use ark_std::log2; use crate::folding::traits::{CommittedInstanceOps, CommittedInstanceVarOps, Dummy, WitnessOps}; use crate::transcript::{Transcript, TranscriptVar}; use crate::utils::vec::poly_from_vec; -use crate::Error; -use crate::{arith::Arith, folding::circuits::CF1}; +use crate::{arith::ArithRelation, folding::circuits::CF1}; +use crate::{Curve, Error}; pub mod off_chain; pub mod on_chain; @@ -26,11 +24,7 @@ pub mod on_chain; pub struct KZGChallengesGadget {} impl KZGChallengesGadget { - pub fn get_challenges_native< - C: CurveGroup, - T: Transcript>, - U: CommittedInstanceOps, - >( + pub fn get_challenges_native>, U: CommittedInstanceOps>( transcript: &mut T, U_i: &U, ) -> Vec> { @@ -43,7 +37,7 @@ impl KZGChallengesGadget { } pub fn get_challenges_gadget< - C: CurveGroup, + C: Curve, S: CryptographicSponge, T: TranscriptVar, S>, U: CommittedInstanceVarOps, @@ -53,7 +47,7 @@ impl KZGChallengesGadget { ) -> Result>>, SynthesisError> { let mut challenges = vec![]; for cm in U_i.get_commitments() { - transcript.absorb(&cm.to_constraint_field()?)?; + transcript.absorb_nonnative(&cm)?; challenges.push(transcript.get_challenge()?); } Ok(challenges) @@ -101,11 +95,11 @@ impl EvalGadget { /// In the future, we may introduce a better solution that uses a trait for all /// folding schemes that specifies their native and in-circuit behaviors. 
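// `KZGChallengesGadget` above derives one KZG evaluation challenge per commitment of
// the running instance: each commitment is absorbed into the transcript and a challenge
// is squeezed right after it, so prover and verifier agree on the same ordered list.
// Dependency-free sketch of that pattern with a toy byte-oriented transcript built on
// std hashing instead of the Poseidon sponge used in Sonobe; `ToyTranscript` and
// `kzg_challenges` are hypothetical names.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct ToyTranscript {
    state: u64,
}

impl ToyTranscript {
    fn new(label: &[u8]) -> Self {
        let mut h = DefaultHasher::new();
        label.hash(&mut h);
        Self { state: h.finish() }
    }
    fn absorb(&mut self, bytes: &[u8]) {
        let mut h = DefaultHasher::new();
        self.state.hash(&mut h);
        bytes.hash(&mut h);
        self.state = h.finish();
    }
    fn get_challenge(&mut self) -> u64 {
        let mut h = DefaultHasher::new();
        self.state.hash(&mut h);
        self.state = h.finish();
        self.state
    }
}

// One challenge per commitment, in the order returned by `U_i.get_commitments()`.
fn kzg_challenges(transcript: &mut ToyTranscript, commitments: &[Vec<u8>]) -> Vec<u64> {
    commitments
        .iter()
        .map(|cm| {
            transcript.absorb(cm);
            transcript.get_challenge()
        })
        .collect()
}

// usage: both sides run the same absorb/squeeze sequence and obtain identical challenges:
// let mut t = ToyTranscript::new(b"kzg-challenges");
// let challenges = kzg_challenges(&mut t, &[vec![1, 2, 3], vec![4, 5, 6]]);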
pub trait DeciderEnabledNIFS< - C: CurveGroup, + C: Curve, RU: CommittedInstanceOps, // Running instance IU: CommittedInstanceOps, // Incoming instance W: WitnessOps>, - A: Arith, + A: ArithRelation, > { type ProofDummyCfg; @@ -155,7 +149,7 @@ pub mod tests { // checks that the gadget and native implementations of the challenge computation match #[test] - fn test_kzg_challenge_gadget() { + fn test_kzg_challenge_gadget() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); let mut transcript = PoseidonSponge::::new(&poseidon_config); @@ -172,20 +166,20 @@ pub mod tests { let cs = ConstraintSystem::::new_ref(); let U_iVar = - CommittedInstanceVar::::new_witness(cs.clone(), || Ok(U_i.clone())) - .unwrap(); + CommittedInstanceVar::::new_witness(cs.clone(), || Ok(U_i.clone()))?; let mut transcript_var = PoseidonSpongeVar::::new(cs.clone(), &poseidon_config); let challenges_var = - KZGChallengesGadget::get_challenges_gadget(&mut transcript_var, &U_iVar).unwrap(); - assert!(cs.is_satisfied().unwrap()); + KZGChallengesGadget::get_challenges_gadget(&mut transcript_var, &U_iVar)?; + assert!(cs.is_satisfied()?); // check that the natively computed and in-circuit computed hashes match - assert_eq!(challenges_var.value().unwrap(), challenges); + assert_eq!(challenges_var.value()?, challenges); + Ok(()) } #[test] - fn test_polynomial_interpolation() { + fn test_polynomial_interpolation() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let n = 12; let l = 1 << n; @@ -196,16 +190,17 @@ pub mod tests { let challenge = Fr::rand(&mut rng); use ark_poly::Polynomial; - let polynomial = poly_from_vec(v.to_vec()).unwrap(); + let polynomial = poly_from_vec(v.to_vec())?; let eval = polynomial.evaluate(&challenge); let cs = ConstraintSystem::::new_ref(); - let vVar = Vec::>::new_witness(cs.clone(), || Ok(v)).unwrap(); - let challengeVar = FpVar::::new_witness(cs.clone(), || Ok(challenge)).unwrap(); + let vVar = Vec::>::new_witness(cs.clone(), || Ok(v))?; + let challengeVar = FpVar::::new_witness(cs.clone(), || Ok(challenge))?; - let evalVar = EvalGadget::evaluate_gadget(&vVar, &challengeVar).unwrap(); + let evalVar = EvalGadget::evaluate_gadget(&vVar, &challengeVar)?; - assert_eq!(evalVar.value().unwrap(), eval); - assert!(cs.is_satisfied().unwrap()); + assert_eq!(evalVar.value()?, eval); + assert!(cs.is_satisfied()?); + Ok(()) } } diff --git a/folding-schemes/src/folding/circuits/decider/off_chain.rs b/folding-schemes/src/folding/circuits/decider/off_chain.rs index c0d4ff53..43ba256c 100644 --- a/folding-schemes/src/folding/circuits/decider/off_chain.rs +++ b/folding-schemes/src/folding/circuits/decider/off_chain.rs @@ -5,19 +5,15 @@ use ark_crypto_primitives::sponge::{ constraints::{AbsorbGadget, CryptographicSpongeVar}, poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig}, - Absorb, -}; -use ark_ec::CurveGroup; -use ark_r1cs_std::{ - alloc::AllocVar, eq::EqGadget, fields::fp::FpVar, prelude::CurveVar, ToConstraintFieldGadget, }; +use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget, fields::fp::FpVar}; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; use ark_std::{marker::PhantomData, Zero}; use crate::{ arith::{ r1cs::{circuits::R1CSMatricesVar, R1CS}, - Arith, ArithGadget, + ArithRelation, ArithRelationGadget, }, folding::{ circuits::{ @@ -31,6 +27,7 @@ use crate::{ nova::{decider_eth_circuit::WitnessVar, nifs::nova_circuits::CommittedInstanceVar}, traits::{CommittedInstanceOps, CommittedInstanceVarOps, 
Dummy, WitnessOps, WitnessVarOps}, }, + Curve, }; use super::DeciderEnabledNIFS; @@ -38,17 +35,15 @@ use super::DeciderEnabledNIFS; /// Circuit that implements part of the in-circuit checks needed for the offchain verification over /// the Curve2's BaseField (=Curve1's ScalarField). pub struct GenericOffchainDeciderCircuit1< - C1: CurveGroup, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, - RU: CommittedInstanceOps, // Running instance - IU: CommittedInstanceOps, // Incoming instance - W: WitnessOps>, // Witness - A: Arith, // Constraint system - AVar: ArithGadget, // In-circuit representation of `A` + C1: Curve, + C2: Curve, + RU: CommittedInstanceOps, // Running instance + IU: CommittedInstanceOps, // Incoming instance + W: WitnessOps>, // Witness + A: ArithRelation, // Constraint system + AVar: ArithRelationGadget, // In-circuit representation of `A` D: DeciderEnabledNIFS, > { - pub _gc2: PhantomData, pub _avar: PhantomData, /// Constraint system of the Augmented Function circuit pub arith: A, @@ -81,14 +76,13 @@ pub struct GenericOffchainDeciderCircuit1< } impl< - C1: CurveGroup, - C2: CurveGroup, BaseField = CF1>, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, BaseField = CF1>, RU: CommittedInstanceOps + for<'a> Dummy<&'a A>, IU: CommittedInstanceOps + for<'a> Dummy<&'a A>, W: WitnessOps> + for<'a> Dummy<&'a A>, - A: Arith, - AVar: ArithGadget + AllocVar>, + A: ArithRelation, + AVar: ArithRelationGadget + AllocVar>, D: DeciderEnabledNIFS, > Dummy<( @@ -99,7 +93,7 @@ impl< D::RandomnessDummyCfg, usize, usize, - )> for GenericOffchainDeciderCircuit1 + )> for GenericOffchainDeciderCircuit1 { fn dummy( ( @@ -121,7 +115,6 @@ impl< ), ) -> Self { Self { - _gc2: PhantomData, _avar: PhantomData, poseidon_config, pp_hash: Zero::zero(), @@ -145,20 +138,18 @@ impl< } impl< - C1: CurveGroup, - C2: CurveGroup, BaseField = CF1>, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, BaseField = CF1>, RU: CommittedInstanceOps, IU: CommittedInstanceOps, W: WitnessOps>, - A: Arith, - AVar: ArithGadget + AllocVar>, + A: ArithRelation, + AVar: ArithRelationGadget + AllocVar>, D: DeciderEnabledNIFS, > ConstraintSynthesizer> - for GenericOffchainDeciderCircuit1 + for GenericOffchainDeciderCircuit1 where RU::Var: AbsorbGadget> + CommittedInstanceVarOps>, - CF1: Absorb, { fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { let arith = AVar::new_witness(cs.clone(), || Ok(&self.arith))?; @@ -179,7 +170,7 @@ where U_i1.get_commitments().enforce_equal(&U_i1_commitments)?; let cf_U_i = - CycleFoldCommittedInstanceVar::::new_input(cs.clone(), || Ok(self.cf_U_i))?; + CycleFoldCommittedInstanceVar::::new_input(cs.clone(), || Ok(self.cf_U_i))?; // allocate the inputs for the checks 7.1 and 7.2 let kzg_challenges = Vec::new_input(cs.clone(), || Ok(self.kzg_challenges))?; @@ -236,7 +227,7 @@ where /// Circuit that implements part of the in-circuit checks needed for the offchain verification over /// the Curve1's BaseField (=Curve2's ScalarField). 
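// `GenericOffchainDeciderCircuit1` above is reused across folding schemes by being
// generic over the folded relation: `A: ArithRelation` gives the native check and
// `AVar: ArithRelationGadget` its in-circuit counterpart. A plain-Rust sketch of the
// native side of that shape, with a toy relation; trait and struct names here are
// illustrative and not Sonobe's exact definitions.
trait ToyArithRelation<W, U> {
    // Ok(()) iff (w, u) satisfies the relation.
    fn check_relation(&self, w: &W, u: &U) -> Result<(), String>;
}

// Toy relation: the witness entries must sum to the public value `u`.
struct SumRelation;

impl ToyArithRelation<Vec<u64>, u64> for SumRelation {
    fn check_relation(&self, w: &Vec<u64>, u: &u64) -> Result<(), String> {
        if w.iter().sum::<u64>() == *u {
            Ok(())
        } else {
            Err("relation not satisfied".into())
        }
    }
}

// A verifier that depends on the relation only through the trait, mirroring how the
// decider circuit takes the constraint system `A` and its instances as type parameters.
struct ToyDecider<A, W, U> {
    arith: A,
    witness: W,
    instance: U,
}

impl<A: ToyArithRelation<W, U>, W, U> ToyDecider<A, W, U> {
    fn verify(&self) -> Result<(), String> {
        self.arith.check_relation(&self.witness, &self.instance)
    }
}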
-pub struct GenericOffchainDeciderCircuit2 { +pub struct GenericOffchainDeciderCircuit2 { /// R1CS of the CycleFold circuit pub cf_arith: R1CS>, pub poseidon_config: PoseidonConfig>, @@ -252,7 +243,7 @@ pub struct GenericOffchainDeciderCircuit2 { pub kzg_evaluations: Vec>, } -impl Dummy<(R1CS>, PoseidonConfig>, usize)> +impl Dummy<(R1CS>, PoseidonConfig>, usize)> for GenericOffchainDeciderCircuit2 { fn dummy( @@ -274,7 +265,7 @@ impl Dummy<(R1CS>, PoseidonConfig>, usize)> } } -impl ConstraintSynthesizer> for GenericOffchainDeciderCircuit2 { +impl ConstraintSynthesizer> for GenericOffchainDeciderCircuit2 { fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { let cf_r1cs = R1CSMatricesVar::, FpVar>>::new_witness(cs.clone(), || { Ok(self.cf_arith.clone()) diff --git a/folding-schemes/src/folding/circuits/decider/on_chain.rs b/folding-schemes/src/folding/circuits/decider/on_chain.rs index 98a62002..786054b0 100644 --- a/folding-schemes/src/folding/circuits/decider/on_chain.rs +++ b/folding-schemes/src/folding/circuits/decider/on_chain.rs @@ -3,17 +3,13 @@ use ark_crypto_primitives::sponge::{ constraints::{AbsorbGadget, CryptographicSpongeVar}, poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig}, - Absorb, -}; -use ark_ec::CurveGroup; -use ark_r1cs_std::{ - alloc::AllocVar, eq::EqGadget, fields::fp::FpVar, prelude::CurveVar, ToConstraintFieldGadget, }; +use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget, fields::fp::FpVar}; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; use ark_std::{marker::PhantomData, Zero}; use crate::{ - arith::{r1cs::R1CS, Arith, ArithGadget}, + arith::{r1cs::R1CS, ArithRelation, ArithRelationGadget}, commitment::pedersen::Params as PedersenParams, folding::{ circuits::{ @@ -26,6 +22,7 @@ use crate::{ }, traits::{CommittedInstanceOps, CommittedInstanceVarOps, Dummy, WitnessOps, WitnessVarOps}, }, + Curve, }; use super::DeciderEnabledNIFS; @@ -62,17 +59,15 @@ use super::DeciderEnabledNIFS; /// /// For more details, see [https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html]. 
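// The onchain decider carries `cf_pedersen_params` and, in the hunk below, re-computes
// the Pedersen commitments of the CycleFold witness in-circuit (`PedersenGadget::commit`)
// and enforces equality with `cf_U_i.cmE` / `cf_U_i.cmW`. Dependency-free sketch of the
// Pedersen relation being enforced, written in a multiplicative group modulo a small
// prime instead of the elliptic-curve group C2 used here; names and constants are
// illustrative only.
const P_TOY: u128 = 2_147_483_647; // toy group modulus (2^31 - 1)

fn pow_mod(mut base: u128, mut exp: u128, modulus: u128) -> u128 {
    let mut acc = 1u128;
    base %= modulus;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % modulus;
        }
        base = base * base % modulus;
        exp >>= 1;
    }
    acc
}

// Pedersen commitment: commit(msg, r) = h^r * prod_i g_i^{msg_i}.
fn pedersen_commit(h: u128, gens: &[u128], msg: &[u128], blinding: u128) -> u128 {
    assert_eq!(gens.len(), msg.len());
    let mut c = pow_mod(h, blinding, P_TOY);
    for (g, m) in gens.iter().zip(msg) {
        c = c * pow_mod(*g, *m, P_TOY) % P_TOY;
    }
    c
}

// What the decider enforces: the claimed commitment opens to (msg, blinding).
fn check_opening(claimed: u128, h: u128, gens: &[u128], msg: &[u128], blinding: u128) -> bool {
    claimed == pedersen_commit(h, gens, msg, blinding)
}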
pub struct GenericOnchainDeciderCircuit< - C1: CurveGroup, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, - RU: CommittedInstanceOps, // Running instance - IU: CommittedInstanceOps, // Incoming instance - W: WitnessOps>, // Witness - A: Arith, // Constraint system - AVar: ArithGadget, // In-circuit representation of `A` + C1: Curve, + C2: Curve, + RU: CommittedInstanceOps, // Running instance + IU: CommittedInstanceOps, // Incoming instance + W: WitnessOps>, // Witness + A: ArithRelation, // Constraint system + AVar: ArithRelationGadget, // In-circuit representation of `A` D: DeciderEnabledNIFS, > { - pub _gc2: PhantomData, pub _avar: PhantomData, /// Constraint system of the Augmented Function circuit pub arith: A, @@ -110,14 +105,13 @@ pub struct GenericOnchainDeciderCircuit< } impl< - C1: CurveGroup, - C2: CurveGroup, BaseField = CF1>, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, BaseField = CF1>, RU: CommittedInstanceOps + for<'a> Dummy<&'a A>, IU: CommittedInstanceOps + for<'a> Dummy<&'a A>, W: WitnessOps> + for<'a> Dummy<&'a A>, - A: Arith, - AVar: ArithGadget + AllocVar>, + A: ArithRelation, + AVar: ArithRelationGadget + AllocVar>, D: DeciderEnabledNIFS, > Dummy<( @@ -129,7 +123,7 @@ impl< D::RandomnessDummyCfg, usize, usize, - )> for GenericOnchainDeciderCircuit + )> for GenericOnchainDeciderCircuit { fn dummy( ( @@ -153,7 +147,6 @@ impl< ), ) -> Self { Self { - _gc2: PhantomData, _avar: PhantomData, cf_pedersen_params, poseidon_config, @@ -180,20 +173,17 @@ impl< } impl< - C1: CurveGroup, - C2: CurveGroup, BaseField = CF1>, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, BaseField = CF1>, RU: CommittedInstanceOps, IU: CommittedInstanceOps, W: WitnessOps>, - A: Arith, - AVar: ArithGadget + AllocVar>, + A: ArithRelation, + AVar: ArithRelationGadget + AllocVar>, D: DeciderEnabledNIFS, - > ConstraintSynthesizer> - for GenericOnchainDeciderCircuit + > ConstraintSynthesizer> for GenericOnchainDeciderCircuit where RU::Var: AbsorbGadget> + CommittedInstanceVarOps>, - CF1: Absorb, { fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { let arith = AVar::new_witness(cs.clone(), || Ok(&self.arith))?; @@ -214,7 +204,7 @@ where U_i1.get_commitments().enforce_equal(&U_i1_commitments)?; let cf_U_i = - CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || Ok(self.cf_U_i))?; + CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || Ok(self.cf_U_i))?; // allocate the inputs for the check 7.1 and 7.2 let kzg_challenges = Vec::new_input(cs.clone(), || Ok(self.kzg_challenges))?; @@ -258,15 +248,15 @@ where cyclefold::CycleFoldWitnessVar, nonnative::uint::NonNativeUintVar, }, }; - use ark_r1cs_std::ToBitsGadget; + use ark_r1cs_std::{convert::ToBitsGadget, groups::CurveVar}; let cf_W_i = CycleFoldWitnessVar::::new_witness(cs.clone(), || Ok(self.cf_W_i))?; // 4. check Pedersen commitments of cf_U_i.{cmE, cmW} - let H = GC2::constant(self.cf_pedersen_params.h); + let H = C2::Var::constant(self.cf_pedersen_params.h); let G = self .cf_pedersen_params .generators .iter() - .map(|&g| GC2::constant(g.into())) + .map(|&g| C2::Var::constant(g.into())) .collect::>(); let cf_W_i_E_bits = cf_W_i .E @@ -278,9 +268,9 @@ where .iter() .map(|W_i| W_i.to_bits_le()) .collect::, _>>()?; - PedersenGadget::::commit(&H, &G, &cf_W_i_E_bits, &cf_W_i.rE.to_bits_le()?)? + PedersenGadget::::commit(&H, &G, &cf_W_i_E_bits, &cf_W_i.rE.to_bits_le()?)? 
.enforce_equal(&cf_U_i.cmE)?; - PedersenGadget::::commit(&H, &G, &cf_W_i_W_bits, &cf_W_i.rW.to_bits_le()?)? + PedersenGadget::::commit(&H, &G, &cf_W_i_W_bits, &cf_W_i.rW.to_bits_le()?)? .enforce_equal(&cf_U_i.cmW)?; let cf_r1cs = R1CSMatricesVar::, NonNativeUintVar>>::new_constant( diff --git a/folding-schemes/src/folding/circuits/mod.rs b/folding-schemes/src/folding/circuits/mod.rs index 48d5cf11..5b6af02b 100644 --- a/folding-schemes/src/folding/circuits/mod.rs +++ b/folding-schemes/src/folding/circuits/mod.rs @@ -1,5 +1,5 @@ /// Circuits and gadgets shared across the different folding schemes. -use ark_ec::{CurveGroup, Group}; +use ark_ec::{CurveGroup, PrimeGroup}; use ark_ff::Field; pub mod cyclefold; @@ -11,7 +11,7 @@ pub mod utils; /// CF1 uses the ScalarField of the given C. CF1 represents the ConstraintField used for the main /// folding circuit which is over E1::Fr, where E1 is the main curve where we do the folding. /// In CF1, the points of C can not be natively represented. -pub type CF1 = ::ScalarField; +pub type CF1 = ::ScalarField; /// CF2 uses the BaseField of the given C. CF2 represents the ConstraintField used for the /// CycleFold circuit which is over E2::Fr=E1::Fq, where E2 is the auxiliary curve (from /// [CycleFold](https://eprint.iacr.org/2023/1192.pdf) approach) where we check the folding of the diff --git a/folding-schemes/src/folding/circuits/nonnative/affine.rs b/folding-schemes/src/folding/circuits/nonnative/affine.rs index 906976ea..6fcce652 100644 --- a/folding-schemes/src/folding/circuits/nonnative/affine.rs +++ b/folding-schemes/src/folding/circuits/nonnative/affine.rs @@ -1,37 +1,37 @@ -use ark_ec::{short_weierstrass::SWFlags, AffineRepr, CurveGroup}; -use ark_ff::{Field, PrimeField}; +use ark_ec::{ + short_weierstrass::{Projective, SWCurveConfig, SWFlags}, + AffineRepr, CurveGroup, +}; +use ark_ff::PrimeField; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, eq::EqGadget, fields::fp::FpVar, prelude::Boolean, - R1CSVar, ToConstraintFieldGadget, + R1CSVar, }; use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; use ark_serialize::{CanonicalSerialize, CanonicalSerializeWithFlags}; -use ark_std::Zero; -use core::borrow::Borrow; +use ark_std::{borrow::Borrow, One, Zero}; use crate::{ - folding::traits::Inputize, + folding::traits::{Inputize, InputizeNonNative}, transcript::{AbsorbNonNative, AbsorbNonNativeGadget}, + Curve, Field, }; -use super::uint::{nonnative_field_to_field_elements, NonNativeUintVar}; +use super::uint::NonNativeUintVar; /// NonNativeAffineVar represents an elliptic curve point in Affine representation in the non-native /// field, over the constraint field. It is not intended to perform operations, but just to contain /// the affine coordinates in order to perform hash operations of the point. 
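// `NonNativeAffineVar`, defined just below, stores only the affine x/y coordinates of a
// point as non-native limbs so the point can be hashed/absorbed or exposed as a public
// input; it performs no group arithmetic. The identity point has no affine coordinates,
// so `affine.xy().unwrap_or_default()` maps it to (0, 0). Dependency-free analogue with
// a toy point type (hypothetical names); a sketch of the per-coordinate limb splitting
// appears further below, alongside the uint.rs changes.
#[derive(Clone, Copy)]
enum ToyPoint {
    Infinity,
    Affine { x: u64, y: u64 },
}

// The coordinate pair that gets absorbed: (x, y), or (0, 0) for the identity, mirroring
// `affine.xy().unwrap_or_default()` in the impls below.
fn hash_preimage(p: ToyPoint) -> (u64, u64) {
    match p {
        ToyPoint::Infinity => (0, 0),
        ToyPoint::Affine { x, y } => (x, y),
    }
}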
#[derive(Debug, Clone)] -pub struct NonNativeAffineVar { +pub struct NonNativeAffineVar { pub x: NonNativeUintVar, pub y: NonNativeUintVar, } -impl AllocVar for NonNativeAffineVar -where - C: CurveGroup, -{ +impl AllocVar for NonNativeAffineVar { fn new_variable>( cs: impl Into>, f: impl FnOnce() -> Result, @@ -41,18 +41,17 @@ where let cs = cs.into(); let affine = val.borrow().into_affine(); - let zero = (&C::BaseField::zero(), &C::BaseField::zero()); - let (x, y) = affine.xy().unwrap_or(zero); + let (x, y) = affine.xy().unwrap_or_default(); - let x = NonNativeUintVar::new_variable(cs.clone(), || Ok(*x), mode)?; - let y = NonNativeUintVar::new_variable(cs.clone(), || Ok(*y), mode)?; + let x = NonNativeUintVar::new_variable(cs.clone(), || Ok(x), mode)?; + let y = NonNativeUintVar::new_variable(cs.clone(), || Ok(y), mode)?; Ok(Self { x, y }) }) } } -impl R1CSVar for NonNativeAffineVar { +impl R1CSVar for NonNativeAffineVar { type Value = C; fn cs(&self) -> ConstraintSystemRef { @@ -60,16 +59,10 @@ impl R1CSVar for NonNativeAffineVar { } fn value(&self) -> Result { - debug_assert_eq!(C::BaseField::extension_degree(), 1); - - let x = ::BasePrimeField::from_le_bytes_mod_order( - &self.x.value()?.to_bytes_le(), - ); - let y = ::BasePrimeField::from_le_bytes_mod_order( - &self.y.value()?.to_bytes_le(), - ); + let x = C::BaseField::from_le_bytes_mod_order(&self.x.value()?.to_bytes_le()); + let y = C::BaseField::from_le_bytes_mod_order(&self.y.value()?.to_bytes_le()); // Below is a workaround to convert the `x` and `y` coordinates to a - // point. This is because the `CurveGroup` trait does not provide a + // point. This is because the `SonobeCurve` trait does not provide a // method to construct a point from `BaseField` elements. let mut bytes = vec![]; // `unwrap` below is safe because serialization of a `PrimeField` value @@ -90,22 +83,12 @@ impl R1CSVar for NonNativeAffineVar { .unwrap(); // `unwrap` below is safe because `bytes` is constructed from the `x` // and `y` coordinates of a valid point, and these coordinates are - // serialized in the same way as the `CurveGroup` implementation. + // serialized in the same way as the `SonobeCurve` implementation. Ok(C::deserialize_uncompressed_unchecked(&bytes[..]).unwrap()) } } -impl ToConstraintFieldGadget for NonNativeAffineVar { - // Used for converting `NonNativeAffineVar` to a vector of `FpVar` with minimum length in - // the circuit. 
- fn to_constraint_field(&self) -> Result>, SynthesisError> { - let x = self.x.to_constraint_field()?; - let y = self.y.to_constraint_field()?; - Ok([x, y].concat()) - } -} - -impl EqGadget for NonNativeAffineVar { +impl EqGadget for NonNativeAffineVar { fn is_eq(&self, other: &Self) -> Result, SynthesisError> { let mut result = Boolean::TRUE; if self.x.0.len() != other.x.0.len() { @@ -124,7 +107,7 @@ impl EqGadget for NonNativeAffineVar { if l.ub != r.ub { return Err(SynthesisError::Unsatisfiable); } - result = result.and(&l.v.is_eq(&r.v)?)?; + result &= l.v.is_eq(&r.v)?; } Ok(result) } @@ -152,33 +135,7 @@ impl EqGadget for NonNativeAffineVar { } } -/// The out-circuit counterpart of `NonNativeAffineVar::to_constraint_field` -#[allow(clippy::type_complexity)] -pub(crate) fn nonnative_affine_to_field_elements( - p: C, -) -> (Vec, Vec) { - let affine = p.into_affine(); - let zero = (&C::BaseField::zero(), &C::BaseField::zero()); - let (x, y) = affine.xy().unwrap_or(zero); - - let x = nonnative_field_to_field_elements(x); - let y = nonnative_field_to_field_elements(y); - (x, y) -} - -impl Inputize> for C { - fn inputize(&self) -> Vec { - let affine = self.into_affine(); - let zero = (&C::BaseField::zero(), &C::BaseField::zero()); - let (x, y) = affine.xy().unwrap_or(zero); - - let x = x.inputize(); - let y = y.inputize(); - [x, y].concat() - } -} - -impl NonNativeAffineVar { +impl NonNativeAffineVar { pub fn zero() -> Self { // `unwrap` below is safe because we are allocating a constant value, // which is guaranteed to succeed. @@ -186,29 +143,56 @@ impl NonNativeAffineVar { } } -impl AbsorbNonNative for C { - fn to_native_sponge_field_elements(&self, dest: &mut Vec) { - let (x, y) = nonnative_affine_to_field_elements(*self); - dest.extend(x); - dest.extend(y); +impl> AbsorbNonNative for Projective
{ + fn to_native_sponge_field_elements(&self, dest: &mut Vec) { + let affine = self.into_affine(); + let (x, y) = affine.xy().unwrap_or_default(); + + [x, y].to_native_sponge_field_elements(dest); } } -impl AbsorbNonNativeGadget for NonNativeAffineVar { +impl AbsorbNonNativeGadget for NonNativeAffineVar { fn to_native_sponge_field_elements( &self, ) -> Result>, SynthesisError> { - self.to_constraint_field() + [&self.x, &self.y].to_native_sponge_field_elements() + } +} + +impl> Inputize for Projective
{ + /// Returns the internal representation in the same order as how the value + /// is allocated in `ProjectiveVar::new_input`. + fn inputize(&self) -> Vec { + let affine = self.into_affine(); + match affine.xy() { + Some((x, y)) => vec![x, y, One::one()], + None => vec![Zero::zero(), One::one(), Zero::zero()], + } + } +} + +impl> InputizeNonNative for Projective
{ + /// Returns the internal representation in the same order as how the value + /// is allocated in `NonNativeAffineVar::new_input`. + fn inputize_nonnative(&self) -> Vec { + let affine = self.into_affine(); + let (x, y) = affine.xy().unwrap_or_default(); + + [x, y].inputize_nonnative() } } #[cfg(test)] mod tests { - use super::*; - use ark_pallas::{Fr, Projective}; + use ark_pallas::{Fq, Fr, PallasConfig, Projective}; + use ark_r1cs_std::groups::curves::short_weierstrass::ProjectiveVar; use ark_relations::r1cs::ConstraintSystem; use ark_std::UniformRand; + use super::*; + use crate::Error; + #[test] fn test_alloc_zero() { let cs = ConstraintSystem::::new_ref(); @@ -219,33 +203,39 @@ mod tests { } #[test] - fn test_improved_to_constraint_field() { + fn test_improved_to_hash_preimage() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); // check that point_to_nonnative_limbs returns the expected values let mut rng = ark_std::test_rng(); let p = Projective::rand(&mut rng); - let pVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p)).unwrap(); - let (x, y) = nonnative_affine_to_field_elements(p); + let pVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p))?; assert_eq!( - pVar.to_constraint_field().unwrap().value().unwrap(), - [x, y].concat() + pVar.to_native_sponge_field_elements()?.value()?, + p.to_native_sponge_field_elements_as_vec() ); + Ok(()) } #[test] - fn test_inputize() { - let cs = ConstraintSystem::::new_ref(); - + fn test_inputize() -> Result<(), Error> { // check that point_to_nonnative_limbs returns the expected values let mut rng = ark_std::test_rng(); let p = Projective::rand(&mut rng); - let pVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p)).unwrap(); - let xy = p.inputize(); + let cs = ConstraintSystem::::new_ref(); + let pVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p))?; assert_eq!( - [pVar.x.0.value().unwrap(), pVar.y.0.value().unwrap()].concat(), - xy + [pVar.x.0.value()?, pVar.y.0.value()?].concat(), + p.inputize_nonnative() ); + + let cs = ConstraintSystem::::new_ref(); + let pVar = ProjectiveVar::>::new_witness(cs.clone(), || Ok(p))?; + assert_eq!( + vec![pVar.x.value()?, pVar.y.value()?, pVar.z.value()?], + p.inputize() + ); + Ok(()) } } diff --git a/folding-schemes/src/folding/circuits/nonnative/uint.rs b/folding-schemes/src/folding/circuits/nonnative/uint.rs index 3f167634..eafd8fa4 100644 --- a/folding-schemes/src/folding/circuits/nonnative/uint.rs +++ b/folding-schemes/src/folding/circuits/nonnative/uint.rs @@ -3,23 +3,25 @@ use std::{ cmp::{max, min}, }; -use ark_ff::{BigInteger, Field, One, PrimeField, Zero}; +use ark_ff::{BigInteger, Fp, FpConfig, One, PrimeField, Zero}; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, boolean::Boolean, + convert::ToBitsGadget, fields::{fp::FpVar, FieldVar}, prelude::EqGadget, select::CondSelectGadget, - R1CSVar, ToBitsGadget, ToConstraintFieldGadget, + R1CSVar, }; use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; use num_bigint::BigUint; use num_integer::Integer; use crate::{ - folding::traits::Inputize, + folding::traits::{Inputize, InputizeNonNative}, transcript::{AbsorbNonNative, AbsorbNonNativeGadget}, utils::gadgets::{EquivalenceGadget, MatrixGadget, SparseMatrixVar, VectorGadget}, + Field, }; /// `LimbVar` represents a single limb of a non-native unsigned integer in the @@ -37,12 +39,12 @@ pub struct LimbVar { impl]>> From for LimbVar { fn from(bits: B) -> Self { Self { - // `Boolean::le_bits_to_fp_var` will return an error if the 
internal + // `Boolean::le_bits_to_fp` will return an error if the internal // invocation of `Boolean::enforce_in_field_le` fails. // However, this method is only called when the length of `bits` is // greater than `F::MODULUS_BIT_SIZE`, which should not happen in // our case where `bits` is guaranteed to be short. - v: Boolean::le_bits_to_fp_var(bits.as_ref()).unwrap(), + v: Boolean::le_bits_to_fp(bits.as_ref()).unwrap(), ub: (BigUint::one() << bits.as_ref().len()) - BigUint::one(), } } @@ -165,7 +167,7 @@ impl ToBitsGadget for LimbVar { Vec::new_witness(cs, || Ok(bits))? }; - Boolean::le_bits_to_fp_var(&bits)?.enforce_equal(&self.v)?; + Boolean::le_bits_to_fp(&bits)?.enforce_equal(&self.v)?; Ok(bits) } @@ -201,8 +203,8 @@ impl NonNativeUintVar { // Thus, 55 allows us to compute `Az∘Bz` without the expensive alignment // operation. // - // TODO (@winderica): either make it a global const, or compute an - // optimal value based on the modulus size + // TODO: either make it a global const, or compute an optimal value + // based on the modulus size. 55 } } @@ -268,23 +270,6 @@ impl AllocVar for NonNativeUintVar { } } -impl Inputize> for T { - fn inputize(&self) -> Vec { - assert_eq!(T::extension_degree(), 1); - // `unwrap` is safe because `T` is a field with extension degree 1, and - // thus `T::to_base_prime_field_elements` should return an iterator with - // exactly one element. - self.to_base_prime_field_elements() - .next() - .unwrap() - .into_bigint() - .to_bits_le() - .chunks(NonNativeUintVar::::bits_per_limb()) - .map(|chunk| F::from(F::BigInt::from_bits_le(chunk))) - .collect() - } -} - impl R1CSVar for NonNativeUintVar { type Value = BigUint; @@ -518,20 +503,6 @@ impl ToBitsGadget for NonNativeUintVar { } } -impl ToConstraintFieldGadget for NonNativeUintVar { - fn to_constraint_field(&self) -> Result>, SynthesisError> { - let bits_per_limb = F::MODULUS_BIT_SIZE as usize - 1; - - let limbs = self - .to_bits_le()? - .chunks(bits_per_limb) - .map(Boolean::le_bits_to_fp_var) - .collect::, _>>()?; - - Ok(limbs) - } -} - impl CondSelectGadget for NonNativeUintVar { fn conditionally_select( cond: &Boolean, @@ -569,7 +540,7 @@ impl NonNativeUintVar { Vec::new_witness(cs, || Ok(bits))? }; - Boolean::le_bits_to_fp_var(&bits)?.enforce_equal(x)?; + Boolean::le_bits_to_fp(&bits)?.enforce_equal(x)?; Ok(bits) } @@ -610,7 +581,7 @@ impl NonNativeUintVar { )?; // Below is equivalent to but more efficient than - // `Boolean::le_bits_to_fp_var(&bits)?.enforce_equal(&is_neg.select(&x.negate()?, &x)?)?` + // `Boolean::le_bits_to_fp(&bits)?.enforce_equal(&is_neg.select(&x.negate()?, &x)?)?` // Note that this enforces: // 1. The claimed absolute value `is_neg.select(&x.negate()?, &x)?` has // exactly `length` bits. @@ -627,7 +598,7 @@ impl NonNativeUintVar { // `is_neg.select(&x.negate()?, &x)?` returns `x`, which is // greater than `(|F| - 1) / 2` and cannot fit in `length` // bits. - FpVar::from(is_neg).mul_equals(&x.double()?, &(x - Boolean::le_bits_to_fp_var(&bits)?))?; + FpVar::from(is_neg).mul_equals(&x.double()?, &(x - Boolean::le_bits_to_fp(&bits)?))?; Ok(bits) } @@ -828,59 +799,55 @@ impl]>> From for NonNativeUintVar { } } -// If we impl `AbsorbNonNative` directly for `PrimeField`, rustc will complain -// that this impl conflicts with the impl for `CurveGroup`. -// Therefore, we instead impl `AbsorbNonNative` for a slice of `PrimeField` as a -// workaround. 
-impl AbsorbNonNative - for [TargetField] -{ - fn to_native_sponge_field_elements(&self, dest: &mut Vec) { - self.iter() - .for_each(|x| dest.extend(&nonnative_field_to_field_elements(x))); +impl, const N: usize> AbsorbNonNative for Fp { + fn to_native_sponge_field_elements(&self, dest: &mut Vec) { + let bits_per_limb = F::MODULUS_BIT_SIZE as usize - 1; + let num_limbs = (Fp::::MODULUS_BIT_SIZE as usize).div_ceil(bits_per_limb); + + let mut limbs = self + .into_bigint() + .to_bits_le() + .chunks(bits_per_limb) + .map(|chunk| F::from(F::BigInt::from_bits_le(chunk))) + .collect::>(); + limbs.resize(num_limbs, F::zero()); + + dest.extend(&limbs) } } impl AbsorbNonNativeGadget for NonNativeUintVar { fn to_native_sponge_field_elements(&self) -> Result>, SynthesisError> { - self.to_constraint_field() + let bits_per_limb = F::MODULUS_BIT_SIZE as usize - 1; + + let limbs = self + .to_bits_le()? + .chunks(bits_per_limb) + .map(Boolean::le_bits_to_fp) + .collect::, _>>()?; + + Ok(limbs) } } -/// The out-circuit counterpart of `NonNativeUintVar::to_constraint_field` -pub(super) fn nonnative_field_to_field_elements( - f: &TargetField, -) -> Vec { - assert_eq!(TargetField::extension_degree(), 1); - // `unwrap` is safe because `TargetField` is a field with extension degree - // 1, and thus `TargetField::to_base_prime_field_elements` should return an - // iterator with exactly one element. - let bits = f - .to_base_prime_field_elements() - .next() - .unwrap() - .into_bigint() - .to_bits_le(); - - let bits_per_limb = BaseField::MODULUS_BIT_SIZE as usize - 1; - let num_limbs = - (TargetField::BasePrimeField::MODULUS_BIT_SIZE as usize).div_ceil(bits_per_limb); - - let mut limbs = bits - .chunks(bits_per_limb) - .map(|chunk| { - let mut limb = BaseField::zero(); - let mut w = BaseField::one(); - for &b in chunk.iter() { - limb += BaseField::from(b) * w; - w.double_in_place(); - } - limb - }) - .collect::>(); - limbs.resize(num_limbs, BaseField::zero()); +impl, const N: usize> Inputize for Fp { + /// Returns the internal representation in the same order as how the value + /// is allocated in `FpVar::new_input`. + fn inputize(&self) -> Vec { + vec![*self] + } +} - limbs +impl InputizeNonNative for P { + /// Returns the internal representation in the same order as how the value + /// is allocated in `NonNativeUintVar::new_input`. 
+ fn inputize_nonnative(&self) -> Vec { + self.into_bigint() + .to_bits_le() + .chunks(NonNativeUintVar::::bits_per_limb()) + .map(|chunk| F::from(F::BigInt::from_bits_le(chunk))) + .collect() + } } impl VectorGadget> for [NonNativeUintVar] { @@ -945,16 +912,17 @@ impl MatrixGadget> for SparseMatrixVar Result<(), Box> { + fn test_mul_biguint() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); let size = 256; @@ -990,7 +958,7 @@ mod tests { } #[test] - fn test_mul_fq() -> Result<(), Box> { + fn test_mul_fq() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); let rng = &mut test_rng(); @@ -1021,7 +989,7 @@ mod tests { } #[test] - fn test_pow() -> Result<(), Box> { + fn test_pow() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); let rng = &mut test_rng(); @@ -1041,7 +1009,7 @@ mod tests { } #[test] - fn test_vec_vec_mul() -> Result<(), Box> { + fn test_vec_vec_mul() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); let len = 1000; diff --git a/folding-schemes/src/folding/circuits/sum_check.rs b/folding-schemes/src/folding/circuits/sum_check.rs index 75c7b43c..09a940b7 100644 --- a/folding-schemes/src/folding/circuits/sum_check.rs +++ b/folding-schemes/src/folding/circuits/sum_check.rs @@ -186,7 +186,7 @@ mod tests { use super::*; use crate::{ transcript::poseidon::poseidon_canonical_config, - utils::virtual_polynomial::VirtualPolynomial, + utils::virtual_polynomial::VirtualPolynomial, Error, }; pub type TestSumCheckProof = (VirtualPolynomial, PoseidonConfig, IOPProof); @@ -195,7 +195,7 @@ mod tests { /// Returns a random virtual polynomial, the poseidon config used and the associated sumcheck proof pub fn get_test_sumcheck_proof( num_vars: usize, - ) -> TestSumCheckProof { + ) -> Result, Error> { let mut rng = ark_std::test_rng(); let poseidon_config: PoseidonConfig = poseidon_canonical_config::(); let mut poseidon_transcript_prove = PoseidonSponge::::new(&poseidon_config); @@ -204,24 +204,22 @@ mod tests { let sum_check: IOPProof = IOPSumCheck::>::prove( &virtual_poly, &mut poseidon_transcript_prove, - ) - .unwrap(); - (virtual_poly, poseidon_config, sum_check) + )?; + Ok((virtual_poly, poseidon_config, sum_check)) } #[test] - fn test_sum_check_circuit() { + fn test_sum_check_circuit() -> Result<(), Error> { for num_vars in 1..15 { let cs = ConstraintSystem::::new_ref(); let (virtual_poly, poseidon_config, sum_check) = - get_test_sumcheck_proof::(num_vars); + get_test_sumcheck_proof::(num_vars)?; let mut poseidon_var: PoseidonSpongeVar = PoseidonSpongeVar::new(cs.clone(), &poseidon_config); - let iop_proof_var = - IOPProofVar::::new_witness(cs.clone(), || Ok(&sum_check)).unwrap(); + let iop_proof_var = IOPProofVar::::new_witness(cs.clone(), || Ok(&sum_check))?; let poly_aux_info_var = - VPAuxInfoVar::::new_witness(cs.clone(), || Ok(virtual_poly.aux_info)).unwrap(); - let enabled = Boolean::::new_witness(cs.clone(), || Ok(true)).unwrap(); + VPAuxInfoVar::::new_witness(cs.clone(), || Ok(virtual_poly.aux_info))?; + let enabled = Boolean::::new_witness(cs.clone(), || Ok(true))?; let res = SumCheckVerifierGadget::::verify( &iop_proof_var, &poly_aux_info_var, @@ -230,11 +228,11 @@ mod tests { ); assert!(res.is_ok()); - let (circuit_evals, r_challenges) = res.unwrap(); + let (circuit_evals, r_challenges) = res?; // 1. assert claim from circuit is equal to the one from the sum-check let claim: Fr = IOPSumCheck::>::extract_sum(&sum_check); - assert_eq!(circuit_evals[0].value().unwrap(), claim); + assert_eq!(circuit_evals[0].value()?, claim); // 2. 
assert that all in-circuit evaluations are equal to the ones from the sum-check for ((proof, point), circuit_eval) in sum_check @@ -246,15 +244,16 @@ mod tests { { let poly = DensePolynomial::from_coefficients_slice(&proof.coeffs); let eval = poly.evaluate(point); - assert_eq!(eval, circuit_eval.value().unwrap()); + assert_eq!(eval, circuit_eval.value()?); } // 3. assert that all challenges are equal to the ones from the sum-check for (point, r_challenge) in sum_check.point.iter().zip(r_challenges.iter()) { - assert_eq!(*point, r_challenge.value().unwrap()); + assert_eq!(*point, r_challenge.value()?); } - assert!(cs.is_satisfied().unwrap()); + assert!(cs.is_satisfied()?); } + Ok(()) } } diff --git a/folding-schemes/src/folding/circuits/utils.rs b/folding-schemes/src/folding/circuits/utils.rs index 5035e2a2..8bdd34c5 100644 --- a/folding-schemes/src/folding/circuits/utils.rs +++ b/folding-schemes/src/folding/circuits/utils.rs @@ -38,9 +38,10 @@ mod tests { use super::EqEvalGadget; use crate::utils::virtual_polynomial::eq_eval; + use crate::Error; #[test] - pub fn test_eq_eval_gadget() { + pub fn test_eq_eval_gadget() -> Result<(), Error> { let mut rng = test_rng(); let cs = ConstraintSystem::::new_ref(); @@ -49,15 +50,15 @@ mod tests { let y_vec: Vec = (0..i).map(|_| Fr::rand(&mut rng)).collect(); let x: Vec> = x_vec .iter() - .map(|x| FpVar::::new_witness(cs.clone(), || Ok(x)).unwrap()) - .collect(); + .map(|x| FpVar::::new_witness(cs.clone(), || Ok(x))) + .collect::, _>>()?; let y: Vec> = y_vec .iter() - .map(|y| FpVar::::new_witness(cs.clone(), || Ok(y)).unwrap()) - .collect(); - let expected_eq_eval = eq_eval::(&x_vec, &y_vec).unwrap(); - let gadget_eq_eval: FpVar = EqEvalGadget::::eq_eval(&x, &y).unwrap(); - assert_eq!(expected_eq_eval, gadget_eq_eval.value().unwrap()); + .map(|y| FpVar::::new_witness(cs.clone(), || Ok(y))) + .collect::, _>>()?; + let expected_eq_eval = eq_eval::(&x_vec, &y_vec)?; + let gadget_eq_eval: FpVar = EqEvalGadget::::eq_eval(&x, &y)?; + assert_eq!(expected_eq_eval, gadget_eq_eval.value()?); } let x: Vec> = vec![]; @@ -66,9 +67,9 @@ mod tests { assert!(gadget_eq_eval.is_err()); let x: Vec> = vec![]; - let y: Vec> = - vec![FpVar::::new_witness(cs.clone(), || Ok(&Fr::ONE)).unwrap()]; + let y: Vec> = vec![FpVar::::new_witness(cs.clone(), || Ok(&Fr::ONE))?]; let gadget_eq_eval = EqEvalGadget::::eq_eval(&x, &y); assert!(gadget_eq_eval.is_err()); + Ok(()) } } diff --git a/folding-schemes/src/folding/hypernova/cccs.rs b/folding-schemes/src/folding/hypernova/cccs.rs index d797ee76..3f830659 100644 --- a/folding-schemes/src/folding/hypernova/cccs.rs +++ b/folding-schemes/src/folding/hypernova/cccs.rs @@ -1,5 +1,4 @@ use ark_crypto_primitives::sponge::Absorb; -use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_serialize::CanonicalDeserialize; use ark_serialize::CanonicalSerialize; @@ -7,20 +6,19 @@ use ark_std::{rand::Rng, sync::Arc, One, Zero}; use super::circuits::CCCSVar; use super::Witness; -use crate::arith::{ccs::CCS, Arith}; +use crate::arith::{ccs::CCS, Arith, ArithRelation}; use crate::commitment::CommitmentScheme; use crate::folding::circuits::CF1; use crate::folding::traits::Inputize; use crate::folding::traits::{CommittedInstanceOps, Dummy}; -use crate::transcript::AbsorbNonNative; use crate::utils::mle::dense_vec_to_dense_mle; use crate::utils::vec::{is_zero_vec, mat_vec_mul}; use crate::utils::virtual_polynomial::{build_eq_x_r_vec, VirtualPolynomial}; -use crate::Error; +use crate::{Curve, Error}; /// Committed CCS instance #[derive(Debug, Clone, 
PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] -pub struct CCCS { +pub struct CCCS { // Commitment to witness pub C: C, // Public input/output @@ -32,41 +30,38 @@ impl CCS { &self, rng: &mut R, cs_params: &CS::ProverParams, - z: &[C::ScalarField], - ) -> Result<(CCCS, Witness), Error> + z: &[F], + ) -> Result<(CCCS, Witness), Error> where // enforce that CCS's F is the C::ScalarField - C: CurveGroup, + C: Curve, { - let w: Vec = z[(1 + self.l)..].to_vec(); + let (w, x) = self.split_z(z); // if the commitment scheme is set to be hiding, set the random blinding parameter let r_w = if CS::is_hiding() { - C::ScalarField::rand(rng) + F::rand(rng) } else { - C::ScalarField::zero() + F::zero() }; let C = CS::commit(cs_params, &w, &r_w)?; - Ok(( - CCCS:: { - C, - x: z[1..(1 + self.l)].to_vec(), - }, - Witness:: { w, r_w }, - )) + Ok((CCCS:: { C, x }, Witness:: { w, r_w })) } /// Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) /// polynomial over x pub fn compute_q(&self, z: &[F]) -> Result, Error> { let mut q_x = VirtualPolynomial::::new(self.s); - for i in 0..self.q { + for (S_i, &c_i) in self.S.iter().zip(&self.c) { let mut Q_k = vec![]; - for &j in self.S[i].iter() { - Q_k.push(dense_vec_to_dense_mle(self.s, &mat_vec_mul(&self.M[j], z)?)); + for &j in S_i { + Q_k.push(Arc::new(dense_vec_to_dense_mle( + self.s, + &mat_vec_mul(&self.M[j], z)?, + ))); } - q_x.add_mle_list(Q_k.iter().map(|v| Arc::new(v.clone())), self.c[i])?; + q_x.add_mle_list(Q_k, c_i)?; } Ok(q_x) } @@ -76,31 +71,34 @@ impl CCS { /// polynomial over x pub fn compute_Q(&self, z: &[F], beta: &[F]) -> Result, Error> { let eq_beta = build_eq_x_r_vec(beta)?; - let eq_beta_mle = dense_vec_to_dense_mle(self.s, &eq_beta); + let eq_beta_mle = Arc::new(dense_vec_to_dense_mle(self.s, &eq_beta)); let mut Q = VirtualPolynomial::::new(self.s); - for i in 0..self.q { + for (S_i, &c_i) in self.S.iter().zip(&self.c) { let mut Q_k = vec![]; - for &j in self.S[i].iter() { - Q_k.push(dense_vec_to_dense_mle(self.s, &mat_vec_mul(&self.M[j], z)?)); + for &j in S_i { + Q_k.push(Arc::new(dense_vec_to_dense_mle( + self.s, + &mat_vec_mul(&self.M[j], z)?, + ))); } Q_k.push(eq_beta_mle.clone()); - Q.add_mle_list(Q_k.iter().map(|v| Arc::new(v.clone())), self.c[i])?; + Q.add_mle_list(Q_k, c_i)?; } Ok(Q) } } -impl Dummy<&CCS>> for CCCS { +impl Dummy<&CCS>> for CCCS { fn dummy(ccs: &CCS>) -> Self { Self { C: C::zero(), - x: vec![CF1::::zero(); ccs.l], + x: vec![CF1::::zero(); ccs.n_public_inputs()], } } } -impl Arith>, CCCS> for CCS> { +impl ArithRelation>, CCCS> for CCS> { type Evaluation = Vec>; fn eval_relation(&self, w: &Witness>, u: &CCCS) -> Result { @@ -122,26 +120,18 @@ impl Arith>, CCCS> for CCS> { } } -impl Absorb for CCCS -where - C::ScalarField: Absorb, -{ +impl Absorb for CCCS { fn to_sponge_bytes(&self, dest: &mut Vec) { C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest); } fn to_sponge_field_elements(&self, dest: &mut Vec) { - // We cannot call `to_native_sponge_field_elements(dest)` directly, as - // `to_native_sponge_field_elements` needs `F` to be `C::ScalarField`, - // but here `F` is a generic `PrimeField`. 
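// `compute_q` above builds q(x) = \sum_i c_i * \prod_{j in S_i} (M_j z)(x); over the
// Boolean hypercube each point x just selects one row, so the CCS relation holds iff
// every row of \sum_i c_i * (the Hadamard product over j in S_i of M_j z) is zero,
// which is what `eval_relation` checks and `test_compute_q` below exercises.
// Row-wise, dependency-free sketch over Z_p with toy names and a toy modulus.
const Q_MOD: i128 = 101; // toy prime modulus

fn mat_vec(m: &[Vec<i128>], z: &[i128]) -> Vec<i128> {
    m.iter()
        .map(|row| row.iter().zip(z).map(|(a, b)| a * b).sum::<i128>().rem_euclid(Q_MOD))
        .collect()
}

// Evaluates q at row `row`; the CCS relation is satisfied iff this is 0 for every row.
fn q_at_row(
    matrices: &[Vec<Vec<i128>>], // the t CCS matrices M_j
    s_sets: &[Vec<usize>],       // S_i: which matrices are multiplied together in term i
    coeffs: &[i128],             // c_i
    z: &[i128],                  // full assignment z = (1, public_io, witness)
    row: usize,                  // row index = point of the Boolean hypercube
) -> i128 {
    let mz: Vec<Vec<i128>> = matrices.iter().map(|m| mat_vec(m, z)).collect();
    s_sets
        .iter()
        .zip(coeffs)
        .map(|(s_i, c_i)| c_i * s_i.iter().map(|&j| mz[j][row]).product::<i128>())
        .sum::<i128>()
        .rem_euclid(Q_MOD)
}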
- self.C - .to_native_sponge_field_elements_as_vec() - .to_sponge_field_elements(dest); + self.C.to_native_sponge_field_elements(dest); self.x.to_sponge_field_elements(dest); } } -impl CommittedInstanceOps for CCCS { +impl CommittedInstanceOps for CCCS { type Var = CCCSVar; fn get_commitments(&self) -> Vec { @@ -153,9 +143,11 @@ impl CommittedInstanceOps for CCCS { } } -impl Inputize> for CCCS { - fn inputize(&self) -> Vec { - [&self.C.inputize()[..], &self.x].concat() +impl Inputize> for CCCS { + /// Returns the internal representation in the same order as how the value + /// is allocated in `CCCSVar::new_input`. + fn inputize(&self) -> Vec> { + [&self.C.inputize_nonnative()[..], &self.x].concat() } } @@ -172,38 +164,39 @@ pub mod tests { /// Do some sanity checks on q(x). It's a multivariable polynomial and it should evaluate to zero inside the /// hypercube, but to not-zero outside the hypercube. #[test] - fn test_compute_q() { + fn test_compute_q() -> Result<(), Error> { let mut rng = test_rng(); let ccs = get_test_ccs::(); let z = get_test_z(3); - let q = ccs.compute_q(&z).unwrap(); + let q = ccs.compute_q(&z)?; // Evaluate inside the hypercube for x in BooleanHypercube::new(ccs.s) { - assert_eq!(Fr::zero(), q.evaluate(&x).unwrap()); + assert_eq!(Fr::zero(), q.evaluate(&x)?); } // Evaluate outside the hypercube let beta: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); - assert_ne!(Fr::zero(), q.evaluate(&beta).unwrap()); + assert_ne!(Fr::zero(), q.evaluate(&beta)?); + Ok(()) } /// Perform some sanity checks on Q(x). #[test] - fn test_compute_Q() { + fn test_compute_Q() -> Result<(), Error> { let mut rng = test_rng(); let ccs: CCS = get_test_ccs(); let z = get_test_z(3); let (w, x) = ccs.split_z(&z); - ccs.check_relation(&w, &x).unwrap(); + ccs.check_relation(&w, &x)?; let beta: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); // Compute Q(x) = eq(beta, x) * q(x). - let Q = ccs.compute_Q(&z, &beta).unwrap(); + let Q = ccs.compute_Q(&z, &beta)?; // Let's consider the multilinear polynomial G(x) = \sum_{y \in {0, 1}^s} eq(x, y) q(y) // which interpolates the multivariate polynomial q(x) inside the hypercube. @@ -218,44 +211,53 @@ pub mod tests { // Now sum Q(x) evaluations in the hypercube and expect it to be 0 let r = BooleanHypercube::new(ccs.s) - .map(|x| Q.evaluate(&x).unwrap()) + .map(|x| Q.evaluate(&x)) + .collect::, _>>()? + .into_iter() .fold(Fr::zero(), |acc, result| acc + result); assert_eq!(r, Fr::zero()); + Ok(()) } /// The polynomial G(x) (see above) interpolates q(x) inside the hypercube. /// Summing Q(x) over the hypercube is equivalent to evaluating G(x) at some point. 
/// This test makes sure that G(x) agrees with q(x) inside the hypercube, but not outside #[test] - fn test_Q_against_q() { + fn test_Q_against_q() -> Result<(), Error> { let mut rng = test_rng(); let ccs: CCS = get_test_ccs(); let z = get_test_z(3); let (w, x) = ccs.split_z(&z); - ccs.check_relation(&w, &x).unwrap(); + ccs.check_relation(&w, &x)?; // Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which // should be equal to q(d), since G(x) interpolates q(x) inside the hypercube - let q = ccs.compute_q(&z).unwrap(); + let q = ccs.compute_q(&z)?; for d in BooleanHypercube::new(ccs.s) { - let Q_at_d = ccs.compute_Q(&z, &d).unwrap(); + let Q_at_d = ccs.compute_Q(&z, &d)?; // Get G(d) by summing over Q_d(x) over the hypercube let G_at_d = BooleanHypercube::new(ccs.s) - .map(|x| Q_at_d.evaluate(&x).unwrap()) + .map(|x| Q_at_d.evaluate(&x)) + .collect::, _>>()? + .into_iter() .fold(Fr::zero(), |acc, result| acc + result); - assert_eq!(G_at_d, q.evaluate(&d).unwrap()); + assert_eq!(G_at_d, q.evaluate(&d)?); } // Now test that they should disagree outside of the hypercube let r: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); - let Q_at_r = ccs.compute_Q(&z, &r).unwrap(); + let Q_at_r = ccs.compute_Q(&z, &r)?; // Get G(d) by summing over Q_d(x) over the hypercube let G_at_r = BooleanHypercube::new(ccs.s) - .map(|x| Q_at_r.evaluate(&x).unwrap()) + .map(|x| Q_at_r.evaluate(&x)) + .collect::, _>>()? + .into_iter() .fold(Fr::zero(), |acc, result| acc + result); - assert_ne!(G_at_r, q.evaluate(&r).unwrap()); + + assert_ne!(G_at_r, q.evaluate(&r)?); + Ok(()) } } diff --git a/folding-schemes/src/folding/hypernova/circuits.rs b/folding-schemes/src/folding/hypernova/circuits.rs index 2ebc4ef0..094cd84c 100644 --- a/folding-schemes/src/folding/hypernova/circuits.rs +++ b/folding-schemes/src/folding/hypernova/circuits.rs @@ -1,11 +1,9 @@ /// Implementation of [HyperNova](https://eprint.iacr.org/2023/573.pdf) circuits use ark_crypto_primitives::sponge::{ constraints::{AbsorbGadget, CryptographicSpongeVar}, - poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge}, + poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge}, CryptographicSponge, }; -use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb}; -use ark_ec::{CurveGroup, Group}; use ark_ff::PrimeField; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, @@ -14,12 +12,12 @@ use ark_r1cs_std::{ fields::{fp::FpVar, FieldVar}, prelude::CurveVar, uint8::UInt8, - R1CSVar, ToConstraintFieldGadget, + R1CSVar, }; use ark_relations::r1cs::{ ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, Namespace, SynthesisError, }; -use ark_std::{fmt::Debug, ops::Neg, One, Zero}; +use ark_std::{fmt::Debug, iter::Sum, One, Zero}; use core::{borrow::Borrow, marker::PhantomData}; use super::{ @@ -28,6 +26,11 @@ use super::{ nimfs::{NIMFSProof, NIMFS}, HyperNovaCycleFoldConfig, Witness, }; +use crate::arith::{ + ccs::CCS, + r1cs::{extract_r1cs, R1CS}, + Arith, +}; use crate::constants::NOVA_N_BITS_RO; use crate::folding::{ circuits::{ @@ -44,26 +47,20 @@ use crate::folding::{ traits::{CommittedInstanceVarOps, Dummy}, }; use crate::frontend::FCircuit; +use crate::transcript::{AbsorbNonNativeGadget, TranscriptVar}; use crate::utils::virtual_polynomial::VPAuxInfo; -use crate::Error; -use crate::{ - arith::{ccs::CCS, r1cs::extract_r1cs}, - transcript::TranscriptVar, -}; +use crate::{Curve, Error}; /// Committed CCS instance #[derive(Debug, Clone)] -pub struct CCCSVar { 
+pub struct CCCSVar { // Commitment to witness pub C: NonNativeAffineVar, // Public io pub x: Vec>>, } -impl AllocVar, CF1> for CCCSVar -where - C: CurveGroup, -{ +impl AllocVar, CF1> for CCCSVar { fn new_variable>>( cs: impl Into>>, f: impl FnOnce() -> Result, @@ -81,7 +78,7 @@ where } } -impl CommittedInstanceVarOps for CCCSVar { +impl CommittedInstanceVarOps for CCCSVar { type PointVar = NonNativeAffineVar; fn get_commitments(&self) -> Vec { @@ -102,9 +99,19 @@ impl CommittedInstanceVarOps for CCCSVar { } } +impl AbsorbGadget for CCCSVar { + fn to_sponge_bytes(&self) -> Result>, SynthesisError> { + FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?) + } + + fn to_sponge_field_elements(&self) -> Result>, SynthesisError> { + Ok([&self.C.to_native_sponge_field_elements()?, &self.x[..]].concat()) + } +} + /// Linearized Committed CCS instance #[derive(Debug, Clone)] -pub struct LCCCSVar { +pub struct LCCCSVar { // Commitment to witness pub C: NonNativeAffineVar, // Relaxation factor of z for folded LCCCS @@ -117,10 +124,7 @@ pub struct LCCCSVar { pub v: Vec>>, } -impl AllocVar, CF1> for LCCCSVar -where - C: CurveGroup, -{ +impl AllocVar, CF1> for LCCCSVar { fn new_variable>>( cs: impl Into>>, f: impl FnOnce() -> Result, @@ -143,14 +147,14 @@ where } } -impl AbsorbGadget for LCCCSVar { +impl AbsorbGadget for LCCCSVar { fn to_sponge_bytes(&self) -> Result>, SynthesisError> { FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?) } fn to_sponge_field_elements(&self) -> Result>, SynthesisError> { Ok([ - &self.C.to_constraint_field()?, + &self.C.to_native_sponge_field_elements()?, &[self.u.clone()][..], &self.x, &self.r_x, @@ -160,7 +164,7 @@ impl AbsorbGadget for LCCCSVar { } } -impl CommittedInstanceVarOps for LCCCSVar { +impl CommittedInstanceVarOps for LCCCSVar { type PointVar = NonNativeAffineVar; fn get_commitments(&self) -> Vec { @@ -186,16 +190,12 @@ impl CommittedInstanceVarOps for LCCCSVar { /// ProofVar defines a multifolding proof #[derive(Debug)] -pub struct ProofVar { +pub struct ProofVar { pub sc_proof: IOPProofVar, #[allow(clippy::type_complexity)] pub sigmas_thetas: (Vec>>>, Vec>>>), } -impl AllocVar, CF1> for ProofVar -where - C: CurveGroup, - ::ScalarField: Absorb, -{ +impl AllocVar, CF1> for ProofVar { fn new_variable>>( cs: impl Into>>, f: impl FnOnce() -> Result, @@ -232,10 +232,10 @@ where } } -pub struct NIMFSGadget { +pub struct NIMFSGadget { _c: PhantomData, } -impl NIMFSGadget { +impl NIMFSGadget { /// Runs (in-circuit) the NIMFS.V, which outputs the new folded LCCCS instance together with /// the rho_powers, which will be used in other parts of the AugmentedFCircuit #[allow(clippy::type_complexity)] @@ -251,21 +251,8 @@ impl NIMFSGadget { enabled: Boolean, ) -> Result<(LCCCSVar, Vec>>), SynthesisError> { // absorb instances to transcript - for U_i in running_instances { - let v = [ - U_i.C.to_constraint_field()?, - vec![U_i.u.clone()], - U_i.x.clone(), - U_i.r_x.clone(), - U_i.v.clone(), - ] - .concat(); - transcript.absorb(&v)?; - } - for u_i in new_instances { - let v = [u_i.C.to_constraint_field()?, u_i.x.clone()].concat(); - transcript.absorb(&v)?; - } + transcript.absorb(&running_instances)?; + transcript.absorb(&new_instances)?; // get the challenges let gamma_scalar_raw = C::ScalarField::from_le_bytes_mod_order(b"gamma"); @@ -281,7 +268,7 @@ impl NIMFSGadget { let beta: Vec>> = transcript.get_challenges(ccs.s)?; let vp_aux_info_raw = VPAuxInfo:: { - max_degree: ccs.d + 1, + max_degree: ccs.degree() + 1, num_variables: ccs.s, phantom: 
PhantomData::, }; @@ -311,7 +298,6 @@ impl NIMFSGadget { // verify the claim c let computed_c = compute_c_gadget( - cs.clone(), ccs, proof.sigmas_thetas.0.clone(), // sigmas proof.sigmas_thetas.1.clone(), // thetas @@ -330,7 +316,7 @@ impl NIMFSGadget { let rho_scalar: FpVar> = FpVar::>::new_constant(cs.clone(), rho_scalar_raw)?; transcript.absorb(&rho_scalar)?; let rho_bits: Vec>> = transcript.get_challenge_nbits(NOVA_N_BITS_RO)?; - let rho = Boolean::le_bits_to_fp_var(&rho_bits)?; + let rho = Boolean::le_bits_to_fp(&rho_bits)?; // Self::fold will return the folded instance let folded_lcccs = Self::fold( @@ -420,7 +406,6 @@ impl NIMFSGadget { /// $$ #[allow(clippy::too_many_arguments)] fn compute_c_gadget( - cs: ConstraintSystemRef, ccs: &CCS, vec_sigmas: Vec>>, vec_thetas: Vec>>, @@ -437,24 +422,23 @@ fn compute_c_gadget( let mut c = FpVar::::zero(); let mut current_gamma = FpVar::::one(); for i in 0..vec_sigmas.len() { - for j in 0..ccs.t { - c += current_gamma.clone() * e_lcccs[i].clone() * vec_sigmas[i][j].clone(); + for sigma in &vec_sigmas[i] { + c += current_gamma.clone() * e_lcccs[i].clone() * sigma; current_gamma *= gamma.clone(); } } - let ccs_c = Vec::>::new_constant(cs.clone(), ccs.c.clone())?; let e_k = EqEvalGadget::eq_eval(&beta, &vec_r_x_prime)?; #[allow(clippy::needless_range_loop)] for k in 0..vec_thetas.len() { - let mut sum = FpVar::::zero(); - for i in 0..ccs.q { + let prods = ccs.S.iter().zip(&ccs.c).map(|(S_i, &c_i)| { let mut prod = FpVar::::one(); - for j in ccs.S[i].clone() { - prod *= vec_thetas[k][j].clone(); + for &j in S_i { + prod *= &vec_thetas[k][j]; } - sum += ccs_c[i].clone() * prod; - } + prod * c_i + }); + let sum = FpVar::sum(prods); c += current_gamma.clone() * e_k.clone() * sum; current_gamma *= gamma.clone(); } @@ -481,15 +465,12 @@ fn compute_c_gadget( /// * `NU` - the number of CCCS instances to be folded #[derive(Debug, Clone)] pub struct AugmentedFCircuit< - C1: CurveGroup, - C2: CurveGroup, - GC2: CurveVar>, + C1: Curve, + C2: Curve, FC: FCircuit>, const MU: usize, const NU: usize, > { - pub(super) _c2: PhantomData, - pub(super) _gc2: PhantomData, pub(super) poseidon_config: PoseidonConfig>, pub(super) ccs: CCS, // CCS of the AugmentedFCircuit pub(super) pp_hash: Option>, @@ -497,34 +478,26 @@ pub struct AugmentedFCircuit< pub(super) i_usize: Option, pub(super) z_0: Option>, pub(super) z_i: Option>, - pub(super) external_inputs: Option>, + pub(super) external_inputs: Option, pub(super) U_i: Option>, pub(super) Us: Option>>, // other U_i's to be folded that are not the main running instance pub(super) u_i_C: Option, // u_i.C pub(super) us: Option>>, // other u_i's to be folded that are not the main incoming instance pub(super) U_i1_C: Option, // U_{i+1}.C pub(super) F: FC, // F circuit - pub(super) x: Option>, // public input (u_{i+1}.x[0]) pub(super) nimfs_proof: Option>, // cyclefold verifier on C1 pub(super) cf_u_i_cmW: Option, // input, cf_u_i.cmW pub(super) cf_U_i: Option>, // input, RelaxedR1CS CycleFold instance - pub(super) cf_x: Option>, // public input (cf_u_{i+1}.x[1]) pub(super) cf_cmT: Option, } -impl AugmentedFCircuit +impl AugmentedFCircuit where - C1: CurveGroup, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit>, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, { pub fn default( poseidon_config: &PoseidonConfig>, @@ -535,8 +508,6 @@ where return Err(Error::CantBeZero("mu,nu".to_string())); } Ok(Self { - 
_c2: PhantomData, - _gc2: PhantomData, poseidon_config: poseidon_config.clone(), ccs, pp_hash: None, @@ -551,11 +522,9 @@ where us: None, U_i1_C: None, F: F_circuit, - x: None, nimfs_proof: None, cf_u_i_cmW: None, cf_U_i: None, - cf_x: None, cf_cmT: None, }) } @@ -565,32 +534,29 @@ where F: FC, // FCircuit ccs: Option>, ) -> Result { - let initial_ccs = CCS { - // m, n, s, s_prime and M will be overwritten by the `upper_bound_ccs' method - m: 0, - n: 0, - l: 2, // io_len - s: 1, - s_prime: 1, - t: 3, // note: this is only supports R1CS for the moment - q: 2, - d: 2, - S: vec![vec![0, 1], vec![2]], - c: vec![C1::ScalarField::one(), C1::ScalarField::one().neg()], - M: vec![], - }; + // create the initial ccs by converting from a dummy r1cs with m = 0, + // n = 0, and l = 2 (i.e., 0 constraints, 0 variables, and 2 public + // inputs). + // Here, `m` and `n` will be overwritten by the `compute_concrete_ccs` + // method. + let mut initial_ccs = CCS::from(R1CS::dummy((0, 0, 2))); + // Although `s = log(m)` is undefined for `m = 0`, we set it to 1 here + // because the circuit internally calls `IOPSumCheck::extract_sum` which + // will panic if `s = 0` (0 is arkworks' fallback value for `log(0)`). + // Similarly, `s` will also be overwritten by `compute_concrete_ccs`. + initial_ccs.s = 1; let mut augmented_f_circuit = Self::default(poseidon_config, F, initial_ccs)?; augmented_f_circuit.ccs = ccs .ok_or(()) - .or_else(|_| augmented_f_circuit.upper_bound_ccs())?; + .or_else(|_| augmented_f_circuit.compute_concrete_ccs())?; Ok(augmented_f_circuit) } /// This method computes the CCS parameters. This is used because there is a circular - /// dependency between the AugmentedFCircuit CCS and the CCS parameters m & n & s & s'. + /// dependency between the AugmentedFCircuit CCS and the CCS parameters m & n & s. /// For a stable FCircuit circuit, the CCS parameters can be computed in advance and can be /// fed in as a parameter to the AugmentedFCircuit::empty method to avoid computing them there. - pub fn upper_bound_ccs(&self) -> Result, Error> { + pub fn compute_concrete_ccs(&self) -> Result, Error> { let r1cs = get_r1cs_from_cs::>(self.clone())?; let mut ccs = CCS::from(r1cs); @@ -625,8 +591,6 @@ where )?; let augmented_f_circuit = Self { - _c2: PhantomData, - _gc2: PhantomData, poseidon_config: self.poseidon_config.clone(), ccs: ccs.clone(), pp_hash: Some(C1::ScalarField::zero()), @@ -634,19 +598,17 @@ where i_usize: Some(0), z_0: Some(z_0.clone()), z_i: Some(z_0.clone()), - external_inputs: Some(vec![C1::ScalarField::zero(); self.F.external_inputs_len()]), + external_inputs: Some(FC::ExternalInputs::default()), U_i: Some(U_i.clone()), Us: Some(Us), u_i_C: Some(u_i.C), us: Some(us), U_i1_C: Some(U_i1.C), F: self.F.clone(), - x: Some(C1::ScalarField::zero()), nimfs_proof: Some(nimfs_proof), // cyclefold values cf_u_i_cmW: None, cf_U_i: None, - cf_x: None, cf_cmT: None, }; @@ -667,11 +629,13 @@ where U_i = LCCCS::::dummy(&ccs); } Ok(ccs) - - // Ok(augmented_f_circuit.compute_cs_ccs()?.1) } - /// Returns the cs (ConstraintSystem) and the CCS out of the AugmentedFCircuit + /// Returns the cs (ConstraintSystem) and the CCS out of the AugmentedFCircuit. + /// Notice that in order to be able to internally call the `extract_r1cs` function, this method + /// calls the `cs.finalize` method which consumes a noticeable portion of the time. If the CCS + /// is not needed, directly generating the ConstraintSystem without calling the `finalize` method + /// will save computing time.
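// Illustrative sketch (not part of this diff) of the note above about precomputing the CCS
// parameters: for a stable FCircuit, the concrete CCS can be derived once and then passed back
// into `AugmentedFCircuit::empty`, so the costly `compute_concrete_ccs` derivation runs only
// once. It assumes the bn254/grumpkin curves and `CubicFCircuit` used in the tests below, and
// that the relevant crate items are in scope; exact paths and signatures are assumptions.
fn reuse_precomputed_ccs(
    poseidon_config: &PoseidonConfig<Fr>,
    F_circuit: CubicFCircuit<Fr>,
) -> Result<(), Error> {
    const MU: usize = 2;
    const NU: usize = 2;
    // first call: no CCS is known yet, so `empty` derives it via `compute_concrete_ccs`
    let circuit = AugmentedFCircuit::<Projective, Projective2, CubicFCircuit<Fr>, MU, NU>::empty(
        poseidon_config,
        F_circuit.clone(),
        None,
    )?;
    let cached_ccs = circuit.ccs.clone();
    // later calls: feed the cached CCS back in and skip the derivation entirely
    let _circuit = AugmentedFCircuit::<Projective, Projective2, CubicFCircuit<Fr>, MU, NU>::empty(
        poseidon_config,
        F_circuit,
        Some(cached_ccs),
    )?;
    Ok(())
}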
#[allow(clippy::type_complexity)] pub fn compute_cs_ccs( &self, @@ -687,20 +651,16 @@ where } } -impl ConstraintSynthesizer> - for AugmentedFCircuit +impl AugmentedFCircuit where - C1: CurveGroup, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit>, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, { - fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { + pub fn compute_next_state( + self, + cs: ConstraintSystemRef>, + ) -> Result>>, SynthesisError> { let pp_hash = FpVar::>::new_witness(cs.clone(), || { Ok(self.pp_hash.unwrap_or_else(CF1::::zero)) })?; @@ -717,10 +677,8 @@ where .z_i .unwrap_or(vec![CF1::::zero(); self.F.state_len()])) })?; - let external_inputs = Vec::>>::new_witness(cs.clone(), || { - Ok(self - .external_inputs - .unwrap_or(vec![CF1::::zero(); self.F.external_inputs_len()])) + let external_inputs = FC::ExternalInputsVar::new_witness(cs.clone(), || { + Ok(self.external_inputs.unwrap_or_default()) })?; let U_dummy = LCCCS::::dummy(&self.ccs); @@ -744,15 +702,15 @@ where let cf_u_dummy = CycleFoldCommittedInstance::dummy(HyperNovaCycleFoldConfig::::IO_LEN); - let cf_U_i = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + let cf_U_i = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { Ok(self.cf_U_i.unwrap_or(cf_u_dummy.clone())) })?; - let cf_cmT = GC2::new_witness(cs.clone(), || Ok(self.cf_cmT.unwrap_or_else(C2::zero)))?; + let cf_cmT = C2::Var::new_witness(cs.clone(), || Ok(self.cf_cmT.unwrap_or_else(C2::zero)))?; let sponge = PoseidonSpongeVar::::new(cs.clone(), &self.poseidon_config); let is_basecase = i.is_zero()?; - let is_not_basecase = is_basecase.not(); + let is_not_basecase = !&is_basecase; // Primary Part // P.1. Compute u_i.x @@ -813,8 +771,17 @@ where &z_0, &z_i1, )?; - let x = FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?; - x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?; + let x = is_basecase.select(&u_i1_x_base, &u_i1_x)?; + // This line "converts" `x` from a witness to a public input. + // Instead of directly modifying the constraint system, we explicitly + // allocate a public input and enforce that its value is indeed `x`. + // While comparing `x` with itself seems redundant, this is necessary + // because: + // - `.value()` allows an honest prover to extract public inputs without + // computing them outside the circuit. + // - `.enforce_equal()` prevents a malicious prover from claiming wrong + // public inputs that are not the honest `x` computed in-circuit. + FpVar::new_input(cs.clone(), || x.value())?.enforce_equal(&x)?; // convert rho_bits of the rho_vec to a `NonNativeFieldVar` let mut rho_bits_resized = rho_bits.clone(); @@ -840,14 +807,14 @@ where // ensure that cf_u has as public inputs the C from main instances U_i, u_i, U_i+1 // coordinates of the commitments. // C.2. Construct `cf_u_i` - let cf_u_i = CycleFoldCommittedInstanceVar:: { + let cf_u_i = CycleFoldCommittedInstanceVar:: { // cf1_u_i.cmE = 0. Notice that we enforce cmE to be equal to 0 since it is allocated // as 0. 
- cmE: GC2::zero(), + cmE: C2::Var::zero(), // cf1_u_i.u = 1 u: NonNativeUintVar::new_constant(cs.clone(), C1::BaseField::one())?, // cf_u_i.cmW is provided by the prover as witness - cmW: GC2::new_witness(cs.clone(), || Ok(self.cf_u_i_cmW.unwrap_or(C2::zero())))?, + cmW: C2::Var::new_witness(cs.clone(), || Ok(self.cf_u_i_cmW.unwrap_or(C2::zero())))?, // cf_u_i.x is computed in step 1 x: cf_x, }; @@ -855,7 +822,7 @@ where // C.3. nifs.verify (fold_committed_instance), obtains cf_U_{i+1} by folding cf_u_i & cf_U_i. // compute cf_r = H(cf_u_i, cf_U_i, cf_cmT) // cf_r_bits is denoted by rho* in the paper. - let cf_r_bits = CycleFoldChallengeGadget::::get_challenge_gadget( + let cf_r_bits = CycleFoldChallengeGadget::::get_challenge_gadget( &mut transcript, pp_hash.clone(), cf_U_i_vec, @@ -864,7 +831,7 @@ where )?; // Fold cf1_u_i & cf_U_i into cf1_U_{i+1} let cf_U_i1 = - NIFSFullGadget::::fold_committed_instance(cf_r_bits, cf_cmT, cf_U_i, cf_u_i)?; + NIFSFullGadget::::fold_committed_instance(cf_r_bits, cf_cmT, cf_U_i, cf_u_i)?; // Back to Primary Part // P.4.b compute and check the second output of F' @@ -872,31 +839,50 @@ where // Non-base case: u_{i+1}.x[1] == H(cf_U_{i+1}) let (cf_u_i1_x, _) = cf_U_i1.clone().hash(&sponge, pp_hash.clone())?; let (cf_u_i1_x_base, _) = - CycleFoldCommittedInstanceVar::::new_constant(cs.clone(), cf_u_dummy)? + CycleFoldCommittedInstanceVar::::new_constant(cs.clone(), cf_u_dummy)? .hash(&sponge, pp_hash)?; - let cf_x = FpVar::new_input(cs.clone(), || { - Ok(self.cf_x.unwrap_or(cf_u_i1_x_base.value()?)) - })?; - cf_x.enforce_equal(&is_basecase.select(&cf_u_i1_x_base, &cf_u_i1_x)?)?; + let cf_x = is_basecase.select(&cf_u_i1_x_base, &cf_u_i1_x)?; + // This line "converts" `cf_x` from a witness to a public input. + // Instead of directly modifying the constraint system, we explicitly + // allocate a public input and enforce that its value is indeed `cf_x`. + // While comparing `cf_x` with itself seems redundant, this is necessary + // because: + // - `.value()` allows an honest prover to extract public inputs without + // computing them outside the circuit. + // - `.enforce_equal()` prevents a malicious prover from claiming wrong + // public inputs that are not the honest `cf_x` computed in-circuit. 
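// Minimal self-contained sketch (not part of this diff) of the witness-to-public-input pattern
// described in the comments above for `x` and `cf_x`, shown on a single field element using
// only the arkworks r1cs-std API.
fn expose_witness_as_public_input() -> Result<(), ark_relations::r1cs::SynthesisError> {
    use ark_bn254::Fr;
    use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget, fields::fp::FpVar, R1CSVar};
    use ark_relations::r1cs::ConstraintSystem;

    let cs = ConstraintSystem::<Fr>::new_ref();
    // a value computed in-circuit first lives as a witness...
    let x = FpVar::<Fr>::new_witness(cs.clone(), || Ok(Fr::from(7u32)))?;
    // ...then it is re-allocated as a public input: `.value()` lets the honest prover copy it
    // out, and `.enforce_equal()` binds the public input to the in-circuit value for soundness.
    FpVar::<Fr>::new_input(cs.clone(), || x.value())?.enforce_equal(&x)?;
    assert!(cs.is_satisfied()?);
    Ok(())
}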
+ FpVar::new_input(cs.clone(), || cf_x.value())?.enforce_equal(&cf_x)?; + + Ok(z_i1) + } +} - Ok(()) +impl ConstraintSynthesizer> + for AugmentedFCircuit +where + C1: Curve, + C2: Curve, + FC: FCircuit>, +{ + fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { + self.compute_next_state(cs).map(|_| ()) } } #[cfg(test)] mod tests { - use ark_bn254::{constraints::GVar, Fq, Fr, G1Projective as Projective}; + use ark_bn254::{Fq, Fr, G1Projective as Projective}; + use ark_crypto_primitives::sponge::Absorb; use ark_ff::BigInteger; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; - use ark_std::{test_rng, UniformRand}; - use std::time::Instant; + use ark_grumpkin::Projective as Projective2; + use ark_std::{cmp::max, test_rng, time::Instant, UniformRand}; use super::*; use crate::{ arith::{ ccs::tests::{get_test_ccs, get_test_z}, r1cs::extract_w_x, - Arith, + ArithRelation, }, commitment::{pedersen::Pedersen, CommitmentScheme}, folding::{ @@ -907,13 +893,12 @@ mod tests { }, traits::CommittedInstanceOps, }, - frontend::utils::CubicFCircuit, + frontend::utils::{cubic_step_native, CubicFCircuit}, transcript::poseidon::poseidon_canonical_config, - utils::get_cm_coordinates, }; #[test] - pub fn test_compute_c_gadget() { + pub fn test_compute_c_gadget() -> Result<(), Error> { // number of LCCCS & CCCS instances to fold in a single step let mu = 32; let nu = 42; @@ -936,27 +921,27 @@ mod tests { let beta: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); let r_x_prime: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; // Create the LCCCS instances out of z_lcccs let mut lcccs_instances = Vec::new(); for z_i in z_lcccs.iter() { - let (inst, _) = ccs - .to_lcccs::<_, _, Pedersen, true>(&mut rng, &pedersen_params, z_i) - .unwrap(); + let (inst, _) = ccs.to_lcccs::<_, _, Pedersen, true>( + &mut rng, + &pedersen_params, + z_i, + )?; lcccs_instances.push(inst); } // Create the CCCS instance out of z_cccs let mut cccs_instances = Vec::new(); for z_i in z_cccs.iter() { - let (inst, _) = ccs - .to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, z_i) - .unwrap(); + let (inst, _) = + ccs.to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, z_i)?; cccs_instances.push(inst); } - let sigmas_thetas = compute_sigmas_thetas(&ccs, &z_lcccs, &z_cccs, &r_x_prime).unwrap(); + let sigmas_thetas = compute_sigmas_thetas(&ccs, &z_lcccs, &z_cccs, &r_x_prime)?; let expected_c = compute_c( &ccs, @@ -968,33 +953,30 @@ mod tests { .map(|lcccs| lcccs.r_x.clone()) .collect(), &r_x_prime, - ) - .unwrap(); + )?; let cs = ConstraintSystem::::new_ref(); let mut vec_sigmas = Vec::new(); let mut vec_thetas = Vec::new(); for sigmas in sigmas_thetas.0 { - vec_sigmas - .push(Vec::>::new_witness(cs.clone(), || Ok(sigmas.clone())).unwrap()); + vec_sigmas.push(Vec::>::new_witness(cs.clone(), || { + Ok(sigmas.clone()) + })?); } for thetas in sigmas_thetas.1 { - vec_thetas - .push(Vec::>::new_witness(cs.clone(), || Ok(thetas.clone())).unwrap()); + vec_thetas.push(Vec::>::new_witness(cs.clone(), || { + Ok(thetas.clone()) + })?); } let vec_r_x: Vec>> = lcccs_instances .iter() - .map(|lcccs| { - Vec::>::new_witness(cs.clone(), || Ok(lcccs.r_x.clone())).unwrap() - }) - .collect(); - let vec_r_x_prime = - Vec::>::new_witness(cs.clone(), || Ok(r_x_prime.clone())).unwrap(); - let gamma_var = 
FpVar::::new_witness(cs.clone(), || Ok(gamma)).unwrap(); - let beta_var = Vec::>::new_witness(cs.clone(), || Ok(beta.clone())).unwrap(); + .map(|lcccs| Vec::>::new_witness(cs.clone(), || Ok(lcccs.r_x.clone()))) + .collect::, _>>()?; + let vec_r_x_prime = Vec::>::new_witness(cs.clone(), || Ok(r_x_prime.clone()))?; + let gamma_var = FpVar::::new_witness(cs.clone(), || Ok(gamma))?; + let beta_var = Vec::>::new_witness(cs.clone(), || Ok(beta.clone()))?; let computed_c = compute_c_gadget( - cs.clone(), &ccs, vec_sigmas, vec_thetas, @@ -1002,22 +984,21 @@ mod tests { beta_var, vec_r_x, vec_r_x_prime, - ) - .unwrap(); + )?; - assert_eq!(expected_c, computed_c.value().unwrap()); + assert_eq!(expected_c, computed_c.value()?); + Ok(()) } /// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step, /// to verify the folding in the NIMFSGadget circuit #[test] - pub fn test_nimfs_gadget_verify() { + pub fn test_nimfs_gadget_verify() -> Result<(), Error> { let mut rng = test_rng(); // Create a basic CCS circuit let ccs = get_test_ccs::(); - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; let mu = 32; let nu = 42; @@ -1038,13 +1019,11 @@ mod tests { let mut lcccs_instances = Vec::new(); let mut w_lcccs = Vec::new(); for z_i in z_lcccs.iter() { - let (running_instance, w) = ccs - .to_lcccs::<_, _, Pedersen, false>( - &mut rng, - &pedersen_params, - z_i, - ) - .unwrap(); + let (running_instance, w) = ccs.to_lcccs::<_, _, Pedersen, false>( + &mut rng, + &pedersen_params, + z_i, + )?; lcccs_instances.push(running_instance); w_lcccs.push(w); } @@ -1052,9 +1031,8 @@ mod tests { let mut cccs_instances = Vec::new(); let mut w_cccs = Vec::new(); for z_i in z_cccs.iter() { - let (new_instance, w) = ccs - .to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, z_i) - .unwrap(); + let (new_instance, w) = + ccs.to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, z_i)?; cccs_instances.push(new_instance); w_cccs.push(w); } @@ -1072,8 +1050,7 @@ mod tests { &cccs_instances, &w_lcccs, &w_cccs, - ) - .unwrap(); + )?; // Verifier's transcript let mut transcript_v: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); @@ -1085,26 +1062,22 @@ mod tests { &lcccs_instances, &cccs_instances, proof.clone(), - ) - .unwrap(); + )?; assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with respect to the folded witness - ccs.check_relation(&folded_witness, &folded_lcccs).unwrap(); + ccs.check_relation(&folded_witness, &folded_lcccs)?; // allocate circuit inputs let cs = ConstraintSystem::::new_ref(); let lcccs_instancesVar = - Vec::>::new_witness(cs.clone(), || Ok(lcccs_instances.clone())) - .unwrap(); + Vec::>::new_witness(cs.clone(), || Ok(lcccs_instances.clone()))?; let cccs_instancesVar = - Vec::>::new_witness(cs.clone(), || Ok(cccs_instances.clone())) - .unwrap(); - let proofVar = - ProofVar::::new_witness(cs.clone(), || Ok(proof.clone())).unwrap(); + Vec::>::new_witness(cs.clone(), || Ok(cccs_instances.clone()))?; + let proofVar = ProofVar::::new_witness(cs.clone(), || Ok(proof.clone()))?; let mut transcriptVar = PoseidonSpongeVar::::new(cs.clone(), &poseidon_config); - let enabled = Boolean::::new_witness(cs.clone(), || Ok(true)).unwrap(); + let enabled = Boolean::::new_witness(cs.clone(), || Ok(true))?; let (folded_lcccsVar, _) = NIMFSGadget::::verify( cs.clone(), &ccs, @@ -1113,46 +1086,48 @@ mod tests { 
&cccs_instancesVar, proofVar, enabled, - ) - .unwrap(); - assert!(cs.is_satisfied().unwrap()); - assert_eq!(folded_lcccsVar.u.value().unwrap(), folded_lcccs.u); + )?; + assert!(cs.is_satisfied()?); + assert_eq!(folded_lcccsVar.u.value()?, folded_lcccs.u); + Ok(()) } /// test that checks the native LCCCS.to_sponge_{bytes,field_elements} vs /// the R1CS constraints version #[test] - pub fn test_lcccs_to_sponge_preimage() { + pub fn test_lcccs_to_sponge_preimage() -> Result<(), Error> { let mut rng = test_rng(); let ccs = get_test_ccs(); let z1 = get_test_z::(3); - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; - let (lcccs, _) = ccs - .to_lcccs::<_, _, Pedersen, true>(&mut rng, &pedersen_params, &z1) - .unwrap(); + let (lcccs, _) = ccs.to_lcccs::<_, _, Pedersen, true>( + &mut rng, + &pedersen_params, + &z1, + )?; let bytes = lcccs.to_sponge_bytes_as_vec(); let field_elements = lcccs.to_sponge_field_elements_as_vec(); let cs = ConstraintSystem::::new_ref(); - let lcccsVar = LCCCSVar::::new_witness(cs.clone(), || Ok(lcccs)).unwrap(); - let bytes_var = lcccsVar.to_sponge_bytes().unwrap(); - let field_elements_var = lcccsVar.to_sponge_field_elements().unwrap(); + let lcccsVar = LCCCSVar::::new_witness(cs.clone(), || Ok(lcccs))?; + let bytes_var = lcccsVar.to_sponge_bytes()?; + let field_elements_var = lcccsVar.to_sponge_field_elements()?; - assert!(cs.is_satisfied().unwrap()); + assert!(cs.is_satisfied()?); // check that the natively computed and in-circuit computed hashes match - assert_eq!(bytes_var.value().unwrap(), bytes); - assert_eq!(field_elements_var.value().unwrap(), field_elements); + assert_eq!(bytes_var.value()?, bytes); + assert_eq!(field_elements_var.value()?, field_elements); + Ok(()) } /// test that checks the native LCCCS.hash vs the R1CS constraints version #[test] - pub fn test_lcccs_hash() { + pub fn test_lcccs_hash() -> Result<(), Error> { let mut rng = test_rng(); let poseidon_config = poseidon_canonical_config::(); let sponge = PoseidonSponge::::new(&poseidon_config); @@ -1160,38 +1135,39 @@ mod tests { let ccs = get_test_ccs(); let z1 = get_test_z::(3); - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; let pp_hash = Fr::from(42u32); // only for test let i = Fr::from(3_u32); let z_0 = vec![Fr::from(3_u32)]; let z_i = vec![Fr::from(3_u32)]; - let (lcccs, _) = ccs - .to_lcccs::<_, _, Pedersen, true>(&mut rng, &pedersen_params, &z1) - .unwrap(); + let (lcccs, _) = ccs.to_lcccs::<_, _, Pedersen, true>( + &mut rng, + &pedersen_params, + &z1, + )?; let h = lcccs.clone().hash(&sponge, pp_hash, i, &z_0, &z_i); let cs = ConstraintSystem::::new_ref(); let spongeVar = PoseidonSpongeVar::::new(cs.clone(), &poseidon_config); - let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash)).unwrap(); - let iVar = FpVar::::new_witness(cs.clone(), || Ok(i)).unwrap(); - let z_0Var = Vec::>::new_witness(cs.clone(), || Ok(z_0.clone())).unwrap(); - let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i.clone())).unwrap(); - let lcccsVar = LCCCSVar::::new_witness(cs.clone(), || Ok(lcccs)).unwrap(); + let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash))?; + let iVar = FpVar::::new_witness(cs.clone(), || Ok(i))?; + let z_0Var = Vec::>::new_witness(cs.clone(), || Ok(z_0.clone()))?; + let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i.clone()))?; + 
let lcccsVar = LCCCSVar::::new_witness(cs.clone(), || Ok(lcccs))?; let (hVar, _) = lcccsVar .clone() - .hash(&spongeVar, &pp_hashVar, &iVar, &z_0Var, &z_iVar) - .unwrap(); - assert!(cs.is_satisfied().unwrap()); + .hash(&spongeVar, &pp_hashVar, &iVar, &z_0Var, &z_iVar)?; + assert!(cs.is_satisfied()?); // check that the natively computed and in-circuit computed hashes match - assert_eq!(hVar.value().unwrap(), h); + assert_eq!(hVar.value()?, h); + Ok(()) } #[test] - pub fn test_augmented_f_circuit() { + pub fn test_augmented_f_circuit() -> Result<(), Error> { let mut rng = test_rng(); let poseidon_config = poseidon_canonical_config::(); let sponge = PoseidonSponge::::new(&poseidon_config); @@ -1200,34 +1176,35 @@ mod tests { const NU: usize = 3; let start = Instant::now(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let mut augmented_f_circuit = - AugmentedFCircuit::, MU, NU>::empty( + AugmentedFCircuit::, MU, NU>::empty( &poseidon_config, F_circuit, None, - ) - .unwrap(); + )?; let ccs = augmented_f_circuit.ccs.clone(); println!("AugmentedFCircuit & CCS generation: {:?}", start.elapsed()); - println!("CCS m x n: {} x {}", ccs.m, ccs.n); + println!("CCS m x n: {} x {}", ccs.n_constraints(), ccs.n_variables()); // CycleFold circuit let cs2 = ConstraintSystem::::new_ref(); - let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); - cf_circuit.generate_constraints(cs2.clone()).unwrap(); + let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); + cf_circuit.generate_constraints(cs2.clone())?; cs2.finalize(); - let cs2 = cs2 - .into_inner() - .ok_or(Error::NoInnerConstraintSystem) - .unwrap(); - let cf_r1cs = extract_r1cs::(&cs2).unwrap(); - println!("CF m x n: {} x {}", cf_r1cs.A.n_rows, cf_r1cs.A.n_cols); - - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); - let (cf_pedersen_params, _) = - Pedersen::::setup(&mut rng, cf_r1cs.A.n_cols - cf_r1cs.l - 1).unwrap(); + let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; + let cf_r1cs = extract_r1cs::(&cs2)?; + println!( + "CF m x n: {} x {}", + cf_r1cs.n_constraints(), + cf_r1cs.n_variables() + ); + + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; + let (cf_pedersen_params, _) = Pedersen::::setup( + &mut rng, + max(cf_r1cs.n_constraints(), cf_r1cs.n_witnesses()), + )?; // public params hash let pp_hash = Fr::from(42u32); // only for test @@ -1273,53 +1250,46 @@ mod tests { let all_Ws = [vec![W_i.clone()], Ws].concat(); let all_ws = [vec![w_i.clone()], ws].concat(); - let z_i1 = F_circuit.step_native(i, z_i.clone(), vec![]).unwrap(); + let z_i1 = cubic_step_native(z_i.clone()); let (U_i1, W_i1); + let u_i1_x; + let cf_u_i1_x; + if i == 0 { W_i1 = Witness::::dummy(&ccs); U_i1 = LCCCS::dummy(&ccs); - let u_i1_x = U_i1.hash(&sponge, pp_hash, Fr::one(), &z_0, &z_i1); + u_i1_x = U_i1.hash(&sponge, pp_hash, Fr::one(), &z_0, &z_i1); // hash the initial (dummy) CycleFold instance, which is used as the 2nd public // input in the AugmentedFCircuit - let cf_u_i1_x = cf_U_i.hash_cyclefold(&sponge, pp_hash); - - augmented_f_circuit = AugmentedFCircuit::< - Projective, - Projective2, - GVar2, - CubicFCircuit, - MU, - NU, - > { - _c2: PhantomData, - _gc2: PhantomData, - poseidon_config: poseidon_config.clone(), - ccs: ccs.clone(), - pp_hash: Some(pp_hash), - i: Some(Fr::zero()), - i_usize: Some(0), - z_0: Some(z_0.clone()), - z_i: Some(z_i.clone()), - external_inputs: Some(vec![]), - U_i: Some(U_i.clone()), - Us: Some(Us.clone()), - 
u_i_C: Some(u_i.C), - us: Some(us.clone()), - U_i1_C: Some(U_i1.C), - F: F_circuit, - x: Some(u_i1_x), - nimfs_proof: None, - - // cyclefold values - cf_u_i_cmW: None, - cf_U_i: None, - cf_x: Some(cf_u_i1_x), - cf_cmT: None, - }; + cf_u_i1_x = cf_U_i.hash_cyclefold(&sponge, pp_hash); + + augmented_f_circuit = + AugmentedFCircuit::, MU, NU> { + poseidon_config: poseidon_config.clone(), + ccs: ccs.clone(), + pp_hash: Some(pp_hash), + i: Some(Fr::zero()), + i_usize: Some(0), + z_0: Some(z_0.clone()), + z_i: Some(z_i.clone()), + external_inputs: Some(()), + U_i: Some(U_i.clone()), + Us: Some(Us.clone()), + u_i_C: Some(u_i.C), + us: Some(us.clone()), + U_i1_C: Some(U_i1.C), + F: F_circuit, + nimfs_proof: None, + + // cyclefold values + cf_u_i_cmW: None, + cf_U_i: None, + cf_cmT: None, + }; } else { let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config.clone()); @@ -1332,35 +1302,17 @@ mod tests { &all_us, &all_Ws, &all_ws, - ) - .unwrap(); + )?; // sanity check: check the folded instance relation - ccs.check_relation(&W_i1, &U_i1).unwrap(); + ccs.check_relation(&W_i1, &U_i1)?; - let u_i1_x = U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), &z_0, &z_i1); + u_i1_x = U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), &z_0, &z_i1); let rho_bits = rho.into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec(); - let rho_Fq = Fq::from_bigint(BigInteger::from_bits_le(&rho_bits)).unwrap(); // CycleFold part: - // get the vector used as public inputs 'x' in the CycleFold circuit - let cf_u_i_x = [ - vec![rho_Fq], - get_cm_coordinates(&U_i.C), - Us.iter() - .flat_map(|Us_i| get_cm_coordinates(&Us_i.C)) - .collect(), - get_cm_coordinates(&u_i.C), - us.iter() - .flat_map(|us_i| get_cm_coordinates(&us_i.C)) - .collect(), - get_cm_coordinates(&U_i1.C), - ] - .concat(); - - let cf_circuit = HyperNovaCycleFoldCircuit:: { - _gc: PhantomData, + let cf_circuit = HyperNovaCycleFoldCircuit:: { r_bits: Some(rho_bits.clone()), points: Some( [ @@ -1371,7 +1323,6 @@ mod tests { ] .concat(), ), - x: Some(cf_u_i_x.clone()), }; // ensure that the CycleFoldCircuit is well defined @@ -1380,12 +1331,9 @@ mod tests { HyperNovaCycleFoldConfig::::N_INPUT_POINTS ); - let (_cf_w_i, cf_u_i, cf_W_i1, cf_U_i1, cf_cmT, _) = fold_cyclefold_circuit::< + let (cf_u_i, cf_W_i1, cf_U_i1, cf_cmT) = fold_cyclefold_circuit::< HyperNovaCycleFoldConfig, - Projective, - GVar, Projective2, - GVar2, Pedersen, false, >( @@ -1395,73 +1343,67 @@ mod tests { pp_hash, cf_W_i.clone(), // CycleFold running instance witness cf_U_i.clone(), // CycleFold running instance - cf_u_i_x, // CycleFold incoming instance cf_circuit, &mut rng, - ) - .unwrap(); + )?; // hash the CycleFold folded instance, which is used as the 2nd public input in the // AugmentedFCircuit - let cf_u_i1_x = cf_U_i1.hash_cyclefold(&sponge, pp_hash); - - augmented_f_circuit = AugmentedFCircuit::< - Projective, - Projective2, - GVar2, - CubicFCircuit, - MU, - NU, - > { - _c2: PhantomData, - _gc2: PhantomData, - poseidon_config: poseidon_config.clone(), - ccs: ccs.clone(), - pp_hash: Some(pp_hash), - i: Some(iFr), - i_usize: Some(i), - z_0: Some(z_0.clone()), - z_i: Some(z_i.clone()), - external_inputs: Some(vec![]), - U_i: Some(U_i.clone()), - Us: Some(Us.clone()), - u_i_C: Some(u_i.C), - us: Some(us.clone()), - U_i1_C: Some(U_i1.C), - F: F_circuit, - x: Some(u_i1_x), - nimfs_proof: Some(nimfs_proof), - - // cyclefold values - cf_u_i_cmW: Some(cf_u_i.cmW), - cf_U_i: Some(cf_U_i), - cf_x: Some(cf_u_i1_x), - cf_cmT: Some(cf_cmT), - }; + cf_u_i1_x = cf_U_i1.hash_cyclefold(&sponge, 
pp_hash); + + augmented_f_circuit = + AugmentedFCircuit::, MU, NU> { + poseidon_config: poseidon_config.clone(), + ccs: ccs.clone(), + pp_hash: Some(pp_hash), + i: Some(iFr), + i_usize: Some(i), + z_0: Some(z_0.clone()), + z_i: Some(z_i.clone()), + external_inputs: Some(()), + U_i: Some(U_i.clone()), + Us: Some(Us.clone()), + u_i_C: Some(u_i.C), + us: Some(us.clone()), + U_i1_C: Some(U_i1.C), + F: F_circuit, + nimfs_proof: Some(nimfs_proof), + + // cyclefold values + cf_u_i_cmW: Some(cf_u_i.cmW), + cf_U_i: Some(cf_U_i), + cf_cmT: Some(cf_cmT), + }; // assign the next round instances cf_W_i = cf_W_i1; cf_U_i = cf_U_i1; } - let (cs, _) = augmented_f_circuit.compute_cs_ccs().unwrap(); - assert!(cs.is_satisfied().unwrap()); + let cs = ConstraintSystem::::new_ref(); + augmented_f_circuit + .clone() + .generate_constraints(cs.clone())?; + let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; + assert!(cs.is_satisfied()?); let (r1cs_w_i1, r1cs_x_i1) = extract_w_x::(&cs); // includes 1 and public inputs - assert_eq!(r1cs_x_i1[0], augmented_f_circuit.x.unwrap()); + assert_eq!(r1cs_x_i1[0], u_i1_x); let r1cs_z = [vec![Fr::one()], r1cs_x_i1.clone(), r1cs_w_i1.clone()].concat(); // compute committed instances, w_{i+1}, u_{i+1}, which will be used as w_i, u_i, so we // assign them directly to w_i, u_i. - (u_i, w_i) = ccs - .to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &r1cs_z) - .unwrap(); - ccs.check_relation(&w_i, &u_i).unwrap(); + (u_i, w_i) = ccs.to_cccs::<_, _, Pedersen, false>( + &mut rng, + &pedersen_params, + &r1cs_z, + )?; + ccs.check_relation(&w_i, &u_i)?; // sanity checks assert_eq!(w_i.w, r1cs_w_i1); assert_eq!(u_i.x, r1cs_x_i1); - assert_eq!(u_i.x[0], augmented_f_circuit.x.unwrap()); - assert_eq!(u_i.x[1], augmented_f_circuit.cf_x.unwrap()); + assert_eq!(u_i.x[0], u_i1_x); + assert_eq!(u_i.x[1], cf_u_i1_x); let expected_u_i1_x = U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), &z_0, &z_i1); let expected_cf_U_i1_x = cf_U_i.hash_cyclefold(&sponge, pp_hash); // u_i is already u_i1 at this point, check that has the expected value at x[0] @@ -1476,14 +1418,15 @@ mod tests { W_i = W_i1.clone(); // check the new LCCCS instance relation - ccs.check_relation(&W_i, &U_i).unwrap(); + ccs.check_relation(&W_i, &U_i)?; // check the new CCCS instance relation - ccs.check_relation(&w_i, &u_i).unwrap(); + ccs.check_relation(&w_i, &u_i)?; // check the CycleFold instance relation - cf_r1cs.check_relation(&cf_W_i, &cf_U_i).unwrap(); + cf_r1cs.check_relation(&cf_W_i, &cf_U_i)?; println!("augmented_f_circuit step {}: {:?}", i, start.elapsed()); } + Ok(()) } } diff --git a/folding-schemes/src/folding/hypernova/decider_eth.rs b/folding-schemes/src/folding/hypernova/decider_eth.rs index fef400a5..8389a1cd 100644 --- a/folding-schemes/src/folding/hypernova/decider_eth.rs +++ b/folding-schemes/src/folding/hypernova/decider_eth.rs @@ -1,8 +1,4 @@ /// This file implements the HyperNova's onchain (Ethereum's EVM) decider. 
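// Usage sketch (mirrors the tests at the end of this file; the `HN`/`D` aliases and the test
// module's imports are assumptions here): after this change the decider is preprocessed from
// the folding parameters plus the FCircuit state length, so the SNARK setup no longer needs a
// live HyperNova instance that has already run folding steps.
fn setup_decider_without_folding() -> Result<(), Error> {
    let mut rng = rand::rngs::OsRng;
    let poseidon_config = poseidon_canonical_config::<Fr>();
    let F_circuit = CubicFCircuit::<Fr>::new(())?;
    let prep_param = PreprocessorParam::new(poseidon_config, F_circuit);
    let hypernova_params = HN::preprocess(&mut rng, &prep_param)?;
    // no `HN::init` / `prove_step` calls are required before this point:
    let (_decider_pp, _decider_vp) =
        D::preprocess(&mut rng, (hypernova_params, F_circuit.state_len()))?;
    Ok(())
}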
-use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; -use ark_ff::PrimeField; -use ark_r1cs_std::{prelude::CurveVar, ToConstraintFieldGadget}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_snark::SNARK; use ark_std::rand::{CryptoRng, RngCore}; @@ -16,17 +12,16 @@ use crate::commitment::{ kzg::Proof as KZGProof, pedersen::Params as PedersenParams, CommitmentScheme, }; use crate::folding::circuits::decider::DeciderEnabledNIFS; -use crate::folding::circuits::CF2; use crate::folding::nova::decider_eth::VerifierParam; -use crate::folding::traits::{Inputize, WitnessOps}; +use crate::folding::traits::{Dummy, WitnessOps}; use crate::frontend::FCircuit; -use crate::Error; +use crate::{Curve, Error}; use crate::{Decider as DeciderTrait, FoldingScheme}; #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] pub struct Proof where - C1: CurveGroup, + C1: Curve, CS1: CommitmentScheme, S: SNARK, { @@ -41,11 +36,9 @@ where /// Onchain Decider, for ethereum use cases #[derive(Clone, Debug)] -pub struct Decider { +pub struct Decider { _c1: PhantomData, - _gc1: PhantomData, _c2: PhantomData, - _gc2: PhantomData, _fc: PhantomData, _cs1: PhantomData, _cs2: PhantomData, @@ -53,13 +46,11 @@ pub struct Decider, } -impl - DeciderTrait for Decider +impl DeciderTrait + for Decider where - C1: CurveGroup, - C2: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, // CS1 is a KZG commitment, where challenge is C1::Fr elem CS1: CommitmentScheme< @@ -72,19 +63,14 @@ where CS2: CommitmentScheme>, S: SNARK, FS: FoldingScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, // constrain FS into HyperNova, since this is a Decider specifically for HyperNova - HyperNova: From, + HyperNova: From, crate::folding::hypernova::ProverParams: From<>::ProverParam>, crate::folding::hypernova::VerifierParams: From<>::VerifierParam>, { - type PreprocessorParam = (FS::ProverParam, FS::VerifierParam); + type PreprocessorParam = ((FS::ProverParam, FS::VerifierParam), usize); type ProverParam = (S::ProvingKey, CS1::ProverParams); type Proof = Proof; type VerifierParam = VerifierParam; @@ -93,30 +79,39 @@ where fn preprocess( mut rng: impl RngCore + CryptoRng, - prep_param: Self::PreprocessorParam, - fs: FS, + ((pp, vp), state_len): Self::PreprocessorParam, ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { - let circuit = DeciderEthCircuit::::try_from(HyperNova::from(fs))?; - - // get the Groth16 specific setup for the circuit - let (g16_pk, g16_vk) = S::circuit_specific_setup(circuit, &mut rng) - .map_err(|e| Error::SNARKSetupFail(e.to_string()))?; - // get the FoldingScheme prover & verifier params from HyperNova - #[allow(clippy::type_complexity)] - let hypernova_pp: as FoldingScheme< + let hypernova_pp: as FoldingScheme< C1, C2, FC, - >>::ProverParam = prep_param.0.into(); - #[allow(clippy::type_complexity)] - let hypernova_vp: as FoldingScheme< + >>::ProverParam = pp.into(); + let hypernova_vp: as FoldingScheme< C1, C2, FC, - >>::VerifierParam = prep_param.1.into(); + >>::VerifierParam = vp.into(); let pp_hash = hypernova_vp.pp_hash()?; + let s = hypernova_vp.ccs.s; + let t = hypernova_vp.ccs.t; + + let circuit = DeciderEthCircuit::::dummy(( + hypernova_vp.ccs, + hypernova_vp.cf_r1cs, + hypernova_pp.cf_cs_pp, + hypernova_pp.poseidon_config, + (s, t, MU, NU), + (), + state_len, + 1, // 
HyperNova's LCCCS contains 1 commitment + )); + + // get the Groth16 specific setup for the circuit + let (g16_pk, g16_vk) = S::circuit_specific_setup(circuit, &mut rng) + .map_err(|e| Error::SNARKSetupFail(e.to_string()))?; + let pp = (g16_pk, hypernova_pp.cs_pp); let vp = Self::VerifierParam { @@ -134,7 +129,7 @@ where ) -> Result { let (snark_pk, cs_pk): (S::ProvingKey, CS1::ProverParams) = pp; - let circuit = DeciderEthCircuit::::try_from(HyperNova::from(folding_scheme))?; + let circuit = DeciderEthCircuit::::try_from(HyperNova::from(folding_scheme))?; let rho = circuit.randomness; @@ -202,7 +197,7 @@ where &[pp_hash, i][..], &z_0, &z_i, - &C.inputize(), + &C.inputize_nonnative(), &[proof.kzg_challenge, proof.kzg_proof.eval, proof.rho], ] .concat(); @@ -223,9 +218,9 @@ where #[cfg(test)] pub mod tests { - use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; + use ark_bn254::{Bn254, Fr, G1Projective as Projective}; use ark_groth16::Groth16; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_grumpkin::Projective as Projective2; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate}; use super::*; @@ -238,15 +233,13 @@ pub mod tests { use crate::transcript::poseidon::poseidon_canonical_config; #[test] - fn test_decider() { + fn test_decider() -> Result<(), Error> { const MU: usize = 1; const NU: usize = 1; // use HyperNova as FoldingScheme type HN = HyperNova< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, @@ -256,9 +249,7 @@ pub mod tests { >; type D = Decider< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, @@ -271,26 +262,22 @@ pub mod tests { let mut rng = rand::rngs::OsRng; let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let z_0 = vec![Fr::from(3_u32)]; let prep_param = PreprocessorParam::new(poseidon_config, F_circuit); - let hypernova_params = HN::preprocess(&mut rng, &prep_param).unwrap(); + let hypernova_params = HN::preprocess(&mut rng, &prep_param)?; - let mut hypernova = HN::init(&hypernova_params, F_circuit, z_0.clone()).unwrap(); - hypernova - .prove_step(&mut rng, vec![], Some((vec![], vec![]))) - .unwrap(); - hypernova - .prove_step(&mut rng, vec![], Some((vec![], vec![]))) - .unwrap(); // do a 2nd step + let mut hypernova = HN::init(&hypernova_params, F_circuit, z_0.clone())?; + hypernova.prove_step(&mut rng, (), Some((vec![], vec![])))?; + hypernova.prove_step(&mut rng, (), Some((vec![], vec![])))?; // do a 2nd step // prepare the Decider prover & verifier params let (decider_pp, decider_vp) = - D::preprocess(&mut rng, hypernova_params, hypernova.clone()).unwrap(); + D::preprocess(&mut rng, (hypernova_params, F_circuit.state_len()))?; // decider proof generation - let proof = D::prove(rng, decider_pp, hypernova.clone()).unwrap(); + let proof = D::prove(rng, decider_pp, hypernova.clone())?; // decider proof verification let verified = D::verify( @@ -301,21 +288,19 @@ pub mod tests { &hypernova.U_i.get_commitments(), &hypernova.u_i.get_commitments(), &proof, - ) - .unwrap(); + )?; assert!(verified); + Ok(()) } #[test] - fn test_decider_serialization() { + fn test_decider_serialization() -> Result<(), Error> { const MU: usize = 1; const NU: usize = 1; // use HyperNova as FoldingScheme type HN = HyperNova< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, @@ -325,9 +310,7 
@@ pub mod tests { >; type D = Decider< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, @@ -340,61 +323,51 @@ pub mod tests { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let z_0 = vec![Fr::from(3_u32)]; let prep_param = PreprocessorParam::new(poseidon_config.clone(), F_circuit); - let hypernova_params = HN::preprocess(&mut rng, &prep_param).unwrap(); - - let hypernova = HN::init(&hypernova_params, F_circuit, z_0.clone()).unwrap(); + let hypernova_params = HN::preprocess(&mut rng, &prep_param)?; let mut rng = rand::rngs::OsRng; // prepare the Decider prover & verifier params let (decider_pp, decider_vp) = - D::preprocess(&mut rng, hypernova_params.clone(), hypernova.clone()).unwrap(); + D::preprocess(&mut rng, (hypernova_params.clone(), F_circuit.state_len()))?; let mut hypernova_pp_serialized = vec![]; hypernova_params .0 .clone() - .serialize_compressed(&mut hypernova_pp_serialized) - .unwrap(); + .serialize_compressed(&mut hypernova_pp_serialized)?; let mut hypernova_vp_serialized = vec![]; hypernova_params .1 .clone() - .serialize_compressed(&mut hypernova_vp_serialized) - .unwrap(); + .serialize_compressed(&mut hypernova_vp_serialized)?; let hypernova_pp_deserialized = HN::pp_deserialize_with_mode( hypernova_pp_serialized.as_slice(), Compress::Yes, Validate::No, (), // FCircuit's Params - ) - .unwrap(); + )?; let hypernova_vp_deserialized = HN::vp_deserialize_with_mode( hypernova_vp_serialized.as_slice(), Compress::Yes, Validate::No, (), // FCircuit's Params - ) - .unwrap(); + )?; let hypernova_params = (hypernova_pp_deserialized, hypernova_vp_deserialized); - let mut hypernova = HN::init(&hypernova_params, F_circuit, z_0.clone()).unwrap(); + let mut hypernova = HN::init(&hypernova_params, F_circuit, z_0.clone())?; - hypernova - .prove_step(&mut rng, vec![], Some((vec![], vec![]))) - .unwrap(); - hypernova - .prove_step(&mut rng, vec![], Some((vec![], vec![]))) - .unwrap(); + hypernova.prove_step(&mut rng, (), Some((vec![], vec![])))?; + hypernova.prove_step(&mut rng, (), Some((vec![], vec![])))?; // decider proof generation - let proof = D::prove(rng, decider_pp, hypernova.clone()).unwrap(); + let proof = D::prove(rng, decider_pp, hypernova.clone())?; let verified = D::verify( decider_vp.clone(), @@ -404,8 +377,7 @@ pub mod tests { &hypernova.U_i.get_commitments(), &hypernova.u_i.get_commitments(), &proof, - ) - .unwrap(); + )?; assert!(verified); // The rest of this test will serialize the data and deserialize it back, and use it to @@ -413,33 +385,26 @@ pub mod tests { // serialize the verifier_params, proof and public inputs let mut decider_vp_serialized = vec![]; - decider_vp - .serialize_compressed(&mut decider_vp_serialized) - .unwrap(); + decider_vp.serialize_compressed(&mut decider_vp_serialized)?; let mut proof_serialized = vec![]; - proof.serialize_compressed(&mut proof_serialized).unwrap(); + proof.serialize_compressed(&mut proof_serialized)?; // serialize the public inputs in a single packet let mut public_inputs_serialized = vec![]; hypernova .i - .serialize_compressed(&mut public_inputs_serialized) - .unwrap(); + .serialize_compressed(&mut public_inputs_serialized)?; hypernova .z_0 - .serialize_compressed(&mut public_inputs_serialized) - .unwrap(); + .serialize_compressed(&mut public_inputs_serialized)?; hypernova .z_i - .serialize_compressed(&mut public_inputs_serialized) - .unwrap(); + 
.serialize_compressed(&mut public_inputs_serialized)?; hypernova .U_i - .serialize_compressed(&mut public_inputs_serialized) - .unwrap(); + .serialize_compressed(&mut public_inputs_serialized)?; hypernova .u_i - .serialize_compressed(&mut public_inputs_serialized) - .unwrap(); + .serialize_compressed(&mut public_inputs_serialized)?; // deserialize back the verifier_params, proof and public inputs let decider_vp_deserialized = @@ -447,21 +412,19 @@ pub mod tests { Projective, as CommitmentScheme>::VerifierParams, as SNARK>::VerifyingKey, - >::deserialize_compressed(&mut decider_vp_serialized.as_slice()) - .unwrap(); + >::deserialize_compressed(&mut decider_vp_serialized.as_slice())?; let proof_deserialized = Proof::, Groth16>::deserialize_compressed( &mut proof_serialized.as_slice(), - ) - .unwrap(); + )?; let mut reader = public_inputs_serialized.as_slice(); - let i_deserialized = Fr::deserialize_compressed(&mut reader).unwrap(); - let z_0_deserialized = Vec::::deserialize_compressed(&mut reader).unwrap(); - let z_i_deserialized = Vec::::deserialize_compressed(&mut reader).unwrap(); - let _U_i = LCCCS::::deserialize_compressed(&mut reader).unwrap(); - let _u_i = CCCS::::deserialize_compressed(&mut reader).unwrap(); + let i_deserialized = Fr::deserialize_compressed(&mut reader)?; + let z_0_deserialized = Vec::::deserialize_compressed(&mut reader)?; + let z_i_deserialized = Vec::::deserialize_compressed(&mut reader)?; + let _U_i = LCCCS::::deserialize_compressed(&mut reader)?; + let _u_i = CCCS::::deserialize_compressed(&mut reader)?; let verified = D::verify( decider_vp_deserialized, @@ -471,8 +434,8 @@ pub mod tests { &hypernova.U_i.get_commitments(), &hypernova.u_i.get_commitments(), &proof_deserialized, - ) - .unwrap(); + )?; assert!(verified); + Ok(()) } } diff --git a/folding-schemes/src/folding/hypernova/decider_eth_circuit.rs b/folding-schemes/src/folding/hypernova/decider_eth_circuit.rs index 71fa91b2..1f68b789 100644 --- a/folding-schemes/src/folding/hypernova/decider_eth_circuit.rs +++ b/folding-schemes/src/folding/hypernova/decider_eth_circuit.rs @@ -3,17 +3,14 @@ use ark_crypto_primitives::sponge::{ constraints::CryptographicSpongeVar, poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge}, - Absorb, CryptographicSponge, + CryptographicSponge, }; -use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, boolean::Boolean, eq::EqGadget, fields::fp::FpVar, - prelude::CurveVar, - ToConstraintFieldGadget, }; use ark_relations::r1cs::{Namespace, SynthesisError}; use ark_std::{borrow::Borrow, log2, marker::PhantomData}; @@ -23,7 +20,7 @@ use super::{ nimfs::{NIMFSProof, NIMFS}, HyperNova, Witness, CCCS, LCCCS, }; -use crate::folding::circuits::{decider::on_chain::GenericOnchainDeciderCircuit, CF1, CF2}; +use crate::folding::circuits::{decider::on_chain::GenericOnchainDeciderCircuit, CF1}; use crate::folding::traits::{WitnessOps, WitnessVarOps}; use crate::frontend::FCircuit; use crate::utils::gadgets::{eval_mle, MatrixGadget}; @@ -31,16 +28,17 @@ use crate::Error; use crate::{ arith::{ ccs::{circuits::CCSMatricesVar, CCS}, - ArithGadget, + ArithRelationGadget, }, folding::circuits::decider::{EvalGadget, KZGChallengesGadget}, }; use crate::{ commitment::{pedersen::Params as PedersenParams, CommitmentScheme}, folding::circuits::decider::DeciderEnabledNIFS, + Curve, }; -impl ArithGadget>, LCCCSVar> for CCSMatricesVar> { +impl ArithRelationGadget>, LCCCSVar> for CCSMatricesVar> { type Evaluation = Vec>>; fn eval_relation( @@ -100,10 +98,9 
@@ impl WitnessVarOps for WitnessVar { } } -pub type DeciderEthCircuit = GenericOnchainDeciderCircuit< +pub type DeciderEthCircuit = GenericOnchainDeciderCircuit< C1, C2, - GC2, LCCCS, CCCS, Witness>, @@ -113,10 +110,8 @@ pub type DeciderEthCircuit = GenericOnchainDeciderCircuit< >; impl< - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, // enforce that the CS2 is Pedersen commitment scheme, since we're at Ethereum's EVM decider @@ -124,14 +119,11 @@ impl< const MU: usize, const NU: usize, const H: bool, - > TryFrom> - for DeciderEthCircuit -where - CF1: Absorb, + > TryFrom> for DeciderEthCircuit { type Error = Error; - fn try_from(hn: HyperNova) -> Result { + fn try_from(hn: HyperNova) -> Result { // compute the U_{i+1}, W_{i+1}, by folding the last running & incoming instances let mut transcript = PoseidonSponge::::new(&hn.poseidon_config); transcript.absorb(&hn.pp_hash); @@ -156,7 +148,6 @@ where .collect::, _>>()?; Ok(Self { - _gc2: PhantomData, _avar: PhantomData, arith: hn.ccs, cf_arith: hn.cf_r1cs, @@ -184,10 +175,8 @@ where pub struct DeciderHyperNovaGadget; -impl DeciderEnabledNIFS, CCCS, Witness, CCS>> +impl DeciderEnabledNIFS, CCCS, Witness, CCS>> for DeciderHyperNovaGadget -where - CF1: Absorb, { type ProofDummyCfg = (usize, usize, usize, usize); type Proof = NIMFSProof; @@ -217,7 +206,7 @@ where nimfs_proof, Boolean::TRUE, // enabled )?; - Boolean::le_bits_to_fp_var(&rho_bits)?.enforce_equal(&rho)?; + Boolean::le_bits_to_fp(&rho_bits)?.enforce_equal(&rho)?; Ok(computed_U_i1) } @@ -236,13 +225,13 @@ where #[cfg(test)] pub mod tests { - use ark_bn254::{constraints::GVar, Fr, G1Projective as Projective}; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_bn254::{Fr, G1Projective as Projective}; + use ark_grumpkin::Projective as Projective2; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; use ark_std::{test_rng, UniformRand}; use super::*; - use crate::arith::r1cs::R1CS; + use crate::arith::{r1cs::R1CS, Arith}; use crate::commitment::pedersen::Pedersen; use crate::folding::nova::PreprocessorParam; use crate::frontend::utils::CubicFCircuit; @@ -250,7 +239,7 @@ pub mod tests { use crate::FoldingScheme; #[test] - fn test_lcccs_checker_gadget() { + fn test_lcccs_checker_gadget() -> Result<(), Error> { let mut rng = test_rng(); let n_rows = 2_u32.pow(5) as usize; let n_cols = 2_u32.pow(5) as usize; @@ -258,31 +247,33 @@ pub mod tests { let ccs = CCS::from(r1cs); let z: Vec = (0..n_cols).map(|_| Fr::rand(&mut rng)).collect(); - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; - let (lcccs, w) = ccs - .to_lcccs::<_, Projective, Pedersen, false>(&mut rng, &pedersen_params, &z) - .unwrap(); + let (lcccs, w) = ccs.to_lcccs::<_, Projective, Pedersen, false>( + &mut rng, + &pedersen_params, + &z, + )?; let cs = ConstraintSystem::::new_ref(); // CCS's (sparse) matrices are constants in the circuit - let ccs_mat = CCSMatricesVar::::new_constant(cs.clone(), ccs.clone()).unwrap(); - let w_var = WitnessVar::new_witness(cs.clone(), || Ok(w)).unwrap(); - let lcccs_var = LCCCSVar::new_input(cs.clone(), || Ok(lcccs)).unwrap(); + let ccs_mat = CCSMatricesVar::::new_constant(cs.clone(), ccs.clone())?; + let w_var = WitnessVar::new_witness(cs.clone(), || Ok(w))?; + let lcccs_var = 
LCCCSVar::new_input(cs.clone(), || Ok(lcccs))?; - ccs_mat.enforce_relation(&w_var, &lcccs_var).unwrap(); + ccs_mat.enforce_relation(&w_var, &lcccs_var)?; - assert!(cs.is_satisfied().unwrap()); + assert!(cs.is_satisfied()?); + Ok(()) } #[test] - fn test_decider_circuit() { + fn test_decider_circuit() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let z_0 = vec![Fr::from(3_u32)]; const MU: usize = 1; @@ -290,9 +281,7 @@ pub mod tests { type HN = HyperNova< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, Pedersen, Pedersen, @@ -308,24 +297,24 @@ pub mod tests { Pedersen, false, >::new(poseidon_config, F_circuit); - let hn_params = HN::preprocess(&mut rng, &prep_param).unwrap(); + let hn_params = HN::preprocess(&mut rng, &prep_param)?; // generate a Nova instance and do a step of it - let mut hypernova = HN::init(&hn_params, F_circuit, z_0.clone()).unwrap(); - hypernova.prove_step(&mut rng, vec![], None).unwrap(); + let mut hypernova = HN::init(&hn_params, F_circuit, z_0.clone())?; + hypernova.prove_step(&mut rng, (), None)?; let ivc_proof = hypernova.ivc_proof(); - HN::verify(hn_params.1, ivc_proof).unwrap(); + HN::verify(hn_params.1, ivc_proof)?; // load the DeciderEthCircuit from the generated Nova instance - let decider_circuit = - DeciderEthCircuit::::try_from(hypernova).unwrap(); + let decider_circuit = DeciderEthCircuit::::try_from(hypernova)?; let cs = ConstraintSystem::::new_ref(); // generate the constraints and check that are satisfied by the inputs - decider_circuit.generate_constraints(cs.clone()).unwrap(); - assert!(cs.is_satisfied().unwrap()); + decider_circuit.generate_constraints(cs.clone())?; + assert!(cs.is_satisfied()?); dbg!(cs.num_constraints()); + Ok(()) } } diff --git a/folding-schemes/src/folding/hypernova/lcccs.rs b/folding-schemes/src/folding/hypernova/lcccs.rs index 8784d21e..1336cd49 100644 --- a/folding-schemes/src/folding/hypernova/lcccs.rs +++ b/folding-schemes/src/folding/hypernova/lcccs.rs @@ -1,8 +1,6 @@ use ark_crypto_primitives::sponge::Absorb; -use ark_ec::CurveGroup; use ark_ff::PrimeField; -use ark_poly::DenseMultilinearExtension; -use ark_poly::MultilinearExtension; +use ark_poly::Polynomial; use ark_serialize::CanonicalDeserialize; use ark_serialize::CanonicalSerialize; use ark_std::rand::Rng; @@ -11,19 +9,18 @@ use ark_std::Zero; use super::circuits::LCCCSVar; use super::Witness; use crate::arith::ccs::CCS; -use crate::arith::Arith; +use crate::arith::{Arith, ArithRelation}; use crate::commitment::CommitmentScheme; use crate::folding::circuits::CF1; use crate::folding::traits::Inputize; use crate::folding::traits::{CommittedInstanceOps, Dummy}; -use crate::transcript::AbsorbNonNative; use crate::utils::mle::dense_vec_to_dense_mle; use crate::utils::vec::mat_vec_mul; -use crate::Error; +use crate::{Curve, Error}; /// Linearized Committed CCS instance #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct LCCCS { +pub struct LCCCS { // Commitment to witness pub C: C, // Relaxation factor of z for folded LCCCS @@ -41,61 +38,59 @@ impl CCS { &self, rng: &mut R, cs_params: &CS::ProverParams, - z: &[C::ScalarField], - ) -> Result<(LCCCS, Witness), Error> + z: &[F], + ) -> Result<(LCCCS, Witness), Error> where // enforce that CCS's F is the C::ScalarField - C: CurveGroup, + C: Curve, { - let w: Vec = z[(1 + self.l)..].to_vec(); + let (w, x) = 
self.split_z(z); // if the commitment scheme is set to be hiding, set the random blinding parameter let r_w = if CS::is_hiding() { - C::ScalarField::rand(rng) + F::rand(rng) } else { - C::ScalarField::zero() + F::zero() }; let C = CS::commit(cs_params, &w, &r_w)?; - let r_x: Vec = (0..self.s).map(|_| C::ScalarField::rand(rng)).collect(); - - let Mzs: Vec> = self - .M - .iter() - .map(|M_j| Ok(dense_vec_to_dense_mle(self.s, &mat_vec_mul(M_j, z)?))) - .collect::>()?; + let r_x: Vec = (0..self.s).map(|_| F::rand(rng)).collect(); // compute v_j - let v: Vec = Mzs + let v = self + .M .iter() - .map(|Mz| Mz.evaluate(&r_x).ok_or(Error::EvaluationFail)) + .map(|M_j| { + let Mz = dense_vec_to_dense_mle(self.s, &mat_vec_mul(M_j, z)?); + Ok(Mz.evaluate(&r_x)) + }) .collect::>()?; Ok(( LCCCS:: { C, u: z[0], - x: z[1..(1 + self.l)].to_vec(), + x, r_x, v, }, - Witness:: { w, r_w }, + Witness:: { w, r_w }, )) } } -impl Dummy<&CCS>> for LCCCS { +impl Dummy<&CCS>> for LCCCS { fn dummy(ccs: &CCS>) -> Self { Self { C: C::zero(), u: CF1::::zero(), - x: vec![CF1::::zero(); ccs.l], + x: vec![CF1::::zero(); ccs.n_public_inputs()], r_x: vec![CF1::::zero(); ccs.s], v: vec![CF1::::zero(); ccs.t], } } } -impl Arith>, LCCCS> for CCS> { +impl ArithRelation>, LCCCS> for CCS> { type Evaluation = Vec>; /// Perform the check of the LCCCS instance described at section 4.2, @@ -107,7 +102,7 @@ impl Arith>, LCCCS> for CCS> { .iter() .map(|M_j| { let Mz_mle = dense_vec_to_dense_mle(self.s, &mat_vec_mul(M_j, &z)?); - Mz_mle.evaluate(&u.r_x).ok_or(Error::EvaluationFail) + Ok(Mz_mle.evaluate(&u.r_x)) }) .collect() } @@ -121,21 +116,13 @@ impl Arith>, LCCCS> for CCS> { } } -impl Absorb for LCCCS -where - C::ScalarField: Absorb, -{ +impl Absorb for LCCCS { fn to_sponge_bytes(&self, dest: &mut Vec) { C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest); } fn to_sponge_field_elements(&self, dest: &mut Vec) { - // We cannot call `to_native_sponge_field_elements(dest)` directly, as - // `to_native_sponge_field_elements` needs `F` to be `C::ScalarField`, - // but here `F` is a generic `PrimeField`. - self.C - .to_native_sponge_field_elements_as_vec() - .to_sponge_field_elements(dest); + self.C.to_native_sponge_field_elements(dest); self.u.to_sponge_field_elements(dest); self.x.to_sponge_field_elements(dest); self.r_x.to_sponge_field_elements(dest); @@ -143,7 +130,7 @@ where } } -impl CommittedInstanceOps for LCCCS { +impl CommittedInstanceOps for LCCCS { type Var = LCCCSVar; fn get_commitments(&self) -> Vec { @@ -155,10 +142,12 @@ impl CommittedInstanceOps for LCCCS { } } -impl Inputize> for LCCCS { - fn inputize(&self) -> Vec { +impl Inputize> for LCCCS { + /// Returns the internal representation in the same order as how the value + /// is allocated in `LCCCS::new_input`. 
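// Test-style sketch (not part of this diff) of the contract stated above: the field elements
// returned by `inputize` are meant to line up with the public inputs allocated by
// `LCCCSVar::new_input`, since verifiers feed `inputize()` output directly as the instance
// assignment. Curve/field choices and imports follow the tests below (ark_pallas); treat them
// as assumptions.
fn inputize_matches_input_allocation(lcccs: LCCCS<Projective>) -> Result<(), Error> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    let _lcccs_var = LCCCSVar::<Projective>::new_input(cs.clone(), || Ok(lcccs.clone()))?;
    // `num_instance_variables` also counts the constant `1` at position 0, hence the `+ 1`
    assert_eq!(cs.num_instance_variables(), lcccs.inputize().len() + 1);
    Ok(())
}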
+ fn inputize(&self) -> Vec> { [ - &self.C.inputize(), + &self.C.inputize_nonnative(), &[self.u][..], &self.x, &self.r_x, @@ -171,48 +160,46 @@ impl Inputize> for LCCCS { #[cfg(test)] pub mod tests { use ark_pallas::{Fr, Projective}; - use ark_std::test_rng; - use ark_std::One; - use ark_std::UniformRand; - use std::sync::Arc; + use ark_std::{sync::Arc, test_rng, One, UniformRand}; use super::*; use crate::arith::{ ccs::tests::{get_test_ccs, get_test_z}, r1cs::R1CS, - Arith, + ArithRelation, }; use crate::commitment::pedersen::Pedersen; use crate::utils::hypercube::BooleanHypercube; use crate::utils::virtual_polynomial::{build_eq_x_r_vec, VirtualPolynomial}; // method for testing - pub fn compute_Ls( + pub fn compute_Ls( ccs: &CCS, lcccs: &LCCCS, z: &[C::ScalarField], - ) -> Vec> { - let eq_rx = build_eq_x_r_vec(&lcccs.r_x).unwrap(); - let eq_rx_mle = dense_vec_to_dense_mle(ccs.s, &eq_rx); - - let mut Ls = Vec::with_capacity(ccs.t); - for M_j in ccs.M.iter() { - let mut L = VirtualPolynomial::::new(ccs.s); - let mut Mz = vec![dense_vec_to_dense_mle(ccs.s, &mat_vec_mul(M_j, z).unwrap())]; - Mz.push(eq_rx_mle.clone()); - L.add_mle_list( - Mz.iter().map(|v| Arc::new(v.clone())), - C::ScalarField::one(), - ) - .unwrap(); - Ls.push(L); - } - Ls + ) -> Result>, Error> { + let eq_rx = build_eq_x_r_vec(&lcccs.r_x)?; + let eq_rx_mle = Arc::new(dense_vec_to_dense_mle(ccs.s, &eq_rx)); + + let Ls = ccs + .M + .iter() + .map(|M_j| { + let mut L = VirtualPolynomial::::new(ccs.s); + let Mz = vec![ + Arc::new(dense_vec_to_dense_mle(ccs.s, &mat_vec_mul(M_j, z)?)), + eq_rx_mle.clone(), + ]; + L.add_mle_list(Mz, C::ScalarField::one())?; + Ok(L) + }) + .collect::, Error>>()?; + Ok(Ls) } #[test] /// Test linearized CCCS v_j against the L_j(x) - fn test_lcccs_v_j() { + fn test_lcccs_v_j() -> Result<(), Error> { let mut rng = test_rng(); let n_rows = 2_u32.pow(5) as usize; @@ -221,39 +208,39 @@ pub mod tests { let ccs = CCS::from(r1cs); let z: Vec = (0..n_cols).map(|_| Fr::rand(&mut rng)).collect(); - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; - let (lcccs, _) = ccs - .to_lcccs::<_, Projective, Pedersen, false>( - &mut rng, - &pedersen_params, - &z, - ) - .unwrap(); + let (lcccs, _) = ccs.to_lcccs::<_, Projective, Pedersen, false>( + &mut rng, + &pedersen_params, + &z, + )?; // with our test vector coming from R1CS, v should have length 3 assert_eq!(lcccs.v.len(), 3); - let vec_L_j_x = compute_Ls(&ccs, &lcccs, &z); + let vec_L_j_x = compute_Ls(&ccs, &lcccs, &z)?; assert_eq!(vec_L_j_x.len(), lcccs.v.len()); for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { let sum_L_j_x = BooleanHypercube::new(ccs.s) - .map(|y| L_j_x.evaluate(&y).unwrap()) + .map(|y| L_j_x.evaluate(&y)) + .collect::, _>>()? 
+ .into_iter() .fold(Fr::zero(), |acc, result| acc + result); assert_eq!(v_i, sum_L_j_x); } + Ok(()) } /// Given a bad z, check that the v_j should not match with the L_j(x) #[test] - fn test_bad_v_j() { + fn test_bad_v_j() -> Result<(), Error> { let mut rng = test_rng(); let ccs = get_test_ccs(); let z = get_test_z(3); let (w, x) = ccs.split_z(&z); - ccs.check_relation(&w, &x).unwrap(); + ccs.check_relation(&w, &x)?; // Mutate z so that the relation does not hold let mut bad_z = z.clone(); @@ -261,17 +248,18 @@ pub mod tests { let (bad_w, bad_x) = ccs.split_z(&bad_z); assert!(ccs.check_relation(&bad_w, &bad_x).is_err()); - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; // Compute v_j with the right z - let (lcccs, _) = ccs - .to_lcccs::<_, Projective, Pedersen, false>(&mut rng, &pedersen_params, &z) - .unwrap(); + let (lcccs, _) = ccs.to_lcccs::<_, Projective, Pedersen, false>( + &mut rng, + &pedersen_params, + &z, + )?; // with our test vector coming from R1CS, v should have length 3 assert_eq!(lcccs.v.len(), 3); // Bad compute L_j(x) with the bad z - let vec_L_j_x = compute_Ls(&ccs, &lcccs, &bad_z); + let vec_L_j_x = compute_Ls(&ccs, &lcccs, &bad_z)?; assert_eq!(vec_L_j_x.len(), lcccs.v.len()); // Make sure that the LCCCS is not satisfied given these L_j(x) @@ -279,12 +267,15 @@ pub mod tests { let mut satisfied = true; for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { let sum_L_j_x = BooleanHypercube::new(ccs.s) - .map(|y| L_j_x.evaluate(&y).unwrap()) + .map(|y| L_j_x.evaluate(&y)) + .collect::, _>>()? + .into_iter() .fold(Fr::zero(), |acc, result| acc + result); if v_i != sum_L_j_x { satisfied = false; } } assert!(!satisfied); + Ok(()) } } diff --git a/folding-schemes/src/folding/hypernova/mod.rs b/folding-schemes/src/folding/hypernova/mod.rs index 3bb1adc3..dc1807ef 100644 --- a/folding-schemes/src/folding/hypernova/mod.rs +++ b/folding-schemes/src/folding/hypernova/mod.rs @@ -1,13 +1,13 @@ /// Implements the scheme described in [HyperNova](https://eprint.iacr.org/2023/573.pdf) use ark_crypto_primitives::sponge::{ poseidon::{PoseidonConfig, PoseidonSponge}, - Absorb, CryptographicSponge, + CryptographicSponge, }; -use ark_ec::{CurveGroup, Group}; use ark_ff::{BigInteger, PrimeField}; -use ark_r1cs_std::{prelude::CurveVar, ToConstraintFieldGadget}; +use ark_r1cs_std::R1CSVar; +use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError}; -use ark_std::{fmt::Debug, marker::PhantomData, rand::RngCore, One, Zero}; +use ark_std::{cmp::max, fmt::Debug, marker::PhantomData, rand::RngCore, One, Zero}; pub mod cccs; pub mod circuits; @@ -23,50 +23,43 @@ use decider_eth_circuit::WitnessVar; use lcccs::LCCCS; use nimfs::NIMFS; +use crate::arith::{ + ccs::CCS, + r1cs::{extract_w_x, R1CS}, + Arith, ArithRelation, +}; use crate::commitment::CommitmentScheme; use crate::constants::NOVA_N_BITS_RO; use crate::folding::{ - circuits::{ - cyclefold::{ - fold_cyclefold_circuit, CycleFoldCircuit, CycleFoldCommittedInstance, CycleFoldConfig, - CycleFoldWitness, - }, - CF2, + circuits::cyclefold::{ + fold_cyclefold_circuit, CycleFoldCircuit, CycleFoldCommittedInstance, CycleFoldConfig, + CycleFoldWitness, }, nova::{get_r1cs_from_cs, PreprocessorParam}, traits::{CommittedInstanceOps, Dummy, WitnessOps}, }; use crate::frontend::FCircuit; use 
crate::transcript::poseidon::poseidon_canonical_config; -use crate::utils::{get_cm_coordinates, pp_hash}; -use crate::Error; -use crate::{ - arith::{ - ccs::CCS, - r1cs::{extract_w_x, R1CS}, - Arith, - }, - FoldingScheme, MultiFolding, -}; +use crate::utils::pp_hash; +use crate::{Curve, Error, FoldingScheme, MultiFolding}; /// Configuration for HyperNova's CycleFold circuit -pub struct HyperNovaCycleFoldConfig { +pub struct HyperNovaCycleFoldConfig { _c: PhantomData, } -impl CycleFoldConfig +impl CycleFoldConfig for HyperNovaCycleFoldConfig { const RANDOMNESS_BIT_LENGTH: usize = NOVA_N_BITS_RO; const N_INPUT_POINTS: usize = MU + NU; type C = C; - type F = C::BaseField; } /// CycleFold circuit for computing random linear combinations of group elements /// in HyperNova instances. -pub type HyperNovaCycleFoldCircuit = - CycleFoldCircuit, GC>; +pub type HyperNovaCycleFoldCircuit = + CycleFoldCircuit>; /// Witness for the LCCCS & CCCS, containing the w vector, and the r_w used as randomness in the Pedersen commitment. #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] @@ -85,7 +78,7 @@ impl Witness { impl Dummy<&CCS> for Witness { fn dummy(ccs: &CCS) -> Self { - Self::new(vec![F::zero(); ccs.n - ccs.l - 1]) + Self::new(vec![F::zero(); ccs.n_witnesses()]) } } @@ -101,8 +94,8 @@ impl WitnessOps for Witness { #[derive(Debug, Clone)] pub struct ProverParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -118,8 +111,8 @@ where } impl< - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, const H: bool, @@ -142,8 +135,8 @@ impl< /// Verification parameters for HyperNova-based IVC #[derive(Debug, Clone)] pub struct VerifierParams< - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, const H: bool, @@ -162,8 +155,8 @@ pub struct VerifierParams< impl CanonicalSerialize for VerifierParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -183,8 +176,8 @@ where impl VerifierParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -203,8 +196,8 @@ where #[derive(PartialEq, Eq, Debug, Clone, CanonicalSerialize, CanonicalDeserialize)] pub struct IVCProof where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, { pub i: C1::ScalarField, pub z_0: Vec, @@ -225,30 +218,14 @@ where /// * `MU` - the number of LCCCS instances to be folded /// * `NU` - the number of CCCS instances to be folded #[derive(Clone, Debug)] -pub struct HyperNova< - C1, - GC1, - C2, - GC2, - FC, - CS1, - CS2, - const MU: usize, - const NU: usize, - const H: bool, -> where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar>, +pub struct HyperNova +where + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, { - _gc1: PhantomData, - _c2: PhantomData, - _gc2: PhantomData, - /// CCS of the Augmented Function circuit pub ccs: CCS, /// R1CS of the CycleFold circuit @@ -278,21 +255,15 @@ pub struct HyperNova< pub cf_U_i: CycleFoldCommittedInstance, } -impl - MultiFolding for HyperNova +impl MultiFolding + for HyperNova where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: 
CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, + C1: Curve, { type RunningInstance = (LCCCS, Witness); type IncomingInstance = (CCCS, Witness); @@ -305,7 +276,7 @@ where &self, mut rng: impl RngCore, state: Vec, - external_inputs: Vec, + external_inputs: FC::ExternalInputs, ) -> Result { let r1cs_z = self.new_instance_generic(state, external_inputs)?; // compute committed instances, w_{i+1}, u_{i+1}, which will be used as w_i, u_i, so we @@ -327,7 +298,7 @@ where &self, mut rng: impl RngCore, state: Vec, - external_inputs: Vec, + external_inputs: FC::ExternalInputs, ) -> Result { let r1cs_z = self.new_instance_generic(state, external_inputs)?; // compute committed instances, w_{i+1}, u_{i+1}, which will be used as w_i, u_i, so we @@ -343,28 +314,22 @@ where } } -impl - HyperNova +impl + HyperNova where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, + C1: Curve, { /// internal helper for new_running_instance & new_incoming_instance methods, returns the R1CS /// z=[u,x,w] vector to be used to create the LCCCS & CCCS fresh instances. fn new_instance_generic( &self, state: Vec, - external_inputs: Vec, + external_inputs: FC::ExternalInputs, ) -> Result, Error> { // prepare the initial dummy instances let U_i = LCCCS::::dummy(&self.ccs); @@ -386,24 +351,10 @@ where ]; let us = vec![u_i.clone(); NU - 1]; - let z_i1 = self - .F - .step_native(0, state.clone(), external_inputs.clone())?; - // compute u_{i+1}.x let U_i1 = LCCCS::dummy(&self.ccs); - let u_i1_x = U_i1.hash( - &sponge, - self.pp_hash, - C1::ScalarField::one(), // i+1, where i=0 - &self.z_0, - &z_i1, - ); - - let cf_u_i1_x = cf_U_i.hash_cyclefold(&sponge, self.pp_hash); - let augmented_f_circuit = AugmentedFCircuit:: { - _c2: PhantomData, - _gc2: PhantomData, + + let augmented_f_circuit = AugmentedFCircuit:: { poseidon_config: self.poseidon_config.clone(), ccs: self.ccs.clone(), pp_hash: Some(self.pp_hash), @@ -418,26 +369,23 @@ where us: Some(us), U_i1_C: Some(U_i1.C), F: self.F.clone(), - x: Some(u_i1_x), nimfs_proof: None, // cyclefold values cf_u_i_cmW: None, cf_U_i: None, - cf_x: Some(cf_u_i1_x), cf_cmT: None, }; - let (cs, _) = augmented_f_circuit.compute_cs_ccs()?; + let cs = ConstraintSystem::::new_ref(); + augmented_f_circuit.generate_constraints(cs.clone())?; + let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; #[cfg(test)] assert!(cs.is_satisfied()?); let (r1cs_w_i1, r1cs_x_i1) = extract_w_x::(&cs); // includes 1 and public inputs - #[cfg(test)] - assert_eq!(r1cs_x_i1[0], augmented_f_circuit.x.unwrap()); - let r1cs_z = [ vec![C1::ScalarField::one()], r1cs_x_i1.clone(), @@ -448,21 +396,15 @@ where } } -impl - FoldingScheme for HyperNova +impl + FoldingScheme for HyperNova where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, + C1: Curve, { /// Reuse Nova's PreprocessorParam. 
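// --- Illustrative sketch (not part of the patch) ---------------------------
// The hunks above replace `external_inputs: Vec<C1::ScalarField>` with the
// associated type `FC::ExternalInputs` on the step-circuit trait. A minimal,
// std-only sketch of that pattern with simplified stand-in names: a circuit
// with no external inputs just uses `()`, so the old runtime length check
// against `external_inputs_len()` is no longer needed and callers pass `()`
// (or `Default::default()`).
trait StepCircuit {
    type ExternalInputs: Default + Clone;

    fn step(&self, state: Vec<u64>, external_inputs: Self::ExternalInputs) -> Vec<u64>;
}

struct Cubic;

impl StepCircuit for Cubic {
    // this circuit takes no external inputs
    type ExternalInputs = ();

    fn step(&self, state: Vec<u64>, _external_inputs: ()) -> Vec<u64> {
        // mirrors the cubic test circuit used in the tests: z -> z^3 + z + 5
        vec![state[0].pow(3) + state[0] + 5]
    }
}

fn main() {
    assert_eq!(Cubic.step(vec![3], ()), vec![35]);
}
// ---------------------------------------------------------------------------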
type PreprocessorParam = PreprocessorParam; @@ -488,7 +430,7 @@ where // main circuit R1CS: let f_circuit = FC::new(fc_params)?; - let augmented_F_circuit = AugmentedFCircuit::::empty( + let augmented_F_circuit = AugmentedFCircuit::::empty( &poseidon_config, f_circuit.clone(), None, @@ -519,7 +461,7 @@ where // main circuit R1CS: let f_circuit = FC::new(fc_params)?; - let augmented_F_circuit = AugmentedFCircuit::::empty( + let augmented_F_circuit = AugmentedFCircuit::::empty( &poseidon_config, f_circuit.clone(), None, @@ -527,7 +469,7 @@ where let ccs = augmented_F_circuit.ccs; // CycleFold circuit R1CS - let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); + let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); let cf_r1cs = get_r1cs_from_cs::(cf_circuit)?; let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; @@ -550,24 +492,33 @@ where return Err(Error::CantBeZero("mu,nu".to_string())); } - let augmented_f_circuit = AugmentedFCircuit::::empty( + let augmented_f_circuit = AugmentedFCircuit::::empty( &prep_param.poseidon_config, prep_param.F.clone(), None, )?; let ccs = augmented_f_circuit.ccs.clone(); - let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); + let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); let cf_r1cs = get_r1cs_from_cs::(cf_circuit)?; // if cs params exist, use them, if not, generate new ones let (cs_pp, cs_vp) = match (&prep_param.cs_pp, &prep_param.cs_vp) { (Some(cs_pp), Some(cs_vp)) => (cs_pp.clone(), cs_vp.clone()), - _ => CS1::setup(&mut rng, ccs.n - ccs.l - 1)?, + // `CS1` is for committing to HyperNova's witness vector `w`, so we + // set `len` to the number of witnesses in `r1cs`. + _ => CS1::setup(&mut rng, ccs.n_witnesses())?, }; let (cf_cs_pp, cf_cs_vp) = match (&prep_param.cf_cs_pp, &prep_param.cf_cs_vp) { (Some(cf_cs_pp), Some(cf_cs_vp)) => (cf_cs_pp.clone(), cf_cs_vp.clone()), - _ => CS2::setup(&mut rng, cf_r1cs.A.n_cols - cf_r1cs.l - 1)?, + _ => CS2::setup( + &mut rng, + // `CS2` is for committing to CycleFold's witness vector `w` and + // error term `e`, where the length of `e` is the number of + // constraints, so we set `len` to the maximum of `e` and `w`'s + // lengths. + max(cf_r1cs.n_constraints(), cf_r1cs.n_witnesses()), + )?, }; let pp = ProverParams:: { @@ -602,14 +553,14 @@ where // prepare the HyperNova's AugmentedFCircuit and CycleFold's circuits and obtain its CCS // and R1CS respectively - let augmented_f_circuit = AugmentedFCircuit::::empty( + let augmented_f_circuit = AugmentedFCircuit::::empty( &pp.poseidon_config, F.clone(), pp.ccs.clone(), )?; let ccs = augmented_f_circuit.ccs.clone(); - let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); + let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); let cf_r1cs = get_r1cs_from_cs::(cf_circuit)?; // compute the public params hash @@ -630,9 +581,6 @@ where // W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the // R1CS that we're working with. Ok(Self { - _gc1: PhantomData, - _c2: PhantomData, - _gc2: PhantomData, ccs, cf_r1cs, poseidon_config: pp.poseidon_config.clone(), @@ -657,7 +605,7 @@ where fn prove_step( &mut self, mut rng: impl RngCore, - external_inputs: Vec, + external_inputs: FC::ExternalInputs, other_instances: Option, ) -> Result<(), Error> { // ensure that commitments are blinding if user has specified so. @@ -676,9 +624,6 @@ where } } - // `sponge` is for digest computation. 
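// --- Illustrative sketch (not part of the patch) ---------------------------
// The `CS2::setup` call above sizes the CycleFold commitment key with
// `max(cf_r1cs.n_constraints(), cf_r1cs.n_witnesses())`: the scheme commits
// both to the witness vector `w` (one entry per witness) and to the error
// vector `e` (one entry per constraint), so the key must cover whichever is
// longer. A std-only sketch with hypothetical names:
use std::cmp::max;

struct R1csShape {
    n_constraints: usize,
    n_witnesses: usize,
}

fn commitment_key_len(shape: &R1csShape) -> usize {
    // enough generators for both `w` (len = n_witnesses) and `e` (len = n_constraints)
    max(shape.n_constraints, shape.n_witnesses)
}

fn main() {
    let shape = R1csShape { n_constraints: 1 << 16, n_witnesses: (1 << 16) - 37 };
    assert_eq!(commitment_key_len(&shape), 1 << 16);
}
// ---------------------------------------------------------------------------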
- let sponge = PoseidonSponge::::new(&self.poseidon_config); - let (Us, Ws, us, ws) = if MU > 1 || NU > 1 { let other_instances = other_instances.ok_or(Error::MissingOtherInstances(MU, NU))?; @@ -716,7 +661,7 @@ where (vec![], vec![], vec![], vec![]) }; - let augmented_f_circuit: AugmentedFCircuit; + let augmented_f_circuit: AugmentedFCircuit; if self.z_i.len() != self.F.state_len() { return Err(Error::NotSameLength( @@ -726,14 +671,6 @@ where self.F.state_len(), )); } - if external_inputs.len() != self.F.external_inputs_len() { - return Err(Error::NotSameLength( - "F.external_inputs_len()".to_string(), - self.F.external_inputs_len(), - "external_inputs.len()".to_string(), - external_inputs.len(), - )); - } if self.i > C1::ScalarField::from_le_bytes_mod_order(&usize::MAX.to_le_bytes()) { return Err(Error::MaxStep); @@ -755,12 +692,6 @@ where i_usize = usize::from_le_bytes(i_bytes); } - let z_i1 = self - .F - .step_native(i_usize, self.z_i.clone(), external_inputs.clone())?; - - // u_{i+1}.x[1] = H(cf_U_{i+1}) - let cf_u_i1_x: C1::ScalarField; let (U_i1, mut W_i1); if self.i == C1::ScalarField::zero() { @@ -768,21 +699,7 @@ where W_i1.r_w = self.W_i.r_w; U_i1 = LCCCS::dummy(&self.ccs); - let u_i1_x = U_i1.hash( - &sponge, - self.pp_hash, - C1::ScalarField::one(), - &self.z_0, - &z_i1, - ); - - // hash the initial (dummy) CycleFold instance, which is used as the 2nd public - // input in the AugmentedFCircuit - cf_u_i1_x = self.cf_U_i.hash_cyclefold(&sponge, self.pp_hash); - - augmented_f_circuit = AugmentedFCircuit:: { - _c2: PhantomData, - _gc2: PhantomData, + augmented_f_circuit = AugmentedFCircuit:: { poseidon_config: self.poseidon_config.clone(), ccs: self.ccs.clone(), pp_hash: Some(self.pp_hash), @@ -797,13 +714,11 @@ where us: Some(us), U_i1_C: Some(U_i1.C), F: self.F.clone(), - x: Some(u_i1_x), nimfs_proof: None, // cyclefold values cf_u_i_cmW: None, cf_U_i: None, - cf_x: Some(cf_u_i1_x), cf_cmT: None, }; } else { @@ -832,43 +747,11 @@ where #[cfg(test)] self.ccs.check_relation(&W_i1, &U_i1)?; - let u_i1_x = U_i1.hash( - &sponge, - self.pp_hash, - self.i + C1::ScalarField::one(), - &self.z_0, - &z_i1, - ); - let rho_bits = rho.into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec(); - let rho_Fq = C1::BaseField::from(::BigInt::from_bits_le( - &rho_bits, - )); // CycleFold part: - // get the vector used as public inputs 'x' in the CycleFold circuit. - // Place the random values and the points coordinates as the public input x: - // In Nova, this is: x == [r, p1, p2, p3]. 
- // In multifolding schemes such as HyperNova, this is: - // computed_x = [r, p_0, p_1, p_2, ..., p_n], - // where each p_i is in fact p_i.to_constraint_field() - let cf_u_i_x = [ - vec![rho_Fq], - all_Us - .iter() - .flat_map(|Us_i| get_cm_coordinates(&Us_i.C)) - .collect(), - all_us - .iter() - .flat_map(|us_i| get_cm_coordinates(&us_i.C)) - .collect(), - get_cm_coordinates(&U_i1.C), - ] - .concat(); - - let cf_circuit = HyperNovaCycleFoldCircuit:: { - _gc: PhantomData, - r_bits: Some(rho_bits.clone()), + let cf_circuit = HyperNovaCycleFoldCircuit:: { + r_bits: Some(rho_bits), points: Some( [ all_Us.iter().map(|Us_i| Us_i.C).collect::>(), @@ -876,34 +759,21 @@ where ] .concat(), ), - x: Some(cf_u_i_x.clone()), }; - let (_cf_w_i, cf_u_i, cf_W_i1, cf_U_i1, cf_cmT, _) = fold_cyclefold_circuit::< - HyperNovaCycleFoldConfig, - C1, - GC1, - C2, - GC2, - CS2, - H, - >( - &mut transcript_p, - self.cf_r1cs.clone(), - self.cf_cs_pp.clone(), - self.pp_hash, - self.cf_W_i.clone(), // CycleFold running instance witness - self.cf_U_i.clone(), // CycleFold running instance - cf_u_i_x, - cf_circuit, - &mut rng, - )?; - - cf_u_i1_x = cf_U_i1.hash_cyclefold(&sponge, self.pp_hash); - - augmented_f_circuit = AugmentedFCircuit:: { - _c2: PhantomData, - _gc2: PhantomData, + let (cf_u_i, cf_W_i1, cf_U_i1, cf_cmT) = + fold_cyclefold_circuit::, C2, CS2, H>( + &mut transcript_p, + self.cf_r1cs.clone(), + self.cf_cs_pp.clone(), + self.pp_hash, + self.cf_W_i.clone(), // CycleFold running instance witness + self.cf_U_i.clone(), // CycleFold running instance + cf_circuit, + &mut rng, + )?; + + augmented_f_circuit = AugmentedFCircuit:: { poseidon_config: self.poseidon_config.clone(), ccs: self.ccs.clone(), pp_hash: Some(self.pp_hash), @@ -918,13 +788,11 @@ where us: Some(us), U_i1_C: Some(U_i1.C), F: self.F.clone(), - x: Some(u_i1_x), nimfs_proof: Some(nimfs_proof), // cyclefold values cf_u_i_cmW: Some(cf_u_i.cmW), cf_U_i: Some(self.cf_U_i.clone()), - cf_x: Some(cf_u_i1_x), cf_cmT: Some(cf_cmT), }; @@ -933,7 +801,11 @@ where self.cf_U_i = cf_U_i1; } - let (cs, _) = augmented_f_circuit.compute_cs_ccs()?; + let cs = ConstraintSystem::::new_ref(); + let z_i1 = augmented_f_circuit + .compute_next_state(cs.clone())? 
+ .value()?; + let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; #[cfg(test)] assert!(cs.is_satisfied()?); @@ -957,7 +829,7 @@ where // set values for next iteration self.i += C1::ScalarField::one(); // assign z_{i+1} into z_i - self.z_i = z_i1.clone(); + self.z_i = z_i1; self.U_i = U_i1.clone(); self.W_i = W_i1.clone(); @@ -1009,20 +881,17 @@ where let (pp, vp) = params; let f_circuit = FC::new(fcircuit_params)?; - let augmented_f_circuit = AugmentedFCircuit::::empty( + let augmented_f_circuit = AugmentedFCircuit::::empty( &pp.poseidon_config, f_circuit.clone(), None, )?; - let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); + let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); let ccs = augmented_f_circuit.ccs.clone(); let cf_r1cs = get_r1cs_from_cs::(cf_circuit)?; Ok(Self { - _gc1: PhantomData, - _c2: PhantomData, - _gc2: PhantomData, ccs, cf_r1cs, poseidon_config: pp.poseidon_config, @@ -1099,8 +968,8 @@ where #[cfg(test)] mod tests { use crate::commitment::kzg::KZG; - use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_bn254::{Bn254, Fr, G1Projective as Projective}; + use ark_grumpkin::Projective as Projective2; use ark_std::UniformRand; use super::*; @@ -1109,25 +978,27 @@ mod tests { use crate::transcript::poseidon::poseidon_canonical_config; #[test] - pub fn test_ivc() { + pub fn test_ivc() -> Result<(), Error> { let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; // run the test using Pedersen commitments on both sides of the curve cycle - test_ivc_opt::, Pedersen, false>( + let _ = test_ivc_opt::, Pedersen, false>( poseidon_config.clone(), F_circuit, - ); + )?; - test_ivc_opt::, Pedersen, true>( + let _ = test_ivc_opt::, Pedersen, true>( poseidon_config.clone(), F_circuit, - ); + )?; // run the test using KZG for the commitments on the main curve, and Pedersen for the // commitments on the secondary curve - test_ivc_opt::, Pedersen, false>(poseidon_config, F_circuit); + let _ = + test_ivc_opt::, Pedersen, false>(poseidon_config, F_circuit)?; + Ok(()) } #[allow(clippy::type_complexity)] @@ -1139,24 +1010,24 @@ mod tests { >( poseidon_config: PoseidonConfig, F_circuit: CubicFCircuit, - ) { + ) -> Result<(), Error> { let mut rng = ark_std::test_rng(); const MU: usize = 2; const NU: usize = 3; type HN = - HyperNova, CS1, CS2, MU, NU, H>; + HyperNova, CS1, CS2, MU, NU, H>; let prep_param = PreprocessorParam::, CS1, CS2, H>::new( poseidon_config.clone(), F_circuit, ); - let hypernova_params = HN::preprocess(&mut rng, &prep_param).unwrap(); + let hypernova_params = HN::preprocess(&mut rng, &prep_param)?; let z_0 = vec![Fr::from(3_u32)]; - let mut hypernova = HN::init(&hypernova_params, F_circuit, z_0.clone()).unwrap(); + let mut hypernova = HN::init(&hypernova_params, F_circuit, z_0.clone())?; let (w_i_blinding, W_i_blinding) = if H { (Fr::rand(&mut rng), Fr::rand(&mut rng)) @@ -1172,23 +1043,17 @@ mod tests { let mut lcccs = vec![]; for j in 0..MU - 1 { let instance_state = vec![Fr::from(j as u32 + 85_u32)]; - let (U, W) = hypernova - .new_running_instance(&mut rng, instance_state, vec![]) - .unwrap(); + let (U, W) = hypernova.new_running_instance(&mut rng, instance_state, ())?; lcccs.push((U, W)); } let mut cccs = vec![]; for j in 0..NU - 1 { let instance_state = vec![Fr::from(j as u32 + 15_u32)]; - let (u, w) = hypernova - .new_incoming_instance(&mut rng, 
instance_state, vec![]) - .unwrap(); + let (u, w) = hypernova.new_incoming_instance(&mut rng, instance_state, ())?; cccs.push((u, w)); } - hypernova - .prove_step(&mut rng, vec![], Some((lcccs, cccs))) - .unwrap(); + hypernova.prove_step(&mut rng, (), Some((lcccs, cccs)))?; } assert_eq!(Fr::from(num_steps as u32), hypernova.i); @@ -1196,7 +1061,7 @@ mod tests { HN::verify( hypernova_params.1.clone(), // verifier_params ivc_proof, - ) - .unwrap(); + )?; + Ok(()) } } diff --git a/folding-schemes/src/folding/hypernova/nimfs.rs b/folding-schemes/src/folding/hypernova/nimfs.rs index a3b27bd6..9be284b7 100644 --- a/folding-schemes/src/folding/hypernova/nimfs.rs +++ b/folding-schemes/src/folding/hypernova/nimfs.rs @@ -1,9 +1,7 @@ -use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; use ark_ff::{BigInteger, Field, PrimeField}; use ark_poly::univariate::DensePolynomial; use ark_poly::{DenseUVPolynomial, Polynomial}; -use ark_std::{One, Zero}; +use ark_std::{fmt::Debug, marker::PhantomData, One, Zero}; use super::{ cccs::CCCS, @@ -11,7 +9,7 @@ use super::{ utils::{compute_c, compute_g, compute_sigmas_thetas}, Witness, }; -use crate::arith::ccs::CCS; +use crate::arith::{ccs::CCS, Arith}; use crate::constants::NOVA_N_BITS_RO; use crate::folding::circuits::CF1; use crate::folding::traits::Dummy; @@ -19,19 +17,16 @@ use crate::transcript::Transcript; use crate::utils::sum_check::structs::{IOPProof as SumCheckProof, IOPProverMessage}; use crate::utils::sum_check::{IOPSumCheck, SumCheck}; use crate::utils::virtual_polynomial::VPAuxInfo; -use crate::Error; - -use std::fmt::Debug; -use std::marker::PhantomData; +use crate::{Curve, Error}; /// NIMFSProof defines a multifolding proof #[derive(Clone, Debug, Eq, PartialEq)] -pub struct NIMFSProof { +pub struct NIMFSProof { pub sc_proof: SumCheckProof, pub sigmas_thetas: SigmasThetas, } -impl Dummy<(usize, usize, usize, usize)> for NIMFSProof { +impl Dummy<(usize, usize, usize, usize)> for NIMFSProof { fn dummy((s, t, mu, nu): (usize, usize, usize, usize)) -> Self { // use 'C::ScalarField::one()' instead of 'zero()' to enforce the NIMFSProof to have the // same in-circuit representation to match the number of constraints of an actual proof. 
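// --- Illustrative sketch (not part of the patch) ---------------------------
// Several hunks in this patch lean on the crate's `Dummy<Cfg>` trait (for
// instance, `Witness::dummy(ccs)` now uses `ccs.n_witnesses()`). A dummy value
// is not `Default`: its shape depends on configuration such as the CCS
// dimensions or the number of sum-check rounds. A minimal std-only sketch of
// the pattern with stand-in types:
trait Dummy<Cfg> {
    fn dummy(cfg: Cfg) -> Self;
}

struct Witness {
    w: Vec<u64>,
    r_w: u64,
}

// stand-in for `impl Dummy<&CCS<F>> for Witness<F>`: here the configuration is
// just the number of witnesses
impl Dummy<usize> for Witness {
    fn dummy(n_witnesses: usize) -> Self {
        Self { w: vec![0; n_witnesses], r_w: 0 }
    }
}

fn main() {
    let w = Witness::dummy(8);
    assert_eq!(w.w.len(), 8);
    assert_eq!(w.r_w, 0);
}
// ---------------------------------------------------------------------------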
@@ -53,7 +48,7 @@ impl Dummy<(usize, usize, usize, usize)> for NIMFSProof { } } -impl Dummy<(&CCS>, usize, usize)> for NIMFSProof { +impl Dummy<(&CCS>, usize, usize)> for NIMFSProof { fn dummy((ccs, mu, nu): (&CCS>, usize, usize)) -> Self { NIMFSProof::dummy((ccs.s, ccs.t, mu, nu)) } @@ -65,15 +60,12 @@ pub struct SigmasThetas(pub Vec>, pub Vec>); #[derive(Debug)] /// Implements the Non-Interactive Multi Folding Scheme described in section 5 of /// [HyperNova](https://eprint.iacr.org/2023/573.pdf) -pub struct NIMFS> { +pub struct NIMFS> { pub _c: PhantomData, pub _t: PhantomData, } -impl> NIMFS -where - ::ScalarField: Absorb, -{ +impl> NIMFS { pub fn fold( lcccs: &[LCCCS], cccs: &[CCCS], @@ -321,7 +313,7 @@ where let beta: Vec = transcript.get_challenges(ccs.s); let vp_aux_info = VPAuxInfo:: { - max_degree: ccs.d + 1, + max_degree: ccs.degree() + 1, num_variables: ccs.s, phantom: PhantomData::, }; @@ -403,7 +395,7 @@ pub mod tests { use super::*; use crate::arith::{ ccs::tests::{get_test_ccs, get_test_z}, - Arith, + ArithRelation, }; use crate::transcript::poseidon::poseidon_canonical_config; use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; @@ -415,33 +407,35 @@ pub mod tests { use ark_pallas::{Fr, Projective}; #[test] - fn test_fold() { + fn test_fold() -> Result<(), Error> { let ccs = get_test_ccs(); let z1 = get_test_z::(3); let z2 = get_test_z::(4); let (w1, x1) = ccs.split_z(&z1); let (w2, x2) = ccs.split_z(&z2); - ccs.check_relation(&w1, &x1).unwrap(); - ccs.check_relation(&w2, &x2).unwrap(); + ccs.check_relation(&w1, &x1)?; + ccs.check_relation(&w2, &x2)?; let mut rng = test_rng(); let r_x_prime: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); - let sigmas_thetas = - compute_sigmas_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime).unwrap(); + let sigmas_thetas = compute_sigmas_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime)?; - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; - let (lcccs, w1) = ccs - .to_lcccs::<_, Projective, Pedersen, false>(&mut rng, &pedersen_params, &z1) - .unwrap(); - let (cccs, w2) = ccs - .to_cccs::<_, Projective, Pedersen, false>(&mut rng, &pedersen_params, &z2) - .unwrap(); + let (lcccs, w1) = ccs.to_lcccs::<_, Projective, Pedersen, false>( + &mut rng, + &pedersen_params, + &z1, + )?; + let (cccs, w2) = ccs.to_cccs::<_, Projective, Pedersen, false>( + &mut rng, + &pedersen_params, + &z2, + )?; - ccs.check_relation(&w1, &lcccs).unwrap(); - ccs.check_relation(&w2, &cccs).unwrap(); + ccs.check_relation(&w1, &lcccs)?; + ccs.check_relation(&w2, &cccs)?; let mut rng = test_rng(); let rho = Fr::rand(&mut rng); @@ -457,18 +451,18 @@ pub mod tests { let w_folded = NIMFS::>::fold_witness(&[w1], &[w2], rho); // check lcccs relation - ccs.check_relation(&w_folded, &folded).unwrap(); + ccs.check_relation(&w_folded, &folded)?; + Ok(()) } /// Perform multifolding of an LCCCS instance with a CCCS instance (as described in the paper) #[test] - pub fn test_basic_multifolding() { + pub fn test_basic_multifolding() -> Result<(), Error> { let mut rng = test_rng(); // Create a basic CCS circuit let ccs = get_test_ccs::(); - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; // Generate a satisfying witness let z_1 = get_test_z(3); @@ -476,13 +470,11 @@ pub mod tests { let z_2 = get_test_z(4); // Create the LCCCS instance 
out of z_1 - let (running_instance, w1) = ccs - .to_lcccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z_1) - .unwrap(); + let (running_instance, w1) = + ccs.to_lcccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z_1)?; // Create the CCCS instance out of z_2 - let (new_instance, w2) = ccs - .to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z_2) - .unwrap(); + let (new_instance, w2) = + ccs.to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z_2)?; // Prover's transcript let poseidon_config = poseidon_canonical_config::(); @@ -498,8 +490,7 @@ pub mod tests { &[new_instance.clone()], &[w1], &[w2], - ) - .unwrap(); + )?; // Verifier's transcript let mut transcript_v: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); @@ -512,29 +503,27 @@ pub mod tests { &[running_instance.clone()], &[new_instance.clone()], proof, - ) - .unwrap(); + )?; assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with respect to the folded witness - ccs.check_relation(&folded_witness, &folded_lcccs).unwrap(); + ccs.check_relation(&folded_witness, &folded_lcccs)?; + Ok(()) } /// Perform multiple steps of multifolding of an LCCCS instance with a CCCS instance #[test] - pub fn test_multifolding_two_instances_multiple_steps() { + pub fn test_multifolding_two_instances_multiple_steps() -> Result<(), Error> { let mut rng = test_rng(); let ccs = get_test_ccs::(); - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; // LCCCS witness let z_1 = get_test_z(2); - let (mut running_instance, mut w1) = ccs - .to_lcccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z_1) - .unwrap(); + let (mut running_instance, mut w1) = + ccs.to_lcccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z_1)?; let poseidon_config = poseidon_canonical_config::(); @@ -549,9 +538,8 @@ pub mod tests { // CCS witness let z_2 = get_test_z(i); - let (new_instance, w2) = ccs - .to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z_2) - .unwrap(); + let (new_instance, w2) = + ccs.to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z_2)?; // run the prover side of the multifolding let (proof, folded_lcccs, folded_witness, _) = @@ -562,8 +550,7 @@ pub mod tests { &[new_instance.clone()], &[w1], &[w2], - ) - .unwrap(); + )?; // run the verifier side of the multifolding let folded_lcccs_v = NIMFS::>::verify( @@ -572,27 +559,26 @@ pub mod tests { &[running_instance.clone()], &[new_instance.clone()], proof, - ) - .unwrap(); + )?; assert_eq!(folded_lcccs, folded_lcccs_v); // check that the folded instance with the folded witness holds the LCCCS relation - ccs.check_relation(&folded_witness, &folded_lcccs).unwrap(); + ccs.check_relation(&folded_witness, &folded_lcccs)?; running_instance = folded_lcccs; w1 = folded_witness; } + Ok(()) } /// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step. 
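// --- Illustrative sketch (not part of the patch) ---------------------------
// The tests in this patch are rewritten to return `Result<(), Error>` so that
// `?` replaces the `.unwrap()` chains, and fallible iterator bodies are
// collected into `Result<Vec<_>, _>` before use. A std-only sketch of both
// idioms (the `Error` type is a stand-in; run it with `cargo test`):
#[derive(Debug)]
struct Error;

fn fallible(i: u32) -> Result<u32, Error> {
    if i < 100 {
        Ok(i * i)
    } else {
        Err(Error)
    }
}

#[test]
fn test_with_question_mark() -> Result<(), Error> {
    // an error fails the test instead of panicking mid-assertion
    let squares = (0..4).map(fallible).collect::<Result<Vec<_>, _>>()?;
    assert_eq!(squares, vec![0, 1, 4, 9]);
    Ok(())
}
// ---------------------------------------------------------------------------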
#[test] - pub fn test_multifolding_mu_nu_instances() { + pub fn test_multifolding_mu_nu_instances() -> Result<(), Error> { let mut rng = test_rng(); // Create a basic CCS circuit let ccs = get_test_ccs::(); - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; let mu = 10; let nu = 15; @@ -613,9 +599,8 @@ pub mod tests { let mut lcccs_instances = Vec::new(); let mut w_lcccs = Vec::new(); for z_i in z_lcccs.iter() { - let (running_instance, w) = ccs - .to_lcccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, z_i) - .unwrap(); + let (running_instance, w) = + ccs.to_lcccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, z_i)?; lcccs_instances.push(running_instance); w_lcccs.push(w); } @@ -623,9 +608,8 @@ pub mod tests { let mut cccs_instances = Vec::new(); let mut w_cccs = Vec::new(); for z_i in z_cccs.iter() { - let (new_instance, w) = ccs - .to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, z_i) - .unwrap(); + let (new_instance, w) = + ccs.to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, z_i)?; cccs_instances.push(new_instance); w_cccs.push(w); } @@ -644,8 +628,7 @@ pub mod tests { &cccs_instances, &w_lcccs, &w_cccs, - ) - .unwrap(); + )?; // Verifier's transcript let mut transcript_v: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); @@ -658,24 +641,23 @@ pub mod tests { &lcccs_instances, &cccs_instances, proof, - ) - .unwrap(); + )?; assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with respect to the folded witness - ccs.check_relation(&folded_witness, &folded_lcccs).unwrap(); + ccs.check_relation(&folded_witness, &folded_lcccs)?; + Ok(()) } /// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step /// and repeats the process doing multiple steps. 
#[test] - pub fn test_multifolding_mu_nu_instances_multiple_steps() { + pub fn test_multifolding_mu_nu_instances_multiple_steps() -> Result<(), Error> { let mut rng = test_rng(); // Create a basic CCS circuit let ccs = get_test_ccs::(); - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; let poseidon_config = poseidon_canonical_config::(); // Prover's transcript @@ -709,9 +691,11 @@ pub mod tests { let mut lcccs_instances = Vec::new(); let mut w_lcccs = Vec::new(); for z_i in z_lcccs.iter() { - let (running_instance, w) = ccs - .to_lcccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, z_i) - .unwrap(); + let (running_instance, w) = ccs.to_lcccs::<_, _, Pedersen, false>( + &mut rng, + &pedersen_params, + z_i, + )?; lcccs_instances.push(running_instance); w_lcccs.push(w); } @@ -719,9 +703,11 @@ pub mod tests { let mut cccs_instances = Vec::new(); let mut w_cccs = Vec::new(); for z_i in z_cccs.iter() { - let (new_instance, w) = ccs - .to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, z_i) - .unwrap(); + let (new_instance, w) = ccs.to_cccs::<_, _, Pedersen, false>( + &mut rng, + &pedersen_params, + z_i, + )?; cccs_instances.push(new_instance); w_cccs.push(w); } @@ -735,8 +721,7 @@ pub mod tests { &cccs_instances, &w_lcccs, &w_cccs, - ) - .unwrap(); + )?; // Run the verifier side of the multifolding let folded_lcccs_v = NIMFS::>::verify( @@ -745,13 +730,13 @@ pub mod tests { &lcccs_instances, &cccs_instances, proof, - ) - .unwrap(); + )?; assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with respect to the folded witness - ccs.check_relation(&folded_witness, &folded_lcccs).unwrap(); + ccs.check_relation(&folded_witness, &folded_lcccs)?; } + Ok(()) } } diff --git a/folding-schemes/src/folding/hypernova/utils.rs b/folding-schemes/src/folding/hypernova/utils.rs index 71b9108e..6ed4d841 100644 --- a/folding-schemes/src/folding/hypernova/utils.rs +++ b/folding-schemes/src/folding/hypernova/utils.rs @@ -1,6 +1,4 @@ -use ark_ec::CurveGroup; use ark_ff::PrimeField; -use ark_poly::DenseMultilinearExtension; use ark_poly::MultilinearExtension; use ark_std::One; use std::sync::Arc; @@ -11,7 +9,7 @@ use crate::arith::ccs::CCS; use crate::utils::mle::dense_vec_to_dense_mle; use crate::utils::vec::mat_vec_mul; use crate::utils::virtual_polynomial::{build_eq_x_r_vec, eq_eval, VirtualPolynomial}; -use crate::Error; +use crate::{Curve, Error}; /// Compute the arrays of sigma_i and theta_i from step 4 corresponding to the LCCCS and CCCS /// instances @@ -23,27 +21,27 @@ pub fn compute_sigmas_thetas( ) -> Result, Error> { let mut sigmas: Vec> = Vec::new(); for z_lcccs_i in z_lcccs { - let mut Mzs: Vec> = vec![]; - for M_j in ccs.M.iter() { - Mzs.push(dense_vec_to_dense_mle(ccs.s, &mat_vec_mul(M_j, z_lcccs_i)?)); - } - let sigma_i = Mzs + let sigma_i = ccs + .M .iter() - .map(|Mz| Mz.evaluate(r_x_prime).ok_or(Error::EvaluationFail)) - .collect::>()?; + .map(|M_j| { + let Mz = dense_vec_to_dense_mle(ccs.s, &mat_vec_mul(M_j, z_lcccs_i)?); + Ok(Mz.fix_variables(r_x_prime)[0]) + }) + .collect::, Error>>()?; sigmas.push(sigma_i); } let mut thetas: Vec> = Vec::new(); for z_cccs_i in z_cccs { - let mut Mzs: Vec> = vec![]; - for M_j in ccs.M.iter() { - Mzs.push(dense_vec_to_dense_mle(ccs.s, &mat_vec_mul(M_j, z_cccs_i)?)); - } - let theta_i = Mzs + let theta_i = ccs + .M .iter() - .map(|Mz| Mz.evaluate(r_x_prime).ok_or(Error::EvaluationFail)) 
- .collect::>()?; + .map(|M_j| { + let Mz = dense_vec_to_dense_mle(ccs.s, &mat_vec_mul(M_j, z_cccs_i)?); + Ok(Mz.fix_variables(r_x_prime)[0]) + }) + .collect::, Error>>()?; thetas.push(theta_i); } Ok(SigmasThetas(sigmas, thetas)) @@ -83,14 +81,14 @@ pub fn compute_c( let e2 = eq_eval(beta, r_x_prime)?; for (k, thetas) in vec_thetas.iter().enumerate() { // + gamma^{t+1} * e2 * sum c_i * prod theta_j - let mut lhs = F::zero(); - for i in 0..ccs.q { + let prods = ccs.S.iter().zip(&ccs.c).map(|(S_i, &c_i)| { let mut prod = F::one(); - for j in ccs.S[i].clone() { + for &j in S_i { prod *= thetas[j]; } - lhs += ccs.c[i] * prod; - } + c_i * prod + }); + let lhs = F::sum(prods); let gamma_t1 = gamma.pow([(mu * ccs.t + k) as u64]); c += gamma_t1 * e2 * lhs; } @@ -98,17 +96,14 @@ pub fn compute_c( } /// Compute g(x) polynomial for the given inputs. -pub fn compute_g( +pub fn compute_g( ccs: &CCS, running_instances: &[LCCCS], z_lcccs: &[Vec], z_cccs: &[Vec], gamma: C::ScalarField, beta: &[C::ScalarField], -) -> Result, Error> -where - C::ScalarField: PrimeField, -{ +) -> Result, Error> { assert_eq!(running_instances.len(), z_lcccs.len()); let mut g = VirtualPolynomial::::new(ccs.s); @@ -133,24 +128,21 @@ where } let eq_beta = build_eq_x_r_vec(beta)?; - let eq_beta_mle = dense_vec_to_dense_mle(ccs.s, &eq_beta); + let eq_beta_mle = Arc::new(dense_vec_to_dense_mle(ccs.s, &eq_beta)); #[allow(clippy::needless_range_loop)] for k in 0..nu { // Q_k - for i in 0..ccs.q { + for (S_i, &c_i) in ccs.S.iter().zip(&ccs.c) { let mut Q_k = vec![]; - for &j in ccs.S[i].iter() { - Q_k.push(dense_vec_to_dense_mle( + for &j in S_i { + Q_k.push(Arc::new(dense_vec_to_dense_mle( ccs.s, &mat_vec_mul(&ccs.M[j], &z_cccs[k])?, - )); + ))); } Q_k.push(eq_beta_mle.clone()); - g.add_mle_list( - Q_k.iter().map(|v| Arc::new(v.clone())), - ccs.c[i] * gamma_pow, - )?; + g.add_mle_list(Q_k, c_i * gamma_pow)?; } gamma_pow *= gamma; } @@ -169,7 +161,7 @@ pub mod tests { use super::*; use crate::arith::{ ccs::tests::{get_test_ccs, get_test_z}, - Arith, + Arith, ArithRelation, }; use crate::commitment::{pedersen::Pedersen, CommitmentScheme}; use crate::folding::hypernova::lcccs::tests::compute_Ls; @@ -198,7 +190,7 @@ pub mod tests { /// of the matrix and the z vector. This technique is also used extensively in "An Algebraic Framework for /// Universal and Updatable SNARKs". 
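// --- Illustrative sketch (not part of the patch) ---------------------------
// `compute_sigmas_thetas` above now evaluates each MLE `Mz` at `r_x_prime` via
// `fix_variables(..)[0]` instead of `evaluate(..)`. A self-contained sketch of
// what that computes: an MLE given by its 2^s hypercube evaluations is
// evaluated by fixing one variable at a time (lowest variable first, with the
// adjacent-pair layout assumed here); field arithmetic is replaced by f64 for
// readability.
fn fix_variable(evals: &[f64], r: f64) -> Vec<f64> {
    // adjacent pairs differ only in the lowest variable; after fixing it to r
    // the evaluation table halves in size
    evals
        .chunks(2)
        .map(|pair| pair[0] + r * (pair[1] - pair[0]))
        .collect()
}

fn evaluate_mle(mut evals: Vec<f64>, point: &[f64]) -> f64 {
    for &r in point {
        evals = fix_variable(&evals, r);
    }
    evals[0] // all variables fixed: a single value remains
}

fn main() {
    // f(x1, x2) with evaluations ordered as f(0,0), f(1,0), f(0,1), f(1,1)
    let evals = vec![1.0, 3.0, 5.0, 7.0];
    // at a Boolean point the MLE agrees with the table
    assert_eq!(evaluate_mle(evals.clone(), &[1.0, 0.0]), 3.0);
    // elsewhere it is multilinear: f(x1, x2) = 1 + 2*x1 + 4*x2
    assert_eq!(evaluate_mle(evals, &[0.5, 0.5]), 4.0);
}
// ---------------------------------------------------------------------------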
#[test] - fn test_compute_M_r_y_compression() { + fn test_compute_M_r_y_compression() -> Result<(), Error> { let mut rng = test_rng(); // s = 2, s' = 3 @@ -223,17 +215,18 @@ pub mod tests { assert_eq!(M_r_y.evaluations[j], rlc); } + Ok(()) } #[test] - fn test_compute_sigmas_thetas() { + fn test_compute_sigmas_thetas() -> Result<(), Error> { let ccs = get_test_ccs(); let z1 = get_test_z(3); let z2 = get_test_z(4); let (w1, x1) = ccs.split_z(&z1); let (w2, x2) = ccs.split_z(&z2); - ccs.check_relation(&w1, &x1).unwrap(); - ccs.check_relation(&w2, &x2).unwrap(); + ccs.check_relation(&w1, &x1)?; + ccs.check_relation(&w2, &x2)?; let mut rng = test_rng(); let gamma: Fr = Fr::rand(&mut rng); @@ -241,14 +234,11 @@ pub mod tests { let r_x_prime: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); // Initialize a multifolding object - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); - let (lcccs_instance, _) = ccs - .to_lcccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z1) - .unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; + let (lcccs_instance, _) = + ccs.to_lcccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z1)?; - let sigmas_thetas = - compute_sigmas_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime).unwrap(); + let sigmas_thetas = compute_sigmas_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime)?; let g = compute_g( &ccs, @@ -257,13 +247,12 @@ pub mod tests { &[z2.clone()], gamma, &beta, - ) - .unwrap(); + )?; // we expect g(r_x_prime) to be equal to: // c = (sum gamma^j * e1 * sigma_j) + gamma^{t+1} * e2 * sum c_i * prod theta_j // from compute_c - let expected_c = g.evaluate(&r_x_prime).unwrap(); + let expected_c = g.evaluate(&r_x_prime)?; let c = compute_c::( &ccs, &sigmas_thetas, @@ -271,13 +260,13 @@ pub mod tests { &beta, &vec![lcccs_instance.r_x], &r_x_prime, - ) - .unwrap(); + )?; assert_eq!(c, expected_c); + Ok(()) } #[test] - fn test_compute_g() { + fn test_compute_g() -> Result<(), Error> { let mut rng = test_rng(); // generate test CCS & z vectors @@ -286,18 +275,16 @@ pub mod tests { let z2 = get_test_z(4); let (w1, x1) = ccs.split_z(&z1); let (w2, x2) = ccs.split_z(&z2); - ccs.check_relation(&w1, &x1).unwrap(); - ccs.check_relation(&w2, &x2).unwrap(); + ccs.check_relation(&w1, &x1)?; + ccs.check_relation(&w2, &x2)?; let gamma: Fr = Fr::rand(&mut rng); let beta: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); // Initialize a multifolding object - let (pedersen_params, _) = - Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); - let (lcccs_instance, _) = ccs - .to_lcccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z1) - .unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n_witnesses())?; + let (lcccs_instance, _) = + ccs.to_lcccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &z1)?; // Compute g(x) with that r_x let g = compute_g::( @@ -307,13 +294,12 @@ pub mod tests { &[z2.clone()], gamma, &beta, - ) - .unwrap(); + )?; // evaluate g(x) over x \in {0,1}^s let mut g_on_bhc = Fr::zero(); for x in BooleanHypercube::new(ccs.s) { - g_on_bhc += g.evaluate(&x).unwrap(); + g_on_bhc += g.evaluate(&x)?; } // Q(x) over bhc is assumed to be zero, as checked in the test 'test_compute_Q' @@ -331,16 +317,17 @@ pub mod tests { // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s let mut sum_Lj_on_bhc = Fr::zero(); - let vec_L = compute_Ls(&ccs, &lcccs_instance, &z1); + let vec_L = compute_Ls(&ccs, &lcccs_instance, &z1)?; for x in 
BooleanHypercube::new(ccs.s) { for (j, Lj) in vec_L.iter().enumerate() { let gamma_j = gamma.pow([j as u64]); - sum_Lj_on_bhc += Lj.evaluate(&x).unwrap() * gamma_j; + sum_Lj_on_bhc += Lj.evaluate(&x)? * gamma_j; } } // evaluating g(x) over the boolean hypercube should give the same result as evaluating the // sum of gamma^j * Lj(x) over the boolean hypercube assert_eq!(g_on_bhc, sum_Lj_on_bhc); + Ok(()) } } diff --git a/folding-schemes/src/folding/mod.rs b/folding-schemes/src/folding/mod.rs index 32342886..a2b23a8d 100644 --- a/folding-schemes/src/folding/mod.rs +++ b/folding-schemes/src/folding/mod.rs @@ -6,11 +6,10 @@ pub mod traits; #[cfg(test)] pub mod tests { - use ark_ec::CurveGroup; - use ark_ff::PrimeField; - use ark_pallas::{constraints::GVar as GVar1, Fr, Projective as G1}; + + use ark_pallas::{Fr, Projective as G1}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; - use ark_vesta::{constraints::GVar as GVar2, Projective as G2}; + use ark_vesta::Projective as G2; use std::io::Write; use crate::commitment::pedersen::Pedersen; @@ -22,28 +21,26 @@ pub mod tests { use crate::frontend::utils::CubicFCircuit; use crate::frontend::FCircuit; use crate::transcript::poseidon::poseidon_canonical_config; - use crate::Error; use crate::FoldingScheme; + use crate::{Curve, Error}; /// tests the IVC proofs and its serializers for the 3 implemented IVCs: Nova, HyperNova and /// ProtoGalaxy. #[test] - fn test_serialize_ivc_nova_hypernova_protogalaxy() { + fn test_serialize_ivc_nova_hypernova_protogalaxy() -> Result<(), Error> { let poseidon_config = poseidon_canonical_config::(); type FC = CubicFCircuit; - let f_circuit = FC::new(()).unwrap(); + let f_circuit = FC::new(())?; // test Nova - type N = Nova, Pedersen, false>; + type N = Nova, Pedersen, false>; let prep_param = NovaPreprocessorParam::new(poseidon_config.clone(), f_circuit); - test_serialize_ivc_opt::("nova".to_string(), prep_param.clone()).unwrap(); + test_serialize_ivc_opt::("nova".to_string(), prep_param.clone())?; // test HyperNova type HN = HyperNova< G1, - GVar1, G2, - GVar2, FC, Pedersen, Pedersen, @@ -51,45 +48,41 @@ pub mod tests { 1, // nu false, >; - test_serialize_ivc_opt::("hypernova".to_string(), prep_param).unwrap(); + test_serialize_ivc_opt::("hypernova".to_string(), prep_param)?; // test ProtoGalaxy - type P = ProtoGalaxy, Pedersen>; + type P = ProtoGalaxy, Pedersen>; let prep_param = (poseidon_config, f_circuit); - test_serialize_ivc_opt::("protogalaxy".to_string(), prep_param).unwrap(); + test_serialize_ivc_opt::("protogalaxy".to_string(), prep_param)?; + Ok(()) } fn test_serialize_ivc_opt< - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, FC: FCircuit, FS: FoldingScheme, >( name: String, prep_param: FS::PreprocessorParam, - ) -> Result<(), Error> - where - C1: CurveGroup, - C2::BaseField: PrimeField, - FC: FCircuit, - { + ) -> Result<(), Error> { let mut rng = ark_std::test_rng(); let F_circuit = FC::new(())?; let fs_params = FS::preprocess(&mut rng, &prep_param)?; let z_0 = vec![C1::ScalarField::from(3_u32)]; - let mut fs = FS::init(&fs_params, F_circuit, z_0.clone()).unwrap(); + let mut fs = FS::init(&fs_params, F_circuit, z_0.clone())?; // perform multiple IVC steps (internally folding) let num_steps: usize = 3; for _ in 0..num_steps { - fs.prove_step(&mut rng, vec![], None).unwrap(); + fs.prove_step(&mut rng, FC::ExternalInputs::default(), None)?; } // verify the IVCProof let ivc_proof: FS::IVCProof = fs.ivc_proof(); - FS::verify(fs_params.1.clone(), ivc_proof.clone()).unwrap(); + 
FS::verify(fs_params.1.clone(), ivc_proof.clone())?; // serialize the IVCProof and store it in a file let mut writer = vec![]; @@ -98,31 +91,23 @@ pub mod tests { let mut file = std::fs::OpenOptions::new() .create(true) .write(true) - .open(format!("./ivc_proof-{}.serialized", name)) - .unwrap(); - file.write_all(&writer).unwrap(); + .open(format!("./ivc_proof-{}.serialized", name))?; + file.write_all(&writer)?; // read the IVCProof from the file deserializing it - let bytes = std::fs::read(format!("./ivc_proof-{}.serialized", name)).unwrap(); - let deserialized_ivc_proof = - FS::IVCProof::deserialize_compressed(bytes.as_slice()).unwrap(); + let bytes = std::fs::read(format!("./ivc_proof-{}.serialized", name))?; + let deserialized_ivc_proof = FS::IVCProof::deserialize_compressed(bytes.as_slice())?; // verify deserialized IVCProof - FS::verify(fs_params.1.clone(), deserialized_ivc_proof.clone()).unwrap(); + FS::verify(fs_params.1.clone(), deserialized_ivc_proof.clone())?; // build the FS from the given IVCProof, FC::Params, ProverParams and VerifierParams - let mut new_fs = FS::from_ivc_proof(deserialized_ivc_proof, (), fs_params.clone()).unwrap(); + let mut new_fs = FS::from_ivc_proof(deserialized_ivc_proof, (), fs_params.clone())?; // serialize the Nova params let mut fs_pp_serialized = vec![]; - fs_params - .0 - .serialize_compressed(&mut fs_pp_serialized) - .unwrap(); + fs_params.0.serialize_compressed(&mut fs_pp_serialized)?; let mut fs_vp_serialized = vec![]; - fs_params - .1 - .serialize_compressed(&mut fs_vp_serialized) - .unwrap(); + fs_params.1.serialize_compressed(&mut fs_vp_serialized)?; // deserialize the Nova params. This would be done by the client reading from a file let _fs_pp_deserialized = FS::pp_deserialize_with_mode( @@ -130,15 +115,14 @@ pub mod tests { ark_serialize::Compress::Yes, ark_serialize::Validate::Yes, (), // FCircuit's Params - ) - .unwrap(); + )?; // perform several IVC steps on both the original FS instance and the recovered from the // serialization new FS instance let num_steps: usize = 3; for _ in 0..num_steps { - new_fs.prove_step(&mut rng, vec![], None).unwrap(); - fs.prove_step(&mut rng, vec![], None).unwrap(); + new_fs.prove_step(&mut rng, FC::ExternalInputs::default(), None)?; + fs.prove_step(&mut rng, FC::ExternalInputs::default(), None)?; } // check that the IVCProofs from both FS instances are equal @@ -149,8 +133,7 @@ pub mod tests { ark_serialize::Compress::Yes, ark_serialize::Validate::Yes, (), // fcircuit_params - ) - .unwrap(); + )?; // get the IVCProof let ivc_proof: FS::IVCProof = new_fs.ivc_proof(); @@ -162,10 +145,10 @@ pub mod tests { .is_ok()); // deserialize IVCProof let ivc_proof_deserialized = - FS::IVCProof::deserialize_compressed(ivc_proof_serialized.as_slice()).unwrap(); + FS::IVCProof::deserialize_compressed(ivc_proof_serialized.as_slice())?; // verify the last IVCProof from the recovered from serialization FS - FS::verify(fs_vp_deserialized.clone(), ivc_proof_deserialized).unwrap(); + FS::verify(fs_vp_deserialized.clone(), ivc_proof_deserialized)?; Ok(()) } diff --git a/folding-schemes/src/folding/nova/circuits.rs b/folding-schemes/src/folding/nova/circuits.rs index b307b3e1..6b7f8fa1 100644 --- a/folding-schemes/src/folding/nova/circuits.rs +++ b/folding-schemes/src/folding/nova/circuits.rs @@ -2,9 +2,7 @@ use ark_crypto_primitives::sponge::{ constraints::CryptographicSpongeVar, poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge}, - Absorb, }; -use ark_ec::{CurveGroup, Group}; use ark_ff::PrimeField; 
use ark_r1cs_std::{ alloc::AllocVar, @@ -12,11 +10,10 @@ use ark_r1cs_std::{ eq::EqGadget, fields::{fp::FpVar, FieldVar}, prelude::CurveVar, - R1CSVar, ToConstraintFieldGadget, + R1CSVar, }; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; use ark_std::{fmt::Debug, One, Zero}; -use core::marker::PhantomData; use super::{ nifs::{ @@ -31,11 +28,12 @@ use crate::folding::circuits::{ CycleFoldConfig, NIFSFullGadget, }, nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar}, - CF1, CF2, + CF1, }; use crate::folding::traits::{CommittedInstanceVarOps, Dummy}; use crate::frontend::FCircuit; use crate::transcript::AbsorbNonNativeGadget; +use crate::Curve; /// `AugmentedFCircuit` enhances the original step function `F`, so that it can /// be used in recursive arguments such as IVC. @@ -50,27 +48,20 @@ use crate::transcript::AbsorbNonNativeGadget; /// defined in [CycleFold](https://eprint.iacr.org/2023/1192.pdf). These extra /// constraints verify the correct folding of CycleFold instances. #[derive(Debug, Clone)] -pub struct AugmentedFCircuit< - C1: CurveGroup, - C2: CurveGroup, - GC2: CurveVar>, - FC: FCircuit>, -> { - pub(super) _gc2: PhantomData, +pub struct AugmentedFCircuit>> { pub(super) poseidon_config: PoseidonConfig>, pub(super) pp_hash: Option>, pub(super) i: Option>, pub(super) i_usize: Option, pub(super) z_0: Option>, pub(super) z_i: Option>, - pub(super) external_inputs: Option>, + pub(super) external_inputs: Option, pub(super) u_i_cmW: Option, pub(super) U_i: Option>, pub(super) U_i1_cmE: Option, pub(super) U_i1_cmW: Option, pub(super) cmT: Option, - pub(super) F: FC, // F circuit - pub(super) x: Option>, // public input (u_{i+1}.x[0]) + pub(super) F: FC, // F circuit // cyclefold verifier on C1 // Here 'cf1, cf2' are for each of the CycleFold circuits, corresponding to the fold of cmW and @@ -80,15 +71,11 @@ pub struct AugmentedFCircuit< pub(super) cf_U_i: Option>, // input pub(super) cf1_cmT: Option, pub(super) cf2_cmT: Option, - pub(super) cf_x: Option>, // public input (u_{i+1}.x[1]) } -impl>, FC: FCircuit>> - AugmentedFCircuit -{ +impl>> AugmentedFCircuit { pub fn empty(poseidon_config: &PoseidonConfig>, F_circuit: FC) -> Self { Self { - _gc2: PhantomData, poseidon_config: poseidon_config.clone(), pp_hash: None, i: None, @@ -102,31 +89,26 @@ impl>, FC: FCircuit ConstraintSynthesizer> for AugmentedFCircuit +impl AugmentedFCircuit where - C1: CurveGroup, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit>, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, { - fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { + pub fn compute_next_state( + self, + cs: ConstraintSystemRef>, + ) -> Result>>, SynthesisError> { let pp_hash = FpVar::>::new_witness(cs.clone(), || { Ok(self.pp_hash.unwrap_or_else(CF1::::zero)) })?; @@ -143,10 +125,8 @@ where .z_i .unwrap_or(vec![CF1::::zero(); self.F.state_len()])) })?; - let external_inputs = Vec::>>::new_witness(cs.clone(), || { - Ok(self - .external_inputs - .unwrap_or(vec![CF1::::zero(); self.F.external_inputs_len()])) + let external_inputs = FC::ExternalInputsVar::new_witness(cs.clone(), || { + Ok(self.external_inputs.unwrap_or_default()) })?; let u_dummy = CommittedInstance::dummy(2); @@ -164,11 +144,13 @@ where NonNativeAffineVar::new_witness(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?; let cf_u_dummy = 
CycleFoldCommittedInstance::dummy(NovaCycleFoldConfig::::IO_LEN); - let cf_U_i = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + let cf_U_i = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { Ok(self.cf_U_i.unwrap_or(cf_u_dummy.clone())) })?; - let cf1_cmT = GC2::new_witness(cs.clone(), || Ok(self.cf1_cmT.unwrap_or_else(C2::zero)))?; - let cf2_cmT = GC2::new_witness(cs.clone(), || Ok(self.cf2_cmT.unwrap_or_else(C2::zero)))?; + let cf1_cmT = + C2::Var::new_witness(cs.clone(), || Ok(self.cf1_cmT.unwrap_or_else(C2::zero)))?; + let cf2_cmT = + C2::Var::new_witness(cs.clone(), || Ok(self.cf2_cmT.unwrap_or_else(C2::zero)))?; // `sponge` is for digest computation. let sponge = PoseidonSpongeVar::::new(cs.clone(), &self.poseidon_config); @@ -249,8 +231,17 @@ where &z_0, &z_i1, )?; - let x = FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?; - x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?; + let x = is_basecase.select(&u_i1_x_base, &u_i1_x)?; + // This line "converts" `x` from a witness to a public input. + // Instead of directly modifying the constraint system, we explicitly + // allocate a public input and enforce that its value is indeed `x`. + // While comparing `x` with itself seems redundant, this is necessary + // because: + // - `.value()` allows an honest prover to extract public inputs without + // computing them outside the circuit. + // - `.enforce_equal()` prevents a malicious prover from claiming wrong + // public inputs that are not the honest `x` computed in-circuit. + FpVar::new_input(cs.clone(), || x.value())?.enforce_equal(&x)?; // CycleFold part // C.1. Compute cf1_u_i.x and cf2_u_i.x @@ -272,21 +263,21 @@ where // C.2. Construct `cf1_u_i` and `cf2_u_i` let cf1_u_i = CycleFoldCommittedInstanceVar { // cf1_u_i.cmE = 0 - cmE: GC2::zero(), + cmE: C2::Var::zero(), // cf1_u_i.u = 1 u: NonNativeUintVar::new_constant(cs.clone(), C1::BaseField::one())?, // cf1_u_i.cmW is provided by the prover as witness - cmW: GC2::new_witness(cs.clone(), || Ok(self.cf1_u_i_cmW.unwrap_or(C2::zero())))?, + cmW: C2::Var::new_witness(cs.clone(), || Ok(self.cf1_u_i_cmW.unwrap_or(C2::zero())))?, // cf1_u_i.x is computed in step 1 x: cfW_x, }; let cf2_u_i = CycleFoldCommittedInstanceVar { // cf2_u_i.cmE = 0 - cmE: GC2::zero(), + cmE: C2::Var::zero(), // cf2_u_i.u = 1 u: NonNativeUintVar::new_constant(cs.clone(), C1::BaseField::one())?, // cf2_u_i.cmW is provided by the prover as witness - cmW: GC2::new_witness(cs.clone(), || Ok(self.cf2_u_i_cmW.unwrap_or(C2::zero())))?, + cmW: C2::Var::new_witness(cs.clone(), || Ok(self.cf2_u_i_cmW.unwrap_or(C2::zero())))?, // cf2_u_i.x is computed in step 1 x: cfE_x, }; @@ -296,7 +287,7 @@ where // compute cf1_r = H(cf1_u_i, cf_U_i, cf1_cmT) // cf_r_bits is denoted by rho* in the paper. 
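// --- Illustrative sketch (not part of the patch) ---------------------------
// The comment above describes the new pattern for exposing an in-circuit value
// as a public input: allocate a fresh input from `x.value()` and enforce
// equality with the in-circuit `x`, instead of passing the expected value in
// from outside. A minimal standalone sketch assuming the arkworks r1cs-std API
// already used in this patch (the curve/field choice here is arbitrary):
use ark_bn254::Fr;
use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget, fields::fp::FpVar, R1CSVar};
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};

fn main() -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();

    // some value computed inside the circuit (here just a witness constant)
    let x = FpVar::<Fr>::new_witness(cs.clone(), || Ok(Fr::from(42u64)))?;

    // allocate a public input carrying the same value and bind it to `x`;
    // a prover cannot claim a different public input, since the equality
    // constraint would then fail
    FpVar::new_input(cs.clone(), || x.value())?.enforce_equal(&x)?;

    assert!(cs.is_satisfied()?);
    Ok(())
}
// ---------------------------------------------------------------------------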
- let cf1_r_bits = CycleFoldChallengeGadget::::get_challenge_gadget( + let cf1_r_bits = CycleFoldChallengeGadget::::get_challenge_gadget( &mut transcript, pp_hash.clone(), cf_U_i_vec, @@ -304,19 +295,18 @@ where cf1_cmT.clone(), )?; // Fold cf1_u_i & cf_U_i into cf1_U_{i+1} - let cf1_U_i1 = NIFSFullGadget::::fold_committed_instance( - cf1_r_bits, cf1_cmT, cf_U_i, cf1_u_i, - )?; + let cf1_U_i1 = + NIFSFullGadget::::fold_committed_instance(cf1_r_bits, cf1_cmT, cf_U_i, cf1_u_i)?; // same for cf2_r: - let cf2_r_bits = CycleFoldChallengeGadget::::get_challenge_gadget( + let cf2_r_bits = CycleFoldChallengeGadget::::get_challenge_gadget( &mut transcript, pp_hash.clone(), cf1_U_i1.to_native_sponge_field_elements()?, cf2_u_i.clone(), cf2_cmT.clone(), )?; - let cf_U_i1 = NIFSFullGadget::::fold_committed_instance( + let cf_U_i1 = NIFSFullGadget::::fold_committed_instance( cf2_r_bits, cf2_cmT, cf1_U_i1, // the output from NIFS.V(cf1_r, cf_U, cfE_u) cf2_u_i, )?; @@ -327,14 +317,32 @@ where // Non-base case: u_{i+1}.x[1] == H(cf_U_{i+1}) let (cf_u_i1_x, _) = cf_U_i1.clone().hash(&sponge, pp_hash.clone())?; let (cf_u_i1_x_base, _) = - CycleFoldCommittedInstanceVar::::new_constant(cs.clone(), cf_u_dummy)? + CycleFoldCommittedInstanceVar::::new_constant(cs.clone(), cf_u_dummy)? .hash(&sponge, pp_hash)?; - let cf_x = FpVar::new_input(cs.clone(), || { - Ok(self.cf_x.unwrap_or(cf_u_i1_x_base.value()?)) - })?; - cf_x.enforce_equal(&is_basecase.select(&cf_u_i1_x_base, &cf_u_i1_x)?)?; + let cf_x = is_basecase.select(&cf_u_i1_x_base, &cf_u_i1_x)?; + // This line "converts" `cf_x` from a witness to a public input. + // Instead of directly modifying the constraint system, we explicitly + // allocate a public input and enforce that its value is indeed `cf_x`. + // While comparing `cf_x` with itself seems redundant, this is necessary + // because: + // - `.value()` allows an honest prover to extract public inputs without + // computing them outside the circuit. + // - `.enforce_equal()` prevents a malicious prover from claiming wrong + // public inputs that are not the honest `cf_x` computed in-circuit. 
+ FpVar::new_input(cs.clone(), || cf_x.value())?.enforce_equal(&cf_x)?; + + Ok(z_i1) + } +} - Ok(()) +impl ConstraintSynthesizer> for AugmentedFCircuit +where + C1: Curve, + C2: Curve, + FC: FCircuit>, +{ + fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { + self.compute_next_state(cs).map(|_| ()) } } @@ -342,17 +350,21 @@ where pub mod tests { use super::*; use ark_bn254::{Fr, G1Projective as Projective}; - use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge}; + use ark_crypto_primitives::sponge::{ + constraints::AbsorbGadget, poseidon::PoseidonSponge, CryptographicSponge, + }; use ark_ff::BigInteger; + use ark_relations::r1cs::ConstraintSystem; use ark_std::UniformRand; use crate::folding::nova::nifs::nova::ChallengeGadget; use crate::transcript::poseidon::poseidon_canonical_config; + use crate::Error; // checks that the gadget and native implementations of the challenge computation match #[test] - fn test_challenge_gadget() { + fn test_challenge_gadget() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); let mut transcript = PoseidonSponge::::new(&poseidon_config); @@ -382,41 +394,32 @@ pub mod tests { &u_i, Some(&cmT), ); - let r = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); + let r = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).ok_or(Error::OutOfBounds)?; let cs = ConstraintSystem::::new_ref(); - let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash)).unwrap(); + let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash))?; let u_iVar = - CommittedInstanceVar::::new_witness(cs.clone(), || Ok(u_i.clone())) - .unwrap(); + CommittedInstanceVar::::new_witness(cs.clone(), || Ok(u_i.clone()))?; let U_iVar = - CommittedInstanceVar::::new_witness(cs.clone(), || Ok(U_i.clone())) - .unwrap(); - let cmTVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(cmT)).unwrap(); + CommittedInstanceVar::::new_witness(cs.clone(), || Ok(U_i.clone()))?; + let cmTVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(cmT))?; let mut transcriptVar = PoseidonSpongeVar::::new(cs.clone(), &poseidon_config); // compute the challenge in-circuit - let U_iVar_vec = [ - vec![U_iVar.u.clone()], - U_iVar.x.clone(), - U_iVar.cmE.to_constraint_field().unwrap(), - U_iVar.cmW.to_constraint_field().unwrap(), - ] - .concat(); let r_bitsVar = ChallengeGadget::>::get_challenge_gadget( &mut transcriptVar, pp_hashVar, - U_iVar_vec, + U_iVar.to_sponge_field_elements()?, u_iVar, Some(cmTVar), - ) - .unwrap(); - assert!(cs.is_satisfied().unwrap()); + )?; + assert!(cs.is_satisfied()?); // check that the natively computed and in-circuit computed hashes match - let rVar = Boolean::le_bits_to_fp_var(&r_bitsVar).unwrap(); - assert_eq!(rVar.value().unwrap(), r); - assert_eq!(r_bitsVar.value().unwrap(), r_bits); + let rVar = Boolean::le_bits_to_fp(&r_bitsVar)?; + assert_eq!(rVar.value()?, r); + assert_eq!(r_bitsVar.value()?, r_bits); + Ok(()) } } diff --git a/folding-schemes/src/folding/nova/decider.rs b/folding-schemes/src/folding/nova/decider.rs index 1f5e0090..df0d5ad7 100644 --- a/folding-schemes/src/folding/nova/decider.rs +++ b/folding-schemes/src/folding/nova/decider.rs @@ -2,10 +2,7 @@ /// DeciderEth from decider_eth.rs file. 
/// More details can be found at the documentation page: /// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-offchain.html -use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; use ark_ff::{BigInteger, PrimeField}; -use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_snark::SNARK; use ark_std::rand::{CryptoRng, RngCore}; @@ -16,21 +13,21 @@ use super::decider_circuits::{DeciderCircuit1, DeciderCircuit2}; use super::decider_eth_circuit::DeciderNovaGadget; use super::Nova; use crate::commitment::CommitmentScheme; +use crate::folding::circuits::cyclefold::CycleFoldCommittedInstance; use crate::folding::circuits::decider::DeciderEnabledNIFS; -use crate::folding::circuits::{ - cyclefold::{CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar}, - CF2, +use crate::folding::traits::{ + CommittedInstanceOps, Dummy, Inputize, InputizeNonNative, WitnessOps, }; -use crate::folding::traits::{CommittedInstanceOps, Inputize, WitnessOps}; use crate::frontend::FCircuit; -use crate::Error; +use crate::transcript::poseidon::poseidon_canonical_config; +use crate::{Curve, Error}; use crate::{Decider as DeciderTrait, FoldingScheme}; #[derive(Debug, Clone, Eq, PartialEq)] pub struct Proof where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, S1: SNARK, @@ -69,7 +66,7 @@ where #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] pub struct VerifierParam where - C1: CurveGroup, + C1: Curve, CS1_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize, S1_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize, CS2_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize, @@ -84,11 +81,9 @@ where /// Onchain Decider, for ethereum use cases #[derive(Clone, Debug)] -pub struct Decider { +pub struct Decider { _c1: PhantomData, - _gc1: PhantomData, _c2: PhantomData, - _gc2: PhantomData, _fc: PhantomData, _cs1: PhantomData, _cs2: PhantomData, @@ -97,13 +92,11 @@ pub struct Decider { _fs: PhantomData, } -impl DeciderTrait - for Decider +impl DeciderTrait + for Decider where - C1: CurveGroup, - C2: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme< C1, @@ -120,21 +113,14 @@ where S1: SNARK, S2: SNARK, FS: FoldingScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, - for<'b> &'b GC1: GroupOpsBounds<'b, C1, GC1>, - for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>, // constrain FS into Nova, since this is a Decider specifically for Nova - Nova: From, + Nova: From, crate::folding::nova::ProverParams: From<>::ProverParam>, crate::folding::nova::VerifierParams: From<>::VerifierParam>, { - type PreprocessorParam = (FS::ProverParam, FS::VerifierParam); + type PreprocessorParam = ((FS::ProverParam, FS::VerifierParam), usize); type ProverParam = ProverParam; type Proof = Proof; @@ -150,11 +136,32 @@ where fn preprocess( mut rng: impl RngCore + CryptoRng, - prep_param: Self::PreprocessorParam, - fs: FS, + ((pp, vp), state_len): Self::PreprocessorParam, ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { - let circuit1 = DeciderCircuit1::::try_from(Nova::from(fs.clone()))?; - let circuit2 = DeciderCircuit2::::try_from(Nova::from(fs))?; + // get the FoldingScheme prover 
& verifier params from Nova + let nova_pp: as FoldingScheme>::ProverParam = + pp.into(); + let nova_vp: as FoldingScheme< + C1, + C2, + FC, + >>::VerifierParam = vp.into(); + let pp_hash = nova_vp.pp_hash()?; + + let circuit1 = DeciderCircuit1::::dummy(( + nova_vp.r1cs, + &nova_vp.cf_r1cs, + nova_vp.poseidon_config, + (), + (), + state_len, + 2, // Nova's running CommittedInstance contains 2 commitments + )); + let circuit2 = DeciderCircuit2::::dummy(( + nova_vp.cf_r1cs, + poseidon_canonical_config::(), + 2, // Nova's running CommittedInstance contains 2 commitments + )); // get the Groth16 specific setup for the circuits let (c1_g16_pk, c1_g16_vk) = S1::circuit_specific_setup(circuit1, &mut rng) @@ -162,21 +169,6 @@ where let (c2_g16_pk, c2_g16_vk) = S2::circuit_specific_setup(circuit2, &mut rng) .map_err(|e| Error::SNARKSetupFail(e.to_string()))?; - // get the FoldingScheme prover & verifier params from Nova - #[allow(clippy::type_complexity)] - let nova_pp: as FoldingScheme< - C1, - C2, - FC, - >>::ProverParam = prep_param.0.clone().into(); - #[allow(clippy::type_complexity)] - let nova_vp: as FoldingScheme< - C1, - C2, - FC, - >>::VerifierParam = prep_param.1.clone().into(); - - let pp_hash = nova_vp.pp_hash()?; let pp = Self::ProverParam { c1_snark_pp: c1_g16_pk, c1_cs_pp: nova_pp.cs_pp, @@ -198,7 +190,7 @@ where pp: Self::ProverParam, fs: FS, ) -> Result { - let circuit1 = DeciderCircuit1::::try_from(Nova::from(fs.clone()))?; + let circuit1 = DeciderCircuit1::::try_from(Nova::from(fs.clone()))?; let circuit2 = DeciderCircuit2::::try_from(Nova::from(fs))?; let cmT = circuit1.proof; @@ -280,14 +272,11 @@ where &[vp.pp_hash, i][..], &z_0, &z_i, - &U_final_commitments - .iter() - .flat_map(|c| c.inputize()) - .collect::>(), - &Inputize::, CycleFoldCommittedInstanceVar>::inputize(&cf_U), + &U_final_commitments.inputize_nonnative(), + &cf_U.inputize_nonnative(), &proof.cs1_challenges, &proof.cs1_proofs.iter().map(|p| p.eval).collect::>(), - &proof.cmT.inputize(), + &proof.cmT.inputize_nonnative(), ] .concat(); @@ -344,12 +333,8 @@ pub mod tests { // Note: do not use the MNTx_298 curves in practice, these are just for tests. Use the MNTx_753 // curves instead. 
- use ark_mnt4_298::{ - constraints::G1Var as GVar, Fr, G1Projective as Projective, MNT4_298 as MNT4, - }; - use ark_mnt6_298::{ - constraints::G1Var as GVar2, G1Projective as Projective2, MNT6_298 as MNT6, - }; + use ark_mnt4_298::{Fr, G1Projective as Projective, MNT4_298 as MNT4}; + use ark_mnt6_298::{G1Projective as Projective2, MNT6_298 as MNT6}; use std::time::Instant; use super::*; @@ -359,13 +344,11 @@ pub mod tests { use crate::transcript::poseidon::poseidon_canonical_config; #[test] - fn test_decider() { + fn test_decider() -> Result<(), Error> { // use Nova as FoldingScheme type N = Nova< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, MNT4>, KZG<'static, MNT6>, @@ -373,9 +356,7 @@ pub mod tests { >; type D = Decider< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, MNT4>, KZG<'static, MNT6>, @@ -387,32 +368,33 @@ pub mod tests { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let z_0 = vec![Fr::from(3_u32)]; let start = Instant::now(); let prep_param = PreprocessorParam::new(poseidon_config, F_circuit); - let nova_params = N::preprocess(&mut rng, &prep_param).unwrap(); + let nova_params = N::preprocess(&mut rng, &prep_param)?; println!("Nova preprocess, {:?}", start.elapsed()); let start = Instant::now(); - let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); + let mut nova = N::init(&nova_params, F_circuit, z_0.clone())?; println!("Nova initialized, {:?}", start.elapsed()); let start = Instant::now(); - nova.prove_step(&mut rng, vec![], None).unwrap(); + nova.prove_step(&mut rng, (), None)?; println!("prove_step, {:?}", start.elapsed()); - nova.prove_step(&mut rng, vec![], None).unwrap(); // do a 2nd step + nova.prove_step(&mut rng, (), None)?; // do a 2nd step let mut rng = rand::rngs::OsRng; // prepare the Decider prover & verifier params let start = Instant::now(); - let (decider_pp, decider_vp) = D::preprocess(&mut rng, nova_params, nova.clone()).unwrap(); + let (decider_pp, decider_vp) = + D::preprocess(&mut rng, (nova_params, F_circuit.state_len()))?; println!("Decider preprocess, {:?}", start.elapsed()); // decider proof generation let start = Instant::now(); - let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); + let proof = D::prove(rng, decider_pp, nova.clone())?; println!("Decider prove, {:?}", start.elapsed()); // decider proof verification @@ -425,9 +407,9 @@ pub mod tests { &nova.U_i.get_commitments(), &nova.u_i.get_commitments(), &proof, - ) - .unwrap(); + )?; assert!(verified); println!("Decider verify, {:?}", start.elapsed()); + Ok(()) } } diff --git a/folding-schemes/src/folding/nova/decider_circuits.rs b/folding-schemes/src/folding/nova/decider_circuits.rs index cba00ed3..db9a686d 100644 --- a/folding-schemes/src/folding/nova/decider_circuits.rs +++ b/folding-schemes/src/folding/nova/decider_circuits.rs @@ -2,10 +2,9 @@ /// DeciderEthCircuit. 
/// More details can be found at the documentation page: /// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-offchain.html -use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, Absorb, CryptographicSponge}; -use ark_ec::CurveGroup; +use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge}; use ark_ff::{BigInteger, PrimeField}; -use ark_r1cs_std::{fields::fp::FpVar, prelude::CurveVar, ToConstraintFieldGadget}; +use ark_r1cs_std::fields::fp::FpVar; use core::marker::PhantomData; use super::{ @@ -13,12 +12,8 @@ use super::{ nifs::{nova::NIFS, NIFSTrait}, CommittedInstance, Nova, Witness, }; -use crate::folding::{ - circuits::{CF1, CF2}, - traits::WitnessOps, -}; +use crate::folding::{circuits::CF1, traits::WitnessOps}; use crate::frontend::FCircuit; -use crate::Error; use crate::{ arith::r1cs::{circuits::R1CSMatricesVar, R1CS}, folding::circuits::decider::{ @@ -27,14 +22,14 @@ use crate::{ }, }; use crate::{commitment::CommitmentScheme, transcript::poseidon::poseidon_canonical_config}; +use crate::{Curve, Error}; /// Circuit that implements part of the in-circuit checks needed for the offchain verification over /// the Curve2's BaseField (=Curve1's ScalarField). -pub type DeciderCircuit1 = GenericOffchainDeciderCircuit1< +pub type DeciderCircuit1 = GenericOffchainDeciderCircuit1< C1, C2, - GC2, CommittedInstance, CommittedInstance, Witness, @@ -44,22 +39,17 @@ pub type DeciderCircuit1 = GenericOffchainDeciderCircuit1< >; impl< - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, const H: bool, - > TryFrom> for DeciderCircuit1 -where - CF1: Absorb, - ::BaseField: PrimeField, + > TryFrom> for DeciderCircuit1 { type Error = Error; - fn try_from(nova: Nova) -> Result { + fn try_from(nova: Nova) -> Result { let mut transcript = PoseidonSponge::::new(&nova.poseidon_config); // pp_hash is absorbed to transcript at the NIFS::prove call @@ -89,7 +79,6 @@ where .collect::, _>>()?; Ok(Self { - _gc2: PhantomData, _avar: PhantomData, arith: nova.r1cs, poseidon_config: nova.poseidon_config, @@ -117,21 +106,17 @@ where pub type DeciderCircuit2 = GenericOffchainDeciderCircuit2; impl< - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, const H: bool, - > TryFrom> for DeciderCircuit2 -where - CF1: Absorb, + > TryFrom> for DeciderCircuit2 { type Error = Error; - fn try_from(nova: Nova) -> Result { + fn try_from(nova: Nova) -> Result { // compute the Commitment Scheme challenges of the CycleFold instance commitments, used as // inputs in the circuit let poseidon_config = poseidon_canonical_config::(); @@ -167,9 +152,9 @@ where #[cfg(test)] pub mod tests { - use ark_pallas::{constraints::GVar, Fq, Fr, Projective}; + use ark_pallas::{Fq, Fr, Projective}; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; - use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_vesta::Projective as Projective2; use super::*; use crate::commitment::pedersen::Pedersen; @@ -179,18 +164,16 @@ pub mod tests { use crate::FoldingScheme; #[test] - fn test_decider_circuits() { + fn test_decider_circuits() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); - let 
F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let z_0 = vec![Fr::from(3_u32)]; type N = Nova< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, Pedersen, Pedersen, @@ -205,26 +188,26 @@ pub mod tests { Pedersen, false, >::new(poseidon_config, F_circuit); - let nova_params = N::preprocess(&mut rng, &prep_param).unwrap(); + let nova_params = N::preprocess(&mut rng, &prep_param)?; // generate a Nova instance and do a step of it - let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); - nova.prove_step(&mut rng, vec![], None).unwrap(); + let mut nova = N::init(&nova_params, F_circuit, z_0.clone())?; + nova.prove_step(&mut rng, (), None)?; // verify the IVC let ivc_proof = nova.ivc_proof(); - N::verify(nova_params.1, ivc_proof).unwrap(); + N::verify(nova_params.1, ivc_proof)?; // load the DeciderCircuit 1 & 2 from the Nova instance - let decider_circuit1 = - DeciderCircuit1::::try_from(nova.clone()).unwrap(); - let decider_circuit2 = DeciderCircuit2::::try_from(nova).unwrap(); + let decider_circuit1 = DeciderCircuit1::::try_from(nova.clone())?; + let decider_circuit2 = DeciderCircuit2::::try_from(nova)?; // generate the constraints of both circuits and check that are satisfied by the inputs let cs1 = ConstraintSystem::::new_ref(); - decider_circuit1.generate_constraints(cs1.clone()).unwrap(); - assert!(cs1.is_satisfied().unwrap()); + decider_circuit1.generate_constraints(cs1.clone())?; + assert!(cs1.is_satisfied()?); let cs2 = ConstraintSystem::::new_ref(); - decider_circuit2.generate_constraints(cs2.clone()).unwrap(); - assert!(cs2.is_satisfied().unwrap()); + decider_circuit2.generate_constraints(cs2.clone())?; + assert!(cs2.is_satisfied()?); + Ok(()) } } diff --git a/folding-schemes/src/folding/nova/decider_eth.rs b/folding-schemes/src/folding/nova/decider_eth.rs index e765ed3e..57a36bfb 100644 --- a/folding-schemes/src/folding/nova/decider_eth.rs +++ b/folding-schemes/src/folding/nova/decider_eth.rs @@ -3,35 +3,37 @@ /// More details can be found at the documentation page: /// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html use ark_bn254::Bn254; -use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{AffineRepr, CurveGroup, Group}; -use ark_ff::{BigInteger, PrimeField}; use ark_groth16::Groth16; -use ark_r1cs_std::{prelude::CurveVar, ToConstraintFieldGadget}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_snark::SNARK; -use ark_std::rand::{CryptoRng, RngCore}; -use ark_std::{One, Zero}; +use ark_std::{ + rand::{CryptoRng, RngCore}, + One, Zero, +}; use core::marker::PhantomData; pub use super::decider_eth_circuit::DeciderEthCircuit; use super::decider_eth_circuit::DeciderNovaGadget; use super::{CommittedInstance, Nova}; -use crate::commitment::{ - kzg::{Proof as KZGProof, KZG}, - pedersen::Params as PedersenParams, - CommitmentScheme, -}; -use crate::folding::circuits::{decider::DeciderEnabledNIFS, CF2}; -use crate::folding::traits::{Inputize, WitnessOps}; +use crate::folding::circuits::decider::DeciderEnabledNIFS; +use crate::folding::traits::{InputizeNonNative, WitnessOps}; use crate::frontend::FCircuit; -use crate::Error; +use crate::utils::eth::ToEth; +use crate::{ + commitment::{ + kzg::{Proof as KZGProof, KZG}, + pedersen::Params as PedersenParams, + CommitmentScheme, + }, + folding::traits::Dummy, +}; +use crate::{Curve, Error}; use crate::{Decider as DeciderTrait, FoldingScheme}; #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, 
CanonicalDeserialize)] pub struct Proof where - C: CurveGroup, + C: Curve, CS: CommitmentScheme, S: SNARK, { @@ -49,7 +51,7 @@ where #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] pub struct VerifierParam where - C1: CurveGroup, + C1: Curve, CS_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize, S_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize, { @@ -60,11 +62,9 @@ where /// Onchain Decider, for ethereum use cases #[derive(Clone, Debug)] -pub struct Decider { +pub struct Decider { _c1: PhantomData, - _gc1: PhantomData, _c2: PhantomData, - _gc2: PhantomData, _fc: PhantomData, _cs1: PhantomData, _cs2: PhantomData, @@ -72,13 +72,11 @@ pub struct Decider { _fs: PhantomData, } -impl DeciderTrait - for Decider +impl DeciderTrait + for Decider where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, // CS1 is a KZG commitment, where challenge is C1::Fr elem CS1: CommitmentScheme< @@ -91,19 +89,14 @@ where CS2: CommitmentScheme>, S: SNARK, FS: FoldingScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, // constrain FS into Nova, since this is a Decider specifically for Nova - Nova: From, + Nova: From, crate::folding::nova::ProverParams: From<>::ProverParam>, crate::folding::nova::VerifierParams: From<>::VerifierParam>, { - type PreprocessorParam = (FS::ProverParam, FS::VerifierParam); + type PreprocessorParam = ((FS::ProverParam, FS::VerifierParam), usize); type ProverParam = (S::ProvingKey, CS1::ProverParams); type Proof = Proof; type VerifierParam = VerifierParam; @@ -112,30 +105,34 @@ where fn preprocess( mut rng: impl RngCore + CryptoRng, - prep_param: Self::PreprocessorParam, - fs: FS, + ((pp, vp), state_len): Self::PreprocessorParam, ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { - let circuit = DeciderEthCircuit::::try_from(Nova::from(fs))?; + // get the FoldingScheme prover & verifier params from Nova + let nova_pp: as FoldingScheme>::ProverParam = + pp.into(); + let nova_vp: as FoldingScheme< + C1, + C2, + FC, + >>::VerifierParam = vp.into(); + + let pp_hash = nova_vp.pp_hash()?; + + let circuit = DeciderEthCircuit::::dummy(( + nova_vp.r1cs, + nova_vp.cf_r1cs, + nova_pp.cf_cs_pp, + nova_pp.poseidon_config, + (), + (), + state_len, + 2, // Nova's running CommittedInstance contains 2 commitments + )); // get the Groth16 specific setup for the circuit let (g16_pk, g16_vk) = S::circuit_specific_setup(circuit, &mut rng) .map_err(|e| Error::SNARKSetupFail(e.to_string()))?; - // get the FoldingScheme prover & verifier params from Nova - #[allow(clippy::type_complexity)] - let nova_pp: as FoldingScheme< - C1, - C2, - FC, - >>::ProverParam = prep_param.0.clone().into(); - #[allow(clippy::type_complexity)] - let nova_vp: as FoldingScheme< - C1, - C2, - FC, - >>::VerifierParam = prep_param.1.clone().into(); - let pp_hash = nova_vp.pp_hash()?; - let pp = (g16_pk, nova_pp.cs_pp); let vp = Self::VerifierParam { pp_hash, @@ -152,7 +149,7 @@ where ) -> Result { let (snark_pk, cs_pk): (S::ProvingKey, CS1::ProverParams) = pp; - let circuit = DeciderEthCircuit::::try_from(Nova::from(folding_scheme))?; + let circuit = DeciderEthCircuit::::try_from(Nova::from(folding_scheme))?; let cmT = circuit.proof; let r = circuit.randomness; @@ -220,13 +217,10 @@ where &[pp_hash, i][..], &z_0, &z_i, - &U_final_commitments - .iter() - .flat_map(|c| 
c.inputize()) - .collect::>(), + &U_final_commitments.inputize_nonnative(), &proof.kzg_challenges, &proof.kzg_proofs.iter().map(|p| p.eval).collect::>(), - &proof.cmT.inputize(), + &proof.cmT.inputize_nonnative(), ] .concat(); @@ -261,60 +255,30 @@ pub fn prepare_calldata( incoming_instance: &CommittedInstance, proof: Proof, Groth16>, ) -> Result, Error> { - Ok(vec![ - function_signature_check.to_vec(), - i.into_bigint().to_bytes_be(), // i - z_0.iter() - .flat_map(|v| v.into_bigint().to_bytes_be()) - .collect::>(), // z_0 - z_i.iter() - .flat_map(|v| v.into_bigint().to_bytes_be()) - .collect::>(), // z_i - point_to_eth_format(running_instance.cmW.into_affine())?, - point_to_eth_format(running_instance.cmE.into_affine())?, - point_to_eth_format(incoming_instance.cmW.into_affine())?, - point_to_eth_format(proof.cmT.into_affine())?, // cmT - proof.r.into_bigint().to_bytes_be(), // r - point_to_eth_format(proof.snark_proof.a)?, // pA - point2_to_eth_format(proof.snark_proof.b)?, // pB - point_to_eth_format(proof.snark_proof.c)?, // pC - proof.kzg_challenges[0].into_bigint().to_bytes_be(), // challenge_W - proof.kzg_challenges[1].into_bigint().to_bytes_be(), // challenge_E - proof.kzg_proofs[0].eval.into_bigint().to_bytes_be(), // eval W - proof.kzg_proofs[1].eval.into_bigint().to_bytes_be(), // eval E - point_to_eth_format(proof.kzg_proofs[0].proof.into_affine())?, // W kzg_proof - point_to_eth_format(proof.kzg_proofs[1].proof.into_affine())?, // E kzg_proof - ] - .concat()) -} - -fn point_to_eth_format(p: C) -> Result, Error> -where - C::BaseField: PrimeField, -{ - // the encoding of the additive identity is [0, 0] on the EVM - let zero_point = (&C::BaseField::zero(), &C::BaseField::zero()); - let (x, y) = p.xy().unwrap_or(zero_point); - - Ok([x.into_bigint().to_bytes_be(), y.into_bigint().to_bytes_be()].concat()) -} -fn point2_to_eth_format(p: ark_bn254::G2Affine) -> Result, Error> { - let zero_point = (&ark_bn254::Fq2::zero(), &ark_bn254::Fq2::zero()); - let (x, y) = p.xy().unwrap_or(zero_point); - Ok([ - x.c1.into_bigint().to_bytes_be(), - x.c0.into_bigint().to_bytes_be(), - y.c1.into_bigint().to_bytes_be(), - y.c0.into_bigint().to_bytes_be(), + function_signature_check.to_eth(), + i.to_eth(), // i + z_0.to_eth(), // z_0 + z_i.to_eth(), // z_i + running_instance.cmW.to_eth(), + running_instance.cmE.to_eth(), + incoming_instance.cmW.to_eth(), + proof.cmT.to_eth(), // cmT + proof.r.to_eth(), // r + proof.snark_proof.to_eth(), // pA, pB, pC + proof.kzg_challenges.to_eth(), // challenge_W, challenge_E + proof.kzg_proofs[0].eval.to_eth(), // eval W + proof.kzg_proofs[1].eval.to_eth(), // eval E + proof.kzg_proofs[0].proof.to_eth(), // W kzg_proof + proof.kzg_proofs[1].proof.to_eth(), // E kzg_proof ] .concat()) } #[cfg(test)] pub mod tests { - use ark_bn254::{constraints::GVar, Fr, G1Projective as Projective}; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_bn254::{Fr, G1Projective as Projective}; + use ark_grumpkin::Projective as Projective2; use std::time::Instant; use super::*; @@ -325,13 +289,11 @@ pub mod tests { use crate::transcript::poseidon::poseidon_canonical_config; #[test] - fn test_decider() { + fn test_decider() -> Result<(), Error> { // use Nova as FoldingScheme type N = Nova< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, @@ -339,9 +301,7 @@ pub mod tests { >; type D = Decider< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, @@ -352,27 +312,28 @@ pub mod tests 
{ let mut rng = rand::rngs::OsRng; let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let z_0 = vec![Fr::from(3_u32)]; let preprocessor_param = PreprocessorParam::new(poseidon_config, F_circuit); - let nova_params = N::preprocess(&mut rng, &preprocessor_param).unwrap(); + let nova_params = N::preprocess(&mut rng, &preprocessor_param)?; let start = Instant::now(); - let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); + let mut nova = N::init(&nova_params, F_circuit, z_0.clone())?; println!("Nova initialized, {:?}", start.elapsed()); // prepare the Decider prover & verifier params - let (decider_pp, decider_vp) = D::preprocess(&mut rng, nova_params, nova.clone()).unwrap(); + let (decider_pp, decider_vp) = + D::preprocess(&mut rng, (nova_params, F_circuit.state_len()))?; let start = Instant::now(); - nova.prove_step(&mut rng, vec![], None).unwrap(); + nova.prove_step(&mut rng, (), None)?; println!("prove_step, {:?}", start.elapsed()); - nova.prove_step(&mut rng, vec![], None).unwrap(); // do a 2nd step + nova.prove_step(&mut rng, (), None)?; // do a 2nd step // decider proof generation let start = Instant::now(); - let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); + let proof = D::prove(rng, decider_pp, nova.clone())?; println!("Decider prove, {:?}", start.elapsed()); // decider proof verification @@ -385,8 +346,7 @@ pub mod tests { &nova.U_i.get_commitments(), &nova.u_i.get_commitments(), &proof, - ) - .unwrap(); + )?; assert!(verified); println!("Decider verify, {:?}", start.elapsed()); @@ -399,22 +359,20 @@ pub mod tests { &nova.U_i.get_commitments(), &nova.u_i.get_commitments(), &proof, - ) - .unwrap(); + )?; assert!(verified); + Ok(()) } // Test to check the serialization and deserialization of diverse Decider related parameters. // This test is the same test as `test_decider` but it serializes values and then uses the // deserialized values to continue the checks. #[test] - fn test_decider_serialization() { + fn test_decider_serialization() -> Result<(), Error> { // use Nova as FoldingScheme type N = Nova< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, @@ -422,9 +380,7 @@ pub mod tests { >; type D = Decider< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, @@ -435,32 +391,26 @@ pub mod tests { let mut rng = rand::rngs::OsRng; let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let z_0 = vec![Fr::from(3_u32)]; let preprocessor_param = PreprocessorParam::new(poseidon_config, F_circuit); - let nova_params = N::preprocess(&mut rng, &preprocessor_param).unwrap(); - - let start = Instant::now(); - let nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); - println!("Nova initialized, {:?}", start.elapsed()); + let nova_params = N::preprocess(&mut rng, &preprocessor_param)?; // prepare the Decider prover & verifier params let (decider_pp, decider_vp) = - D::preprocess(&mut rng, nova_params.clone(), nova.clone()).unwrap(); + D::preprocess(&mut rng, (nova_params.clone(), F_circuit.state_len()))?; // serialize the Nova params. These params are the trusted setup of the commitment schemes used // (ie. 
KZG & Pedersen in this case) let mut nova_pp_serialized = vec![]; nova_params .0 - .serialize_compressed(&mut nova_pp_serialized) - .unwrap(); + .serialize_compressed(&mut nova_pp_serialized)?; let mut nova_vp_serialized = vec![]; nova_params .1 - .serialize_compressed(&mut nova_vp_serialized) - .unwrap(); + .serialize_compressed(&mut nova_vp_serialized)?; // deserialize the Nova params. This would be done by the client reading from a file let nova_pp_deserialized = NovaProverParams::< Projective, @@ -469,8 +419,7 @@ pub mod tests { Pedersen, >::deserialize_compressed( &mut nova_pp_serialized.as_slice() - ) - .unwrap(); + )?; let nova_vp_deserialized = as CommitmentScheme>::VerifierParams, as SNARK>::VerifyingKey, - >::deserialize_compressed(&mut decider_vp_serialized.as_slice()) - .unwrap(); + >::deserialize_compressed(&mut decider_vp_serialized.as_slice())?; let proof_deserialized = Proof::, Groth16>::deserialize_compressed( &mut proof_serialized.as_slice(), - ) - .unwrap(); + )?; // deserialize the public inputs from the single packet 'public_inputs_serialized' let mut reader = public_inputs_serialized.as_slice(); - let i_deserialized = Fr::deserialize_compressed(&mut reader).unwrap(); - let z_0_deserialized = Vec::::deserialize_compressed(&mut reader).unwrap(); - let z_i_deserialized = Vec::::deserialize_compressed(&mut reader).unwrap(); + let i_deserialized = Fr::deserialize_compressed(&mut reader)?; + let z_0_deserialized = Vec::::deserialize_compressed(&mut reader)?; + let z_i_deserialized = Vec::::deserialize_compressed(&mut reader)?; // decider proof verification using the deserialized data let verified = D::verify( @@ -563,8 +502,8 @@ pub mod tests { &nova.U_i.get_commitments(), &nova.u_i.get_commitments(), &proof_deserialized, - ) - .unwrap(); + )?; assert!(verified); + Ok(()) } } diff --git a/folding-schemes/src/folding/nova/decider_eth_circuit.rs b/folding-schemes/src/folding/nova/decider_eth_circuit.rs index 7ef2ee0c..eec2a0a0 100644 --- a/folding-schemes/src/folding/nova/decider_eth_circuit.rs +++ b/folding-schemes/src/folding/nova/decider_eth_circuit.rs @@ -5,15 +5,12 @@ use ark_crypto_primitives::sponge::{ constraints::CryptographicSpongeVar, poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge}, - Absorb, CryptographicSponge, + CryptographicSponge, }; -use ark_ec::CurveGroup; use ark_ff::{BigInteger, PrimeField}; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, fields::fp::FpVar, - prelude::CurveVar, - ToConstraintFieldGadget, }; use ark_relations::r1cs::{Namespace, SynthesisError}; use ark_std::{borrow::Borrow, marker::PhantomData}; @@ -26,31 +23,27 @@ use super::{ use crate::commitment::{pedersen::Params as PedersenParams, CommitmentScheme}; use crate::folding::{ circuits::{ - decider::on_chain::GenericOnchainDeciderCircuit, nonnative::affine::NonNativeAffineVar, - CF1, CF2, + decider::on_chain::GenericOnchainDeciderCircuit, nonnative::affine::NonNativeAffineVar, CF1, }, traits::{WitnessOps, WitnessVarOps}, }; use crate::frontend::FCircuit; -use crate::Error; use crate::{ arith::r1cs::{circuits::R1CSMatricesVar, R1CS}, folding::circuits::decider::{DeciderEnabledNIFS, EvalGadget, KZGChallengesGadget}, }; +use crate::{Curve, Error}; /// In-circuit representation of the Witness associated to the CommittedInstance. 
#[derive(Debug, Clone)] -pub struct WitnessVar { +pub struct WitnessVar { pub E: Vec>, pub rE: FpVar, pub W: Vec>, pub rW: FpVar, } -impl AllocVar, CF1> for WitnessVar -where - C: CurveGroup, -{ +impl AllocVar, CF1> for WitnessVar { fn new_variable>>( cs: impl Into>>, f: impl FnOnce() -> Result, @@ -74,16 +67,15 @@ where } } -impl WitnessVarOps for WitnessVar { +impl WitnessVarOps for WitnessVar { fn get_openings(&self) -> Vec<(&[FpVar], FpVar)> { vec![(&self.W, self.rW.clone()), (&self.E, self.rE.clone())] } } -pub type DeciderEthCircuit = GenericOnchainDeciderCircuit< +pub type DeciderEthCircuit = GenericOnchainDeciderCircuit< C1, C2, - GC2, CommittedInstance, CommittedInstance, Witness, @@ -94,23 +86,18 @@ pub type DeciderEthCircuit = GenericOnchainDeciderCircuit< /// returns an instance of the DeciderEthCircuit from the given Nova struct impl< - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, // enforce that the CS2 is Pedersen commitment scheme, since we're at Ethereum's EVM decider CS2: CommitmentScheme>, const H: bool, - > TryFrom> for DeciderEthCircuit -where - CF1: Absorb, - ::BaseField: PrimeField, + > TryFrom> for DeciderEthCircuit { type Error = Error; - fn try_from(nova: Nova) -> Result { + fn try_from(nova: Nova) -> Result { let mut transcript = PoseidonSponge::::new(&nova.poseidon_config); // compute the U_{i+1}, W_{i+1} @@ -139,7 +126,6 @@ where .collect::, _>>()?; Ok(Self { - _gc2: PhantomData, _avar: PhantomData, arith: nova.r1cs, cf_arith: nova.cf_r1cs, @@ -167,11 +153,9 @@ where pub struct DeciderNovaGadget; -impl +impl DeciderEnabledNIFS, CommittedInstance, Witness, R1CS>> for DeciderNovaGadget -where - CF1: Absorb, { type ProofDummyCfg = (); type Proof = C; @@ -216,9 +200,9 @@ where #[cfg(test)] pub mod tests { - use ark_pallas::{constraints::GVar, Fr, Projective}; + use ark_pallas::{Fr, Projective}; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; - use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_vesta::Projective as Projective2; use super::*; use crate::commitment::pedersen::Pedersen; @@ -228,18 +212,16 @@ pub mod tests { use crate::FoldingScheme; #[test] - fn test_decider_circuit() { + fn test_decider_circuit() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let z_0 = vec![Fr::from(3_u32)]; type N = Nova< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, Pedersen, Pedersen, @@ -254,23 +236,24 @@ pub mod tests { Pedersen, false, >::new(poseidon_config, F_circuit); - let nova_params = N::preprocess(&mut rng, &prep_param).unwrap(); + let nova_params = N::preprocess(&mut rng, &prep_param)?; // generate a Nova instance and do a step of it - let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); - nova.prove_step(&mut rng, vec![], None).unwrap(); + let mut nova = N::init(&nova_params, F_circuit, z_0.clone())?; + nova.prove_step(&mut rng, (), None)?; let ivc_proof = nova.ivc_proof(); - N::verify(nova_params.1, ivc_proof).unwrap(); + N::verify(nova_params.1, ivc_proof)?; // load the DeciderEthCircuit from the generated Nova instance - let decider_circuit = - DeciderEthCircuit::::try_from(nova).unwrap(); + let decider_circuit = DeciderEthCircuit::::try_from(nova)?; let cs = ConstraintSystem::::new_ref(); 
// generate the constraints and check that are satisfied by the inputs - decider_circuit.generate_constraints(cs.clone()).unwrap(); - assert!(cs.is_satisfied().unwrap()); + decider_circuit.generate_constraints(cs.clone())?; + assert!(cs.is_satisfied()?); + + Ok(()) } /// This test is like the test `test_relaxed_r1cs_nonnative_circuit` (from diff --git a/folding-schemes/src/folding/nova/mod.rs b/folding-schemes/src/folding/nova/mod.rs index ea24dbcb..16f0c0b4 100644 --- a/folding-schemes/src/folding/nova/mod.rs +++ b/folding-schemes/src/folding/nova/mod.rs @@ -8,35 +8,31 @@ use ark_crypto_primitives::sponge::{ poseidon::{PoseidonConfig, PoseidonSponge}, Absorb, CryptographicSponge, }; -use ark_ec::{CurveGroup, Group}; use ark_ff::{BigInteger, PrimeField}; -use ark_r1cs_std::{prelude::CurveVar, ToConstraintFieldGadget}; +use ark_r1cs_std::R1CSVar; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Valid}; -use ark_std::fmt::Debug; -use ark_std::rand::RngCore; -use ark_std::{One, UniformRand, Zero}; -use core::marker::PhantomData; - -use crate::folding::circuits::cyclefold::{ - fold_cyclefold_circuit, CycleFoldCircuit, CycleFoldCommittedInstance, CycleFoldConfig, - CycleFoldWitness, -}; -use crate::folding::{ - circuits::{CF1, CF2}, - traits::Dummy, -}; +use ark_std::{cmp::max, fmt::Debug, marker::PhantomData, rand::RngCore, One, UniformRand, Zero}; + +use crate::folding::{circuits::CF1, traits::Dummy}; use crate::frontend::FCircuit; -use crate::transcript::{poseidon::poseidon_canonical_config, AbsorbNonNative, Transcript}; +use crate::transcript::{poseidon::poseidon_canonical_config, Transcript}; use crate::utils::vec::is_zero_vec; -use crate::Error; use crate::FoldingScheme; use crate::{ arith::r1cs::{extract_r1cs, extract_w_x, R1CS}, constants::NOVA_N_BITS_RO, - utils::{get_cm_coordinates, pp_hash}, + utils::pp_hash, +}; +use crate::{ + arith::Arith, + folding::circuits::cyclefold::{ + fold_cyclefold_circuit, CycleFoldCircuit, CycleFoldCommittedInstance, CycleFoldConfig, + CycleFoldWitness, + }, }; -use crate::{arith::Arith, commitment::CommitmentScheme}; +use crate::{arith::ArithRelation, commitment::CommitmentScheme}; +use crate::{Curve, Error}; use decider_eth_circuit::WitnessVar; pub mod circuits; @@ -59,33 +55,32 @@ pub mod decider_eth_circuit; use super::traits::{CommittedInstanceOps, Inputize, WitnessOps}; /// Configuration for Nova's CycleFold circuit -pub struct NovaCycleFoldConfig { +pub struct NovaCycleFoldConfig { _c: PhantomData, } -impl CycleFoldConfig for NovaCycleFoldConfig { +impl CycleFoldConfig for NovaCycleFoldConfig { const RANDOMNESS_BIT_LENGTH: usize = NOVA_N_BITS_RO; // Number of points to be folded in the CycleFold circuit, in Nova's case, this is a fixed // amount: // 2 points to be folded. const N_INPUT_POINTS: usize = 2; type C = C; - type F = C::BaseField; } /// CycleFold circuit for computing random linear combinations of group elements /// in Nova instances. 
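// Hedged, out-of-circuit sketch of the random linear combination that the CycleFold
// circuit enforces for Nova (natively over the secondary curve's field): fold two
// group elements with a challenge `r` as `p1 + r * p2`. The helper below is only
// illustrative and is not part of this patch's API.
use ark_ec::CurveGroup;

fn fold_points<C: CurveGroup>(r: C::ScalarField, p1: C, p2: C) -> C {
    // e.g. the folded running commitment U_{i+1}.cmW = U_i.cmW + r * u_i.cmW
    p1 + p2 * r
}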
-pub type NovaCycleFoldCircuit = CycleFoldCircuit, GC>; +pub type NovaCycleFoldCircuit = CycleFoldCircuit>; #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct CommittedInstance { +pub struct CommittedInstance { pub cmE: C, pub u: C::ScalarField, pub cmW: C, pub x: Vec, } -impl Dummy for CommittedInstance { +impl Dummy for CommittedInstance { fn dummy(io_len: usize) -> Self { Self { cmE: C::zero(), @@ -96,16 +91,13 @@ impl Dummy for CommittedInstance { } } -impl Dummy<&R1CS>> for CommittedInstance { +impl Dummy<&R1CS>> for CommittedInstance { fn dummy(r1cs: &R1CS>) -> Self { - Self::dummy(r1cs.l) + Self::dummy(r1cs.n_public_inputs()) } } -impl Absorb for CommittedInstance -where - C::ScalarField: Absorb, -{ +impl Absorb for CommittedInstance { fn to_sponge_bytes(&self, dest: &mut Vec) { C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest); } @@ -113,19 +105,12 @@ where fn to_sponge_field_elements(&self, dest: &mut Vec) { self.u.to_sponge_field_elements(dest); self.x.to_sponge_field_elements(dest); - // We cannot call `to_native_sponge_field_elements(dest)` directly, as - // `to_native_sponge_field_elements` needs `F` to be `C::ScalarField`, - // but here `F` is a generic `PrimeField`. - self.cmE - .to_native_sponge_field_elements_as_vec() - .to_sponge_field_elements(dest); - self.cmW - .to_native_sponge_field_elements_as_vec() - .to_sponge_field_elements(dest); + self.cmE.to_native_sponge_field_elements(dest); + self.cmW.to_native_sponge_field_elements(dest); } } -impl CommittedInstanceOps for CommittedInstance { +impl CommittedInstanceOps for CommittedInstance { type Var = CommittedInstanceVar; fn get_commitments(&self) -> Vec { @@ -137,27 +122,29 @@ impl CommittedInstanceOps for CommittedInstance { } } -impl Inputize> for CommittedInstance { - fn inputize(&self) -> Vec { +impl Inputize> for CommittedInstance { + /// Returns the internal representation in the same order as how the value + /// is allocated in `CommittedInstanceVar::new_input`. 
+ fn inputize(&self) -> Vec> { [ &[self.u][..], &self.x, - &self.cmE.inputize(), - &self.cmW.inputize(), + &self.cmE.inputize_nonnative(), + &self.cmW.inputize_nonnative(), ] .concat() } } #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Witness { +pub struct Witness { pub E: Vec, pub rE: C::ScalarField, pub W: Vec, pub rW: C::ScalarField, } -impl Witness { +impl Witness { pub fn new(w: Vec, e_len: usize, mut rng: impl RngCore) -> Self { let (rW, rE) = if H { ( @@ -195,18 +182,18 @@ impl Witness { } } -impl Dummy<&R1CS>> for Witness { +impl Dummy<&R1CS>> for Witness { fn dummy(r1cs: &R1CS>) -> Self { Self { - E: vec![C::ScalarField::zero(); r1cs.A.n_rows], + E: vec![C::ScalarField::zero(); r1cs.n_constraints()], rE: C::ScalarField::zero(), - W: vec![C::ScalarField::zero(); r1cs.A.n_cols - 1 - r1cs.l], + W: vec![C::ScalarField::zero(); r1cs.n_witnesses()], rW: C::ScalarField::zero(), } } } -impl WitnessOps for Witness { +impl WitnessOps for Witness { type Var = WitnessVar; fn get_openings(&self) -> Vec<(&[C::ScalarField], C::ScalarField)> { @@ -217,8 +204,8 @@ impl WitnessOps for Witness { #[derive(Debug, Clone)] pub struct PreprocessorParam where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, @@ -234,8 +221,8 @@ where impl PreprocessorParam where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, @@ -256,8 +243,8 @@ where #[derive(Debug, Clone)] pub struct ProverParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -271,8 +258,8 @@ where impl Valid for ProverParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -291,8 +278,8 @@ where } impl CanonicalSerialize for ProverParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -311,8 +298,8 @@ where } impl CanonicalDeserialize for ProverParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -335,8 +322,8 @@ where #[derive(Debug, Clone)] pub struct VerifierParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -354,8 +341,8 @@ where impl Valid for VerifierParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -367,8 +354,8 @@ where } impl CanonicalSerialize for VerifierParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -388,8 +375,8 @@ where impl VerifierParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -408,8 +395,8 @@ where #[derive(PartialEq, Eq, Debug, Clone, CanonicalSerialize, CanonicalDeserialize)] pub struct IVCProof where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, { // current step of the IVC pub i: C1::ScalarField, @@ -432,19 +419,14 @@ where /// [CycleFold](https://eprint.iacr.org/2023/1192.pdf), following the FoldingScheme trait /// The `H` const generic specifies whether the homorphic commitment scheme is blinding #[derive(Clone, Debug)] -pub struct Nova +pub struct Nova where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - 
GC2: CurveVar>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, { - _gc1: PhantomData, - _c2: PhantomData, - _gc2: PhantomData, /// R1CS of the Augmented Function circuit pub r1cs: R1CS, /// R1CS of the CycleFold circuit @@ -474,21 +456,15 @@ where pub cf_U_i: CycleFoldCommittedInstance, } -impl FoldingScheme - for Nova +impl FoldingScheme + for Nova where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, + C1: Curve, { type PreprocessorParam = PreprocessorParam; type ProverParam = ProverParams; @@ -524,7 +500,7 @@ where let f_circuit = FC::new(fc_params)?; let cs = ConstraintSystem::::new_ref(); let augmented_F_circuit = - AugmentedFCircuit::::empty(&poseidon_config, f_circuit.clone()); + AugmentedFCircuit::::empty(&poseidon_config, f_circuit.clone()); augmented_F_circuit.generate_constraints(cs.clone())?; cs.finalize(); let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; @@ -532,7 +508,7 @@ where // CycleFold circuit R1CS let cs2 = ConstraintSystem::::new_ref(); - let cf_circuit = NovaCycleFoldCircuit::::empty(); + let cf_circuit = NovaCycleFoldCircuit::::empty(); cf_circuit.generate_constraints(cs2.clone())?; cs2.finalize(); let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; @@ -555,16 +531,30 @@ where prep_param: &Self::PreprocessorParam, ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { let (r1cs, cf_r1cs) = - get_r1cs::(&prep_param.poseidon_config, prep_param.F.clone())?; + get_r1cs::(&prep_param.poseidon_config, prep_param.F.clone())?; // if cs params exist, use them, if not, generate new ones let (cs_pp, cs_vp) = match (&prep_param.cs_pp, &prep_param.cs_vp) { (Some(cs_pp), Some(cs_vp)) => (cs_pp.clone(), cs_vp.clone()), - _ => CS1::setup(&mut rng, r1cs.A.n_rows)?, + _ => CS1::setup( + &mut rng, + // `CS1` is for committing to Nova's witness vector `w` and + // error term `e`, where the length of `e` is the number of + // constraints, so we set `len` to the maximum of `e` and `w`'s + // lengths. + max(r1cs.n_constraints(), r1cs.n_witnesses()), + )?, }; let (cf_cs_pp, cf_cs_vp) = match (&prep_param.cf_cs_pp, &prep_param.cf_cs_vp) { (Some(cf_cs_pp), Some(cf_cs_vp)) => (cf_cs_pp.clone(), cf_cs_vp.clone()), - _ => CS2::setup(&mut rng, cf_r1cs.A.n_rows)?, + _ => CS2::setup( + &mut rng, + // `CS2` is for committing to CycleFold's witness vector `w` and + // error term `e`, where the length of `e` is the number of + // constraints, so we set `len` to the maximum of `e` and `w`'s + // lengths. + max(cf_r1cs.n_constraints(), cf_r1cs.n_witnesses()), + )?, }; let prover_params = ProverParams:: { @@ -596,8 +586,8 @@ where let cs2 = ConstraintSystem::::new_ref(); let augmented_F_circuit = - AugmentedFCircuit::::empty(&pp.poseidon_config, F.clone()); - let cf_circuit = NovaCycleFoldCircuit::::empty(); + AugmentedFCircuit::::empty(&pp.poseidon_config, F.clone()); + let cf_circuit = NovaCycleFoldCircuit::::empty(); augmented_F_circuit.generate_constraints(cs.clone())?; cs.finalize(); @@ -620,9 +610,6 @@ where // W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the // R1CS that we're working with. 
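// Hedged sketch of the key-sizing rule stated in the setup comments above: with a
// Pedersen-style scheme the same key commits to the witness vector `w` and to the
// error vector `e` (one entry per constraint), so its length must cover the longer
// of the two. Plain usize arguments stand in here for the Arith counting methods.
fn commitment_key_len(n_constraints: usize, n_witnesses: usize) -> usize {
    core::cmp::max(n_constraints, n_witnesses)
}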
Ok(Self { - _gc1: PhantomData, - _c2: PhantomData, - _gc2: PhantomData, r1cs, cf_r1cs, poseidon_config: pp.poseidon_config.clone(), @@ -647,8 +634,8 @@ where fn prove_step( &mut self, mut rng: impl RngCore, - external_inputs: Vec, - // Nova does not support multi-instances folding + external_inputs: FC::ExternalInputs, + // Nova does not support multi-instances folding (by design) _other_instances: Option, ) -> Result<(), Error> { // ensure that commitments are blinding if user has specified so. @@ -671,7 +658,7 @@ where // `transcript` is for challenge generation. let mut transcript = sponge.clone(); - let augmented_F_circuit: AugmentedFCircuit; + let augmented_F_circuit: AugmentedFCircuit; // Nova does not support (by design) multi-instances folding if _other_instances.is_some() { @@ -686,14 +673,6 @@ where self.F.state_len(), )); } - if external_inputs.len() != self.F.external_inputs_len() { - return Err(Error::NotSameLength( - "F.external_inputs_len()".to_string(), - self.F.external_inputs_len(), - "external_inputs.len()".to_string(), - external_inputs.len(), - )); - } if self.i > C1::ScalarField::from_le_bytes_mod_order(&usize::MAX.to_le_bytes()) { return Err(Error::MaxStep); @@ -715,10 +694,6 @@ where i_usize = usize::from_le_bytes(i_bytes); } - let z_i1 = self - .F - .step_native(i_usize, self.z_i.clone(), external_inputs.clone())?; - // fold Nova instances let (W_i1, U_i1, cmT, r_bits): (Witness, CommittedInstance, C1, Vec) = NIFS::, H>::prove( @@ -731,26 +706,10 @@ where &self.w_i, &self.u_i, )?; - let r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&r_bits)) - .ok_or(Error::OutOfBounds)?; - - // folded instance output (public input, x) - // u_{i+1}.x[0] = H(i+1, z_0, z_{i+1}, U_{i+1}) - let u_i1_x = U_i1.hash( - &sponge, - self.pp_hash, - self.i + C1::ScalarField::one(), - &self.z_0, - &z_i1, - ); - // u_{i+1}.x[1] = H(cf_U_{i+1}) - let cf_u_i1_x: C1::ScalarField; if self.i == C1::ScalarField::zero() { - cf_u_i1_x = self.cf_U_i.hash_cyclefold(&sponge, self.pp_hash); // base case - augmented_F_circuit = AugmentedFCircuit:: { - _gc2: PhantomData, + augmented_F_circuit = AugmentedFCircuit:: { poseidon_config: self.poseidon_config.clone(), pp_hash: Some(self.pp_hash), i: Some(C1::ScalarField::zero()), // = i=0 @@ -764,13 +723,11 @@ where U_i1_cmW: Some(U_i1.cmW), cmT: Some(cmT), F: self.F.clone(), - x: Some(u_i1_x), cf1_u_i_cmW: None, cf2_u_i_cmW: None, cf_U_i: None, cf1_cmT: None, cf2_cmT: None, - cf_x: Some(cf_u_i1_x), }; #[cfg(test)] @@ -785,60 +742,33 @@ where } } else { // CycleFold part: - // get the vector used as public inputs 'x' in the CycleFold circuit - // cyclefold circuit for cmW - let cfW_u_i_x = [ - vec![r_Fq], - get_cm_coordinates(&self.U_i.cmW), - get_cm_coordinates(&self.u_i.cmW), - get_cm_coordinates(&U_i1.cmW), - ] - .concat(); - // cyclefold circuit for cmE - let cfE_u_i_x = [ - vec![r_Fq], - get_cm_coordinates(&self.U_i.cmE), - get_cm_coordinates(&cmT), - get_cm_coordinates(&U_i1.cmE), - ] - .concat(); - - let cfW_circuit = NovaCycleFoldCircuit:: { - _gc: PhantomData, + let cfW_circuit = NovaCycleFoldCircuit:: { r_bits: Some(r_bits.clone()), points: Some(vec![self.U_i.clone().cmW, self.u_i.clone().cmW]), - x: Some(cfW_u_i_x.clone()), }; - let cfE_circuit = NovaCycleFoldCircuit:: { - _gc: PhantomData, + let cfE_circuit = NovaCycleFoldCircuit:: { r_bits: Some(r_bits.clone()), points: Some(vec![self.U_i.clone().cmE, cmT]), - x: Some(cfE_u_i_x.clone()), }; // fold self.cf_U_i + cfW_U -> folded running with cfW - let (_cfW_w_i, cfW_u_i, cfW_W_i1, 
cfW_U_i1, cfW_cmT, _) = self.fold_cyclefold_circuit( + let (cfW_u_i, cfW_W_i1, cfW_U_i1, cfW_cmT) = self.fold_cyclefold_circuit( &mut transcript, self.cf_W_i.clone(), // CycleFold running instance witness self.cf_U_i.clone(), // CycleFold running instance - cfW_u_i_x, cfW_circuit, &mut rng, )?; // fold [the output from folding self.cf_U_i + cfW_U] + cfE_U = folded_running_with_cfW + cfE - let (_cfE_w_i, cfE_u_i, cf_W_i1, cf_U_i1, cf_cmT, _) = self.fold_cyclefold_circuit( + let (cfE_u_i, cf_W_i1, cf_U_i1, cf_cmT) = self.fold_cyclefold_circuit( &mut transcript, cfW_W_i1, cfW_U_i1.clone(), - cfE_u_i_x, cfE_circuit, &mut rng, )?; - cf_u_i1_x = cf_U_i1.hash_cyclefold(&sponge, self.pp_hash); - - augmented_F_circuit = AugmentedFCircuit:: { - _gc2: PhantomData, + augmented_F_circuit = AugmentedFCircuit:: { poseidon_config: self.poseidon_config.clone(), pp_hash: Some(self.pp_hash), i: Some(self.i), @@ -852,41 +782,29 @@ where U_i1_cmW: Some(U_i1.cmW), cmT: Some(cmT), F: self.F.clone(), - x: Some(u_i1_x), // cyclefold values cf1_u_i_cmW: Some(cfW_u_i.cmW), cf2_u_i_cmW: Some(cfE_u_i.cmW), cf_U_i: Some(self.cf_U_i.clone()), cf1_cmT: Some(cfW_cmT), cf2_cmT: Some(cf_cmT), - cf_x: Some(cf_u_i1_x), }; self.cf_W_i = cf_W_i1; self.cf_U_i = cf_U_i1; - - #[cfg(test)] - { - cfW_u_i.check_incoming()?; - cfE_u_i.check_incoming()?; - self.cf_r1cs.check_relation(&_cfW_w_i, &cfW_u_i)?; - self.cf_r1cs.check_relation(&_cfE_w_i, &cfE_u_i)?; - self.cf_r1cs.check_relation(&self.cf_W_i, &self.cf_U_i)?; - } } let cs = ConstraintSystem::::new_ref(); - augmented_F_circuit.generate_constraints(cs.clone())?; + let z_i1 = augmented_F_circuit + .compute_next_state(cs.clone())? + .value()?; #[cfg(test)] - assert!(cs.is_satisfied().unwrap()); + assert!(cs.is_satisfied()?); let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let (w_i1, x_i1) = extract_w_x::(&cs); - if x_i1[0] != u_i1_x || x_i1[1] != cf_u_i1_x { - return Err(Error::NotEqual); - } #[cfg(test)] if x_i1.len() != 2 { @@ -896,7 +814,7 @@ where // set values for next iteration self.i += C1::ScalarField::one(); self.z_i = z_i1; - self.w_i = Witness::::new::(w_i1, self.r1cs.A.n_rows, &mut rng); + self.w_i = Witness::::new::(w_i1, self.r1cs.n_constraints(), &mut rng); self.u_i = self.w_i.commit::(&self.cs_pp, x_i1)?; self.W_i = W_i1; self.U_i = U_i1; @@ -951,8 +869,8 @@ where let cs = ConstraintSystem::::new_ref(); let cs2 = ConstraintSystem::::new_ref(); let augmented_F_circuit = - AugmentedFCircuit::::empty(&pp.poseidon_config, f_circuit.clone()); - let cf_circuit = NovaCycleFoldCircuit::::empty(); + AugmentedFCircuit::::empty(&pp.poseidon_config, f_circuit.clone()); + let cf_circuit = NovaCycleFoldCircuit::::empty(); augmented_F_circuit.generate_constraints(cs.clone())?; cs.finalize(); @@ -965,9 +883,6 @@ where let cf_r1cs = extract_r1cs::(&cs2)?; Ok(Self { - _gc1: PhantomData, - _c2: PhantomData, - _gc2: PhantomData, r1cs, cf_r1cs, poseidon_config: pp.poseidon_config, @@ -1043,20 +958,14 @@ where } } -impl Nova +impl Nova where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, + C1: Curve, { // folds the given cyclefold circuit and its instances #[allow(clippy::type_complexity)] @@ -1065,28 +974,24 @@ where transcript: &mut T, cf_W_i: CycleFoldWitness, // witness of the running 
instance cf_U_i: CycleFoldCommittedInstance, // running instance - cf_u_i_x: Vec, - cf_circuit: NovaCycleFoldCircuit, + cf_circuit: NovaCycleFoldCircuit, rng: &mut impl RngCore, ) -> Result< ( - CycleFoldWitness, CycleFoldCommittedInstance, // u_i CycleFoldWitness, // W_i1 CycleFoldCommittedInstance, // U_i1 C2, // cmT - C2::ScalarField, // r_Fq ), Error, > { - fold_cyclefold_circuit::, C1, GC1, C2, GC2, CS2, H>( + fold_cyclefold_circuit::, C2, CS2, H>( transcript, self.cf_r1cs.clone(), self.cf_cs_pp.clone(), self.pp_hash, cf_W_i, cf_U_i, - cf_u_i_x, cf_circuit, rng, ) @@ -1107,57 +1012,28 @@ pub fn get_r1cs_from_cs( /// helper method to get the R1CS for both the AugmentedFCircuit and the CycleFold circuit #[allow(clippy::type_complexity)] -pub fn get_r1cs( +pub fn get_r1cs( poseidon_config: &PoseidonConfig, F_circuit: FC, ) -> Result<(R1CS, R1CS), Error> where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, + C1: Curve, { - let augmented_F_circuit = - AugmentedFCircuit::::empty(poseidon_config, F_circuit); - let cf_circuit = NovaCycleFoldCircuit::::empty(); + let augmented_F_circuit = AugmentedFCircuit::::empty(poseidon_config, F_circuit); + let cf_circuit = NovaCycleFoldCircuit::::empty(); let r1cs = get_r1cs_from_cs::(augmented_F_circuit)?; let cf_r1cs = get_r1cs_from_cs::(cf_circuit)?; Ok((r1cs, cf_r1cs)) } -/// helper method to get the pedersen params length for both the AugmentedFCircuit and the -/// CycleFold circuit -pub fn get_cs_params_len( - poseidon_config: &PoseidonConfig, - F_circuit: FC, -) -> Result<(usize, usize), Error> -where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, - FC: FCircuit, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, -{ - let (r1cs, cf_r1cs) = get_r1cs::(poseidon_config, F_circuit)?; - Ok((r1cs.A.n_rows, cf_r1cs.A.n_rows)) -} - #[cfg(test)] pub mod tests { use crate::commitment::kzg::KZG; - use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_bn254::{Bn254, Fr, G1Projective as Projective}; + use ark_grumpkin::Projective as Projective2; use super::*; use crate::commitment::pedersen::Pedersen; @@ -1167,27 +1043,32 @@ pub mod tests { /// This test tests the Nova+CycleFold IVC, and by consequence it is also testing the /// AugmentedFCircuit #[test] - fn test_ivc() { + fn test_ivc() -> Result<(), Error> { let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; // run the test using Pedersen commitments on both sides of the curve cycle - test_ivc_opt::, Pedersen, false>( + let _ = test_ivc_opt::, Pedersen, false>( poseidon_config.clone(), F_circuit, 3, - ); + )?; - test_ivc_opt::, Pedersen, true>( + let _ = test_ivc_opt::, Pedersen, true>( poseidon_config.clone(), F_circuit, 3, - ); + )?; // run the test using KZG for the commitments on the main curve, and Pedersen for the // commitments on the secondary curve - test_ivc_opt::, Pedersen, false>(poseidon_config, F_circuit, 3); + let _ = test_ivc_opt::, Pedersen, false>( + poseidon_config, + F_circuit, + 3, + )?; + Ok(()) } 
// test_ivc allowing to choose the CommitmentSchemes @@ -1200,10 +1081,13 @@ pub mod tests { poseidon_config: PoseidonConfig, F_circuit: CubicFCircuit, num_steps: usize, - ) -> ( - Vec, - Nova, CS1, CS2, H>, - ) { + ) -> Result< + ( + Vec, + Nova, CS1, CS2, H>, + ), + Error, + > { let mut rng = ark_std::test_rng(); let prep_param = @@ -1215,29 +1099,21 @@ pub mod tests { cf_cs_pp: None, cf_cs_vp: None, }; - let nova_params = Nova::< - Projective, - GVar, - Projective2, - GVar2, - CubicFCircuit, - CS1, - CS2, - H, - >::preprocess(&mut rng, &prep_param) - .unwrap(); + let nova_params = + Nova::, CS1, CS2, H>::preprocess( + &mut rng, + &prep_param, + )?; let z_0 = vec![Fr::from(3_u32)]; - let mut nova = - Nova::, CS1, CS2, H>::init( - &nova_params, - F_circuit, - z_0.clone(), - ) - .unwrap(); + let mut nova = Nova::, CS1, CS2, H>::init( + &nova_params, + F_circuit, + z_0.clone(), + )?; for _ in 0..num_steps { - nova.prove_step(&mut rng, vec![], None).unwrap(); + nova.prove_step(&mut rng, (), None)?; } assert_eq!(Fr::from(num_steps as u32), nova.i); @@ -1245,25 +1121,20 @@ pub mod tests { let mut nova_pp_serialized = vec![]; nova_params .0 - .serialize_compressed(&mut nova_pp_serialized) - .unwrap(); + .serialize_compressed(&mut nova_pp_serialized)?; let mut nova_vp_serialized = vec![]; nova_params .1 - .serialize_compressed(&mut nova_vp_serialized) - .unwrap(); + .serialize_compressed(&mut nova_vp_serialized)?; // deserialize the Nova params let _nova_pp_deserialized = ProverParams::::deserialize_compressed( &mut nova_pp_serialized.as_slice(), - ) - .unwrap(); + )?; let nova_vp_deserialized = Nova::< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, CS1, CS2, @@ -1273,8 +1144,7 @@ pub mod tests { ark_serialize::Compress::Yes, ark_serialize::Validate::Yes, (), // fcircuit_params - ) - .unwrap(); + )?; let ivc_proof = nova.ivc_proof(); @@ -1284,27 +1154,18 @@ pub mod tests { .serialize_compressed(&mut ivc_proof_serialized) .is_ok()); // deserialize IVCProof - let ivc_proof_deserialized = , - CS1, - CS2, - H, - > as FoldingScheme>>::IVCProof::deserialize_compressed( - ivc_proof_serialized.as_slice() - ) - .unwrap(); + let ivc_proof_deserialized = + , CS1, CS2, H> as FoldingScheme< + Projective, + Projective2, + CubicFCircuit, + >>::IVCProof::deserialize_compressed(ivc_proof_serialized.as_slice())?; // verify the deserialized IVCProof with the deserialized VerifierParams - Nova::, CS1, CS2, H>::verify( + Nova::, CS1, CS2, H>::verify( nova_vp_deserialized, // Nova's verifier params ivc_proof_deserialized, - ) - .unwrap(); - - (z_0, nova) + )?; + Ok((z_0, nova)) } } diff --git a/folding-schemes/src/folding/nova/nifs/mod.rs b/folding-schemes/src/folding/nova/nifs/mod.rs index ad3e4fa0..ff44ee73 100644 --- a/folding-schemes/src/folding/nova/nifs/mod.rs +++ b/folding-schemes/src/folding/nova/nifs/mod.rs @@ -7,7 +7,6 @@ /// - [Ova](https://hackmd.io/V4838nnlRKal9ZiTHiGYzw) /// - [Mova](https://eprint.iacr.org/2024/1220.pdf) use ark_crypto_primitives::sponge::{constraints::AbsorbGadget, Absorb, CryptographicSponge}; -use ark_ec::CurveGroup; use ark_r1cs_std::{alloc::AllocVar, boolean::Boolean, fields::fp::FpVar}; use ark_relations::r1cs::SynthesisError; use ark_std::fmt::Debug; @@ -18,7 +17,7 @@ use crate::commitment::CommitmentScheme; use crate::folding::circuits::CF1; use crate::folding::traits::{CommittedInstanceOps, CommittedInstanceVarOps}; use crate::transcript::{Transcript, TranscriptVar}; -use crate::Error; +use crate::{Curve, Error}; pub mod mova; pub mod nova; @@ -33,7 +32,7 @@ pub mod 
pointvsline; /// [Mova](https://eprint.iacr.org/2024/1220.pdf). /// `H` specifies whether the NIFS will use a blinding factor. pub trait NIFSTrait< - C: CurveGroup, + C: Curve, CS: CommitmentScheme, T: Transcript, const H: bool = false, @@ -99,7 +98,7 @@ pub trait NIFSTrait< /// logic of the NIFS.Verify defined in [Nova](https://eprint.iacr.org/2021/370.pdf) and it's /// variants [Ova](https://hackmd.io/V4838nnlRKal9ZiTHiGYzw) and /// [Mova](https://eprint.iacr.org/2024/1220.pdf). -pub trait NIFSGadgetTrait, S>> { +pub trait NIFSGadgetTrait, S>> { type CommittedInstance: Debug + Clone + Absorb + CommittedInstanceOps; type CommittedInstanceVar: Debug + Clone @@ -137,11 +136,14 @@ pub mod tests { use ark_pallas::{Fr, Projective}; use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar}; use ark_relations::r1cs::ConstraintSystem; - use ark_std::{test_rng, UniformRand}; + use ark_std::{cmp::max, test_rng, UniformRand}; use super::NIFSTrait; use super::*; - use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z}; + use crate::arith::{ + r1cs::tests::{get_test_r1cs, get_test_z}, + Arith, + }; use crate::commitment::pedersen::Pedersen; use crate::folding::traits::{CommittedInstanceOps, CommittedInstanceVarOps}; use crate::transcript::poseidon::poseidon_canonical_config; @@ -151,11 +153,12 @@ pub mod tests { /// so that their relation can be checked. pub(crate) fn test_nifs_opt< N: NIFSTrait, PoseidonSponge>, - >() -> (N::Witness, N::CommittedInstance) { + >() -> Result<(N::Witness, N::CommittedInstance), Error> { let r1cs: R1CS = get_test_r1cs(); let mut rng = ark_std::test_rng(); - let (pedersen_params, _) = Pedersen::::setup(&mut rng, r1cs.A.n_cols).unwrap(); + let (pedersen_params, _) = + Pedersen::::setup(&mut rng, max(r1cs.n_constraints(), r1cs.n_witnesses()))?; let poseidon_config = poseidon_canonical_config::(); let mut transcript_p = PoseidonSponge::::new(&poseidon_config); @@ -165,16 +168,16 @@ pub mod tests { // prepare the running instance let z = get_test_z(3); let (w, x) = r1cs.split_z(&z); - let mut W_i = N::new_witness(w.clone(), r1cs.A.n_rows, test_rng()); - let mut U_i = N::new_instance(&mut rng, &pedersen_params, &W_i, x, vec![]).unwrap(); + let mut W_i = N::new_witness(w.clone(), r1cs.n_constraints(), test_rng()); + let mut U_i = N::new_instance(&mut rng, &pedersen_params, &W_i, x, vec![])?; let num_iters = 10; for i in 0..num_iters { // prepare the incoming instance let incoming_instance_z = get_test_z(i + 4); let (w, x) = r1cs.split_z(&incoming_instance_z); - let w_i = N::new_witness(w.clone(), r1cs.A.n_rows, test_rng()); - let u_i = N::new_instance(&mut rng, &pedersen_params, &w_i, x, vec![]).unwrap(); + let w_i = N::new_witness(w.clone(), r1cs.n_constraints(), test_rng()); + let u_i = N::new_instance(&mut rng, &pedersen_params, &w_i, x, vec![])?; // NIFS.P let (folded_witness, _, proof, _) = N::prove( @@ -186,19 +189,18 @@ pub mod tests { &U_i, &w_i, &u_i, - ) - .unwrap(); + )?; // NIFS.V let (folded_committed_instance, _) = - N::verify(&mut transcript_v, pp_hash, &U_i, &u_i, &proof).unwrap(); + N::verify(&mut transcript_v, pp_hash, &U_i, &u_i, &proof)?; // set running_instance for next loop iteration W_i = folded_witness; U_i = folded_committed_instance; } - (W_i, U_i) + Ok((W_i, U_i)) } /// Test method used to test the different implementations of the NIFSGadgetTrait (ie. 
Nova, @@ -252,7 +254,9 @@ pub mod tests { /// test that checks the native CommittedInstance.to_sponge_{bytes,field_elements} /// vs the R1CS constraints version - pub(crate) fn test_committed_instance_to_sponge_preimage_opt(ci: N::CommittedInstance) + pub(crate) fn test_committed_instance_to_sponge_preimage_opt( + ci: N::CommittedInstance, + ) -> Result<(), Error> where N: NIFSTrait, PoseidonSponge>, NG: NIFSGadgetTrait< @@ -267,18 +271,21 @@ pub mod tests { let cs = ConstraintSystem::::new_ref(); - let ciVar = NG::CommittedInstanceVar::new_witness(cs.clone(), || Ok(ci.clone())).unwrap(); - let bytes_var = ciVar.to_sponge_bytes().unwrap(); - let field_elements_var = ciVar.to_sponge_field_elements().unwrap(); + let ciVar = NG::CommittedInstanceVar::new_witness(cs.clone(), || Ok(ci.clone()))?; + let bytes_var = ciVar.to_sponge_bytes()?; + let field_elements_var = ciVar.to_sponge_field_elements()?; - assert!(cs.is_satisfied().unwrap()); + assert!(cs.is_satisfied()?); // check that the natively computed and in-circuit computed hashes match - assert_eq!(bytes_var.value().unwrap(), bytes); - assert_eq!(field_elements_var.value().unwrap(), field_elements); + assert_eq!(bytes_var.value()?, bytes); + assert_eq!(field_elements_var.value()?, field_elements); + Ok(()) } - pub(crate) fn test_committed_instance_hash_opt(ci: NG::CommittedInstance) + pub(crate) fn test_committed_instance_hash_opt( + ci: NG::CommittedInstance, + ) -> Result<(), Error> where N: NIFSTrait, PoseidonSponge>, NG: NIFSGadgetTrait< @@ -302,21 +309,20 @@ pub mod tests { let cs = ConstraintSystem::::new_ref(); - let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash)).unwrap(); - let iVar = FpVar::::new_witness(cs.clone(), || Ok(i)).unwrap(); - let z_0Var = Vec::>::new_witness(cs.clone(), || Ok(z_0.clone())).unwrap(); - let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i.clone())).unwrap(); - let ciVar = NG::CommittedInstanceVar::new_witness(cs.clone(), || Ok(ci.clone())).unwrap(); + let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash))?; + let iVar = FpVar::::new_witness(cs.clone(), || Ok(i))?; + let z_0Var = Vec::>::new_witness(cs.clone(), || Ok(z_0.clone()))?; + let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i.clone()))?; + let ciVar = NG::CommittedInstanceVar::new_witness(cs.clone(), || Ok(ci.clone()))?; let sponge = PoseidonSpongeVar::::new(cs.clone(), &poseidon_config); // compute the CommittedInstance hash in-circuit - let (hVar, _) = ciVar - .hash(&sponge, &pp_hashVar, &iVar, &z_0Var, &z_iVar) - .unwrap(); - assert!(cs.is_satisfied().unwrap()); + let (hVar, _) = ciVar.hash(&sponge, &pp_hashVar, &iVar, &z_0Var, &z_iVar)?; + assert!(cs.is_satisfied()?); // check that the natively computed and in-circuit computed hashes match - assert_eq!(hVar.value().unwrap(), h); + assert_eq!(hVar.value()?, h); + Ok(()) } } diff --git a/folding-schemes/src/folding/nova/nifs/mova.rs b/folding-schemes/src/folding/nova/nifs/mova.rs index 6f3353bd..890be671 100644 --- a/folding-schemes/src/folding/nova/nifs/mova.rs +++ b/folding-schemes/src/folding/nova/nifs/mova.rs @@ -1,34 +1,29 @@ /// This module contains the implementation the NIFSTrait for the /// [Mova](https://eprint.iacr.org/2024/1220.pdf) NIFS (Non-Interactive Folding Scheme). 
use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; use ark_ff::PrimeField; -use ark_poly::MultilinearExtension; +use ark_poly::Polynomial; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use ark_std::log2; -use ark_std::rand::RngCore; -use ark_std::{One, UniformRand, Zero}; -use std::marker::PhantomData; +use ark_std::{log2, marker::PhantomData, rand::RngCore, One, UniformRand, Zero}; use super::{ nova::NIFS as NovaNIFS, pointvsline::{PointVsLine, PointVsLineProof, PointvsLineEvaluationClaim}, NIFSTrait, }; -use crate::arith::{r1cs::R1CS, Arith}; +use crate::arith::{r1cs::R1CS, Arith, ArithRelation}; use crate::commitment::CommitmentScheme; use crate::folding::circuits::CF1; use crate::folding::traits::Dummy; -use crate::transcript::AbsorbNonNative; use crate::transcript::Transcript; use crate::utils::{ mle::dense_vec_to_dense_mle, vec::{is_zero_vec, vec_add, vec_scalar_mul}, }; -use crate::Error; +use crate::{Curve, Error}; #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct CommittedInstance { +pub struct CommittedInstance { // Random evaluation point for the E pub rE: Vec, // mleE is the evaluation of the MLE of E at r_E @@ -38,10 +33,7 @@ pub struct CommittedInstance { pub x: Vec, } -impl Absorb for CommittedInstance -where - C::ScalarField: Absorb, -{ +impl Absorb for CommittedInstance { fn to_sponge_bytes(&self, _dest: &mut Vec) { // This is never called unimplemented!() @@ -52,16 +44,11 @@ where self.x.to_sponge_field_elements(dest); self.rE.to_sponge_field_elements(dest); self.mleE.to_sponge_field_elements(dest); - // We cannot call `to_native_sponge_field_elements(dest)` directly, as - // `to_native_sponge_field_elements` needs `F` to be `C::ScalarField`, - // but here `F` is a generic `PrimeField`. 
- self.cmW - .to_native_sponge_field_elements_as_vec() - .to_sponge_field_elements(dest); + self.cmW.to_native_sponge_field_elements(dest); } } -impl Dummy for CommittedInstance { +impl Dummy for CommittedInstance { fn dummy(io_len: usize) -> Self { Self { rE: vec![C::ScalarField::zero(); io_len], @@ -74,23 +61,23 @@ impl Dummy for CommittedInstance { } #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Witness { +pub struct Witness { pub E: Vec, pub W: Vec, pub rW: C::ScalarField, } -impl Dummy<&R1CS> for Witness { +impl Dummy<&R1CS> for Witness { fn dummy(r1cs: &R1CS) -> Self { Self { - E: vec![C::ScalarField::zero(); r1cs.A.n_rows], - W: vec![C::ScalarField::zero(); r1cs.A.n_cols - 1 - r1cs.l], + E: vec![C::ScalarField::zero(); r1cs.n_constraints()], + W: vec![C::ScalarField::zero(); r1cs.n_witnesses()], rW: C::ScalarField::zero(), } } } -impl Witness { +impl Witness { pub fn new(w: Vec, e_len: usize, mut rng: impl RngCore) -> Self { let rW = if H { C::ScalarField::rand(&mut rng) @@ -114,10 +101,7 @@ impl Witness { let mut mleE = C::ScalarField::zero(); if !is_zero_vec::(&self.E) { let E = dense_vec_to_dense_mle(log2(self.E.len()) as usize, &self.E); - mleE = E.evaluate(&rE).ok_or(Error::NotExpectedLength( - rE.len(), - log2(self.E.len()) as usize, - ))?; + mleE = E.evaluate(&rE); } let cmW = CS::commit(params, &self.W, &self.rW)?; Ok(CommittedInstance { @@ -131,7 +115,7 @@ impl Witness { } #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Proof { +pub struct Proof { pub h_proof: PointVsLineProof, pub mleE1_prime: C::ScalarField, pub mleE2_prime: C::ScalarField, @@ -142,7 +126,7 @@ pub struct Proof { /// [Mova](https://eprint.iacr.org/2024/1220.pdf). /// `H` specifies whether the NIFS will use a blinding factor pub struct NIFS< - C: CurveGroup, + C: Curve, CS: CommitmentScheme, T: Transcript, const H: bool = false, @@ -152,11 +136,8 @@ pub struct NIFS< _ct: PhantomData, } -impl, T: Transcript, const H: bool> +impl, T: Transcript, const H: bool> NIFSTrait for NIFS -where - ::ScalarField: Absorb, - ::BaseField: PrimeField, { type CommittedInstance = CommittedInstance; type Witness = Witness; @@ -253,7 +234,7 @@ where // compute the cross terms let z1: Vec = [vec![U_i.u], U_i.x.to_vec(), W_i.W.to_vec()].concat(); let z2: Vec = [vec![u_i.u], u_i.x.to_vec(), w_i.W.to_vec()].concat(); - let T = NovaNIFS::::compute_T(r1cs, U_i.u, u_i.u, &z1, &z2)?; + let T = NovaNIFS::::compute_T(r1cs, U_i.u, u_i.u, &z1, &z2, &W_i.E, &w_i.E)?; let n_vars: usize = log2(W_i.E.len()) as usize; if log2(T.len()) as usize != n_vars { @@ -261,7 +242,7 @@ where } let mleT = dense_vec_to_dense_mle(n_vars, &T); - let mleT_evaluated = mleT.evaluate(&rE_prime).ok_or(Error::EvaluationFail)?; + let mleT_evaluated = mleT.evaluate(&rE_prime); transcript.absorb(&mleT_evaluated); @@ -336,7 +317,7 @@ where } } -impl, T: Transcript, const H: bool> +impl, T: Transcript, const H: bool> NIFS { // Protocol 7 - point 3 (15) @@ -370,7 +351,7 @@ impl, T: Transcript, c } } -impl Arith, CommittedInstance> for R1CS> { +impl ArithRelation, CommittedInstance> for R1CS> { type Evaluation = Vec>; fn eval_relation( @@ -396,16 +377,17 @@ pub mod tests { use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; use ark_pallas::{Fr, Projective}; - use crate::arith::{r1cs::tests::get_test_r1cs, Arith}; + use crate::arith::{r1cs::tests::get_test_r1cs, ArithRelation}; use crate::commitment::pedersen::Pedersen; use crate::folding::nova::nifs::tests::test_nifs_opt; 
#[test] - fn test_nifs_mova() { - let (W, U) = test_nifs_opt::, PoseidonSponge>>(); + fn test_nifs_mova() -> Result<(), Error> { + let (W, U) = test_nifs_opt::, PoseidonSponge>>()?; // check the last folded instance relation let r1cs = get_test_r1cs(); - r1cs.check_relation(&W, &U).unwrap(); + r1cs.check_relation(&W, &U)?; + Ok(()) } } diff --git a/folding-schemes/src/folding/nova/nifs/nova.rs b/folding-schemes/src/folding/nova/nifs/nova.rs index 4b232e84..3df73f2c 100644 --- a/folding-schemes/src/folding/nova/nifs/nova.rs +++ b/folding-schemes/src/folding/nova/nifs/nova.rs @@ -1,7 +1,6 @@ /// This module contains the implementation the NIFSTrait for the /// [Nova](https://eprint.iacr.org/2021/370.pdf) NIFS (Non-Interactive Folding Scheme). use ark_crypto_primitives::sponge::{constraints::AbsorbGadget, Absorb, CryptographicSponge}; -use ark_ec::{CurveGroup, Group}; use ark_ff::{BigInteger, PrimeField}; use ark_r1cs_std::{boolean::Boolean, fields::fp::FpVar}; use ark_relations::r1cs::SynthesisError; @@ -21,20 +20,15 @@ use crate::folding::circuits::{ use crate::folding::nova::{CommittedInstance, Witness}; use crate::transcript::{Transcript, TranscriptVar}; use crate::utils::vec::{hadamard, mat_vec_mul, vec_add, vec_scalar_mul, vec_sub}; -use crate::Error; +use crate::{Curve, Error}; /// ChallengeGadget computes the RO challenge used for the Nova instances NIFS, it contains a /// rust-native and a in-circuit compatible versions. -pub struct ChallengeGadget { +pub struct ChallengeGadget { _c: PhantomData, _ci: PhantomData, } -impl ChallengeGadget -where - C: CurveGroup, - // ::BaseField: PrimeField, - ::ScalarField: Absorb, -{ +impl ChallengeGadget { pub fn get_challenge_native>( transcript: &mut T, pp_hash: C::ScalarField, // public params hash @@ -79,7 +73,7 @@ where /// [Nova](https://eprint.iacr.org/2021/370.pdf). /// `H` specifies whether the NIFS will use a blinding factor pub struct NIFS< - C: CurveGroup, + C: Curve, CS: CommitmentScheme, T: Transcript, const H: bool = false, @@ -89,10 +83,8 @@ pub struct NIFS< _t: PhantomData, } -impl, T: Transcript, const H: bool> +impl, T: Transcript, const H: bool> NIFSTrait for NIFS -where - ::ScalarField: Absorb, { type CommittedInstance = CommittedInstance; type Witness = Witness; @@ -159,7 +151,7 @@ where // compute the cross terms let z1: Vec = [vec![U_i.u], U_i.x.to_vec(), W_i.W.to_vec()].concat(); let z2: Vec = [vec![u_i.u], u_i.x.to_vec(), w_i.W.to_vec()].concat(); - let T = Self::compute_T(r1cs, U_i.u, u_i.u, &z1, &z2)?; + let T = Self::compute_T(r1cs, U_i.u, u_i.u, &z1, &z2, &W_i.E, &w_i.E)?; // use r_T=0 since we don't need hiding property for cm(T) let cmT = CS::commit(cs_prover_params, &T, &C::ScalarField::zero())?; @@ -202,35 +194,31 @@ where } } -impl, T: Transcript, const H: bool> +impl, T: Transcript, const H: bool> NIFS -where - ::ScalarField: Absorb, { - /// compute_T: compute cross-terms T + /// compute_T: compute cross-terms T. We use the approach described in + /// [Mova](https://eprint.iacr.org/2024/1220.pdf)'s section 5.2. 
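(Editorial sketch of why the reworked cross-term formula is equivalent; this derivation is not part of the diff, it only uses the relaxed R1CS relation Az ∘ Bz = u·(Cz) + E and the identifiers from the old code. Writing z = z1 + z2 and u = u1 + u2:

    Az ∘ Bz − u·(Cz) − E1 − E2
      = (Az1 ∘ Bz1 − u1·Cz1 − E1) + (Az2 ∘ Bz2 − u2·Cz2 − E2)
        + Az1 ∘ Bz2 + Az2 ∘ Bz1 − u1·Cz2 − u2·Cz1

The two parenthesised groups vanish for satisfying relaxed instances, leaving exactly the cross-term vector T that the previous implementation computed term by term. This is why the new signature also takes E1 and E2, and why a single set of matrix-vector products over z now suffices.)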
pub fn compute_T( r1cs: &R1CS, u1: C::ScalarField, u2: C::ScalarField, z1: &[C::ScalarField], z2: &[C::ScalarField], + E1: &[C::ScalarField], + E2: &[C::ScalarField], ) -> Result, Error> { - let (A, B, C) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone()); + let z = vec_add(z1, z2)?; // this is parallelizable (for the future) - let Az1 = mat_vec_mul(&A, z1)?; - let Bz1 = mat_vec_mul(&B, z1)?; - let Cz1 = mat_vec_mul(&C, z1)?; - let Az2 = mat_vec_mul(&A, z2)?; - let Bz2 = mat_vec_mul(&B, z2)?; - let Cz2 = mat_vec_mul(&C, z2)?; - - let Az1_Bz2 = hadamard(&Az1, &Bz2)?; - let Az2_Bz1 = hadamard(&Az2, &Bz1)?; - let u1Cz2 = vec_scalar_mul(&Cz2, &u1); - let u2Cz1 = vec_scalar_mul(&Cz1, &u2); - - vec_sub(&vec_sub(&vec_add(&Az1_Bz2, &Az2_Bz1)?, &u1Cz2)?, &u2Cz1) + let Az = mat_vec_mul(&r1cs.A, &z)?; + let Bz = mat_vec_mul(&r1cs.B, &z)?; + let Cz = mat_vec_mul(&r1cs.C, &z)?; + let u = u1 + u2; + let uCz = vec_scalar_mul(&Cz, &u); + let AzBz = hadamard(&Az, &Bz)?; + let lhs = vec_sub(&AzBz, &uCz)?; + vec_sub(&vec_sub(&lhs, E1)?, E2) } pub fn compute_cyclefold_cmT( @@ -240,15 +228,12 @@ where ci1: &CycleFoldCommittedInstance, w2: &CycleFoldWitness, ci2: &CycleFoldCommittedInstance, - ) -> Result<(Vec, C), Error> - where - ::BaseField: ark_ff::PrimeField, - { + ) -> Result<(Vec, C), Error> { let z1: Vec = [vec![ci1.u], ci1.x.to_vec(), w1.W.to_vec()].concat(); let z2: Vec = [vec![ci2.u], ci2.x.to_vec(), w2.W.to_vec()].concat(); // compute cross terms - let T = Self::compute_T(r1cs, ci1.u, ci2.u, &z1, &z2)?; + let T = Self::compute_T(r1cs, ci1.u, ci2.u, &z1, &z2, &w1.E, &w2.E)?; // use r_T=0 since we don't need hiding property for cm(T) let cmT = CS::commit(cs_prover_params, &T, &C::ScalarField::zero())?; Ok((T, cmT)) @@ -297,16 +282,17 @@ pub mod tests { use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; use ark_pallas::{Fr, Projective}; - use crate::arith::{r1cs::tests::get_test_r1cs, Arith}; + use crate::arith::{r1cs::tests::get_test_r1cs, ArithRelation}; use crate::commitment::pedersen::Pedersen; use crate::folding::nova::nifs::tests::test_nifs_opt; #[test] - fn test_nifs_nova() { - let (W, U) = test_nifs_opt::, PoseidonSponge>>(); + fn test_nifs_nova() -> Result<(), Error> { + let (W, U) = test_nifs_opt::, PoseidonSponge>>()?; // check the last folded instance relation let r1cs = get_test_r1cs(); - r1cs.check_relation(&W, &U).unwrap(); + r1cs.check_relation(&W, &U)?; + Ok(()) } } diff --git a/folding-schemes/src/folding/nova/nifs/nova_circuits.rs b/folding-schemes/src/folding/nova/nifs/nova_circuits.rs index 544d087d..2487c165 100644 --- a/folding-schemes/src/folding/nova/nifs/nova_circuits.rs +++ b/folding-schemes/src/folding/nova/nifs/nova_circuits.rs @@ -1,26 +1,27 @@ /// contains [Nova](https://eprint.iacr.org/2021/370.pdf) NIFS related circuits -use ark_crypto_primitives::sponge::{constraints::AbsorbGadget, Absorb, CryptographicSponge}; -use ark_ec::{CurveGroup, Group}; +use ark_crypto_primitives::sponge::{constraints::AbsorbGadget, CryptographicSponge}; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, boolean::Boolean, eq::EqGadget, fields::{fp::FpVar, FieldVar}, uint8::UInt8, - ToConstraintFieldGadget, }; use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; use ark_std::{fmt::Debug, Zero}; use core::{borrow::Borrow, marker::PhantomData}; use super::NIFSGadgetTrait; -use crate::folding::circuits::{ - nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar}, - CF1, CF2, -}; -use crate::folding::nova::CommittedInstance; use 
crate::folding::traits::CommittedInstanceVarOps; use crate::transcript::TranscriptVar; +use crate::{ + folding::circuits::{ + nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar}, + CF1, CF2, + }, + Curve, +}; +use crate::{folding::nova::CommittedInstance, transcript::AbsorbNonNativeGadget}; use super::nova::ChallengeGadget; @@ -28,17 +29,14 @@ use super::nova::ChallengeGadget; /// constraints field (E1::Fr, where E1 is the main curve). The peculiarity is that cmE and cmW are /// represented non-natively over the constraint field. #[derive(Debug, Clone)] -pub struct CommittedInstanceVar { +pub struct CommittedInstanceVar { pub u: FpVar, pub x: Vec>, pub cmE: NonNativeAffineVar, pub cmW: NonNativeAffineVar, } -impl AllocVar, CF1> for CommittedInstanceVar -where - C: CurveGroup, -{ +impl AllocVar, CF1> for CommittedInstanceVar { fn new_variable>>( cs: impl Into>>, f: impl FnOnce() -> Result, @@ -61,10 +59,7 @@ where } } -impl AbsorbGadget for CommittedInstanceVar -where - C: CurveGroup, -{ +impl AbsorbGadget for CommittedInstanceVar { fn to_sponge_bytes(&self) -> Result>, SynthesisError> { FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?) } @@ -73,14 +68,14 @@ where Ok([ vec![self.u.clone()], self.x.clone(), - self.cmE.to_constraint_field()?, - self.cmW.to_constraint_field()?, + self.cmE.to_native_sponge_field_elements()?, + self.cmW.to_native_sponge_field_elements()?, ] .concat()) } } -impl CommittedInstanceVarOps for CommittedInstanceVar { +impl CommittedInstanceVarOps for CommittedInstanceVar { type PointVar = NonNativeAffineVar; fn get_commitments(&self) -> Vec { @@ -107,7 +102,7 @@ impl CommittedInstanceVarOps for CommittedInstanceVar { /// Implements the circuit that does the checks of the Non-Interactive Folding Scheme Verifier /// described in section 4 of [Nova](https://eprint.iacr.org/2021/370.pdf), where the cmE & cmW checks are /// delegated to the NIFSCycleFoldGadget. 
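(For orientation, a sketch of the standard Nova NIFS verifier equations that this gadget encodes; the equations below are a hedged paraphrase, not text from the diff. Over the native field the circuit only folds the scalar parts of the instances,

    U'.u = U.u + r·u_i.u
    U'.x = U.x + r·u_i.x

with r the Fiat-Shamir challenge re-derived in-circuit by ChallengeGadget, while the group-level folds cmW' = U.cmW + r·u_i.cmW and cmE' = U.cmE + r·cmT (the incoming instance has cmE = 0) are delegated to the CycleFold gadget on the other curve, as the doc comment above states.)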
-pub struct NIFSGadget, S>> { +pub struct NIFSGadget, S>> { _c: PhantomData, _s: PhantomData, _t: PhantomData, @@ -115,10 +110,9 @@ pub struct NIFSGadget NIFSGadgetTrait for NIFSGadget where - C: CurveGroup, + C: Curve, S: CryptographicSponge, T: TranscriptVar, S>, - ::ScalarField: Absorb, { type CommittedInstance = CommittedInstance; type CommittedInstanceVar = CommittedInstanceVar; @@ -141,7 +135,7 @@ where u_i.clone(), cmT.clone(), )?; - let r = Boolean::le_bits_to_fp_var(&r_bits)?; + let r = Boolean::le_bits_to_fp(&r_bits)?; Ok(( Self::CommittedInstanceVar { @@ -179,9 +173,10 @@ pub mod tests { test_nifs_gadget_opt, }, }; + use crate::Error; #[test] - fn test_nifs_gadget() { + fn test_nifs_gadget() -> Result<(), Error> { let mut rng = ark_std::test_rng(); // prepare the committed instances to test in-circuit let ci: Vec> = (0..2) @@ -198,14 +193,14 @@ pub mod tests { let (ci_out, ciVar_out) = test_nifs_gadget_opt::< NIFS, PoseidonSponge>, NIFSGadget, PoseidonSpongeVar>, - >(ci, cmT) - .unwrap(); - assert_eq!(ciVar_out.u.value().unwrap(), ci_out.u); - assert_eq!(ciVar_out.x.value().unwrap(), ci_out.x); + >(ci, cmT)?; + assert_eq!(ciVar_out.u.value()?, ci_out.u); + assert_eq!(ciVar_out.x.value()?, ci_out.x); + Ok(()) } #[test] - fn test_committed_instance_to_sponge_preimage() { + fn test_committed_instance_to_sponge_preimage() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let ci = CommittedInstance:: { cmE: Projective::rand(&mut rng), @@ -217,11 +212,12 @@ pub mod tests { test_committed_instance_to_sponge_preimage_opt::< NIFS, PoseidonSponge>, NIFSGadget, PoseidonSpongeVar>, - >(ci); + >(ci)?; + Ok(()) } #[test] - fn test_committed_instance_hash() { + fn test_committed_instance_hash() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let ci = CommittedInstance:: { cmE: Projective::rand(&mut rng), @@ -232,6 +228,7 @@ pub mod tests { test_committed_instance_hash_opt::< NIFS, PoseidonSponge>, NIFSGadget, PoseidonSpongeVar>, - >(ci); + >(ci)?; + Ok(()) } } diff --git a/folding-schemes/src/folding/nova/nifs/ova.rs b/folding-schemes/src/folding/nova/nifs/ova.rs index b15e4e40..b905815a 100644 --- a/folding-schemes/src/folding/nova/nifs/ova.rs +++ b/folding-schemes/src/folding/nova/nifs/ova.rs @@ -1,7 +1,6 @@ /// This module contains the implementation the NIFSTrait for the /// [Ova](https://hackmd.io/V4838nnlRKal9ZiTHiGYzw) NIFS (Non-Interactive Folding Scheme). use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; use ark_ff::{BigInteger, PrimeField}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::fmt::Debug; @@ -12,13 +11,13 @@ use std::marker::PhantomData; use super::nova::ChallengeGadget; use super::ova_circuits::CommittedInstanceVar; use super::NIFSTrait; -use crate::arith::r1cs::R1CS; +use crate::arith::{r1cs::R1CS, Arith}; use crate::commitment::CommitmentScheme; use crate::folding::traits::{CommittedInstanceOps, Inputize}; use crate::folding::{circuits::CF1, traits::Dummy}; -use crate::transcript::{AbsorbNonNative, Transcript}; +use crate::transcript::Transcript; use crate::utils::vec::{hadamard, mat_vec_mul, vec_scalar_mul, vec_sub}; -use crate::Error; +use crate::{Curve, Error}; /// A CommittedInstance in [Ova](https://hackmd.io/V4838nnlRKal9ZiTHiGYzw) is represented by `W` or /// `W'`. 
It is the result of the commitment to a vector that contains the witness `w` concatenated @@ -26,16 +25,13 @@ use crate::Error; /// document `u` is denoted as `mu`, in this implementation we use `u` so it follows the original /// Nova notation, so code is easier to follow). #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct CommittedInstance { +pub struct CommittedInstance { pub u: C::ScalarField, // in the Ova document is denoted as `mu` pub x: Vec, pub cmWE: C, } -impl Absorb for CommittedInstance -where - C::ScalarField: Absorb, -{ +impl Absorb for CommittedInstance { fn to_sponge_bytes(&self, dest: &mut Vec) { C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest); } @@ -43,16 +39,11 @@ where fn to_sponge_field_elements(&self, dest: &mut Vec) { self.u.to_sponge_field_elements(dest); self.x.to_sponge_field_elements(dest); - // We cannot call `to_native_sponge_field_elements(dest)` directly, as - // `to_native_sponge_field_elements` needs `F` to be `C::ScalarField`, - // but here `F` is a generic `PrimeField`. - self.cmWE - .to_native_sponge_field_elements_as_vec() - .to_sponge_field_elements(dest); + self.cmWE.to_native_sponge_field_elements(dest); } } -impl CommittedInstanceOps for CommittedInstance { +impl CommittedInstanceOps for CommittedInstance { type Var = CommittedInstanceVar; fn get_commitments(&self) -> Vec { @@ -64,21 +55,23 @@ impl CommittedInstanceOps for CommittedInstance { } } -impl Inputize> for CommittedInstance { - fn inputize(&self) -> Vec { - [&[self.u][..], &self.x, &self.cmWE.inputize()].concat() +impl Inputize> for CommittedInstance { + /// Returns the internal representation in the same order as how the value + /// is allocated in `CommittedInstanceVar::new_input`. + fn inputize(&self) -> Vec> { + [&[self.u][..], &self.x, &self.cmWE.inputize_nonnative()].concat() } } /// A Witness in Ova is represented by `w`. It also contains a blinder which can or not be used /// when committing to the witness itself. #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Witness { +pub struct Witness { pub w: Vec, pub rW: C::ScalarField, } -impl Witness { +impl Witness { /// Generates a new `Witness` instance from a given witness vector. /// If `H = true`, then we assume we want to blind it at commitment time, /// hence sampling `rW` from the randomness passed. @@ -111,10 +104,10 @@ impl Witness { } } -impl Dummy<&R1CS>> for Witness { +impl Dummy<&R1CS>> for Witness { fn dummy(r1cs: &R1CS>) -> Self { Self { - w: vec![C::ScalarField::zero(); r1cs.A.n_cols - 1 - r1cs.l], + w: vec![C::ScalarField::zero(); r1cs.n_witnesses()], rW: C::ScalarField::zero(), } } @@ -122,7 +115,7 @@ impl Dummy<&R1CS>> for Witness { /// Implements the NIFS (Non-Interactive Folding Scheme) trait for Ova. pub struct NIFS< - C: CurveGroup, + C: Curve, CS: CommitmentScheme, T: Transcript, const H: bool = false, @@ -132,11 +125,8 @@ pub struct NIFS< _t: PhantomData, } -impl, T: Transcript, const H: bool> +impl, T: Transcript, const H: bool> NIFSTrait for NIFS -where - ::ScalarField: Absorb, - ::BaseField: PrimeField, { type CommittedInstance = CommittedInstance; type Witness = Witness; @@ -241,7 +231,7 @@ where /// Computes the E parameter (error terms) for the given R1CS and the instance's z and u. This /// method is used by the verifier to obtain E in order to check the RelaxedR1CS relation. 
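(A hedged reading of compute_E rather than a restatement of its exact body: for z = (u, x, w) the error vector is recomputed as

    E = Az ∘ Bz − u·(Cz)

so an Ova pair is accepted when this recomputed E makes the relaxed R1CS relation hold, which is what the TestingWitness helper and test_nifs_ova further below rely on.)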
-pub fn compute_E( +pub fn compute_E( r1cs: &R1CS, z: &[C::ScalarField], u: C::ScalarField, @@ -264,22 +254,22 @@ pub mod tests { use super::*; use ark_pallas::{Fr, Projective}; - use crate::arith::{r1cs::tests::get_test_r1cs, Arith}; + use crate::arith::{r1cs::tests::get_test_r1cs, ArithRelation}; use crate::commitment::pedersen::Pedersen; use crate::folding::nova::nifs::tests::test_nifs_opt; use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; // Simple auxiliary structure mainly used to help pass a witness for which we can check // easily an R1CS relation. - // Notice that checking it requires us to have `E` as per [`Arith`] trait definition. + // Notice that checking it requires us to have `E` as per [`ArithRelation`] trait definition. // But since we don't hold `E` nor `e` within the NIFS, we create this structure to pass // `e` such that the check can be done. #[derive(Debug, Clone)] - pub(crate) struct TestingWitness { + pub(crate) struct TestingWitness { pub(crate) w: Vec, pub(crate) e: Vec, } - impl Arith, CommittedInstance> for R1CS> { + impl ArithRelation, CommittedInstance> for R1CS> { type Evaluation = Vec>; fn eval_relation( @@ -300,14 +290,14 @@ pub mod tests { } #[test] - fn test_nifs_ova() { - let (W, U) = test_nifs_opt::, PoseidonSponge>>(); + fn test_nifs_ova() -> Result<(), Error> { + let (W, U) = test_nifs_opt::, PoseidonSponge>>()?; // check the last folded instance relation let r1cs = get_test_r1cs(); let z: Vec = [&[U.u][..], &U.x, &W.w].concat(); - let e = compute_E::(&r1cs, &z, U.u).unwrap(); - r1cs.check_relation(&TestingWitness:: { e, w: W.w.clone() }, &U) - .unwrap(); + let e = compute_E::(&r1cs, &z, U.u)?; + r1cs.check_relation(&TestingWitness:: { e, w: W.w.clone() }, &U)?; + Ok(()) } } diff --git a/folding-schemes/src/folding/nova/nifs/ova_circuits.rs b/folding-schemes/src/folding/nova/nifs/ova_circuits.rs index b42e6b0f..75c3eff7 100644 --- a/folding-schemes/src/folding/nova/nifs/ova_circuits.rs +++ b/folding-schemes/src/folding/nova/nifs/ova_circuits.rs @@ -1,14 +1,11 @@ /// contains [Ova](https://hackmd.io/V4838nnlRKal9ZiTHiGYzw) NIFS related circuits -use ark_crypto_primitives::sponge::{constraints::AbsorbGadget, Absorb, CryptographicSponge}; -use ark_ec::{CurveGroup, Group}; -use ark_ff::PrimeField; +use ark_crypto_primitives::sponge::{constraints::AbsorbGadget, CryptographicSponge}; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, boolean::Boolean, eq::EqGadget, fields::{fp::FpVar, FieldVar}, uint8::UInt8, - ToConstraintFieldGadget, }; use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; use ark_std::fmt::Debug; @@ -16,23 +13,24 @@ use core::{borrow::Borrow, marker::PhantomData}; use super::ova::CommittedInstance; use super::NIFSGadgetTrait; -use crate::folding::circuits::{nonnative::affine::NonNativeAffineVar, CF1}; use crate::folding::traits::CommittedInstanceVarOps; use crate::transcript::TranscriptVar; +use crate::{ + folding::circuits::{nonnative::affine::NonNativeAffineVar, CF1}, + transcript::AbsorbNonNativeGadget, +}; use crate::folding::nova::nifs::nova::ChallengeGadget; +use crate::Curve; #[derive(Debug, Clone)] -pub struct CommittedInstanceVar { +pub struct CommittedInstanceVar { pub u: FpVar, pub x: Vec>, pub cmWE: NonNativeAffineVar, } -impl AllocVar, CF1> for CommittedInstanceVar -where - C: CurveGroup, -{ +impl AllocVar, CF1> for CommittedInstanceVar { fn new_variable>>( cs: impl Into>>, f: impl FnOnce() -> Result, @@ -53,11 +51,7 @@ where } } -impl AbsorbGadget for CommittedInstanceVar -where - C: 
CurveGroup, - ::BaseField: ark_ff::PrimeField, -{ +impl AbsorbGadget for CommittedInstanceVar { fn to_sponge_bytes(&self) -> Result>, SynthesisError> { FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?) } @@ -66,13 +60,13 @@ where Ok([ vec![self.u.clone()], self.x.clone(), - self.cmWE.to_constraint_field()?, + self.cmWE.to_native_sponge_field_elements()?, ] .concat()) } } -impl CommittedInstanceVarOps for CommittedInstanceVar { +impl CommittedInstanceVarOps for CommittedInstanceVar { type PointVar = NonNativeAffineVar; fn get_commitments(&self) -> Vec { @@ -95,7 +89,7 @@ impl CommittedInstanceVarOps for CommittedInstanceVar { /// Implements the circuit that does the checks of the Non-Interactive Folding Scheme Verifier /// described of the Ova variant, where the cmWE check is delegated to the NIFSCycleFoldGadget. -pub struct NIFSGadget, S>> { +pub struct NIFSGadget, S>> { _c: PhantomData, _s: PhantomData, _t: PhantomData, @@ -103,13 +97,9 @@ pub struct NIFSGadget NIFSGadgetTrait for NIFSGadget where - C: CurveGroup, + C: Curve, S: CryptographicSponge, T: TranscriptVar, S>, - ::BaseField: ark_ff::PrimeField, - - ::ScalarField: Absorb, - ::BaseField: PrimeField, { type CommittedInstance = CommittedInstance; type CommittedInstanceVar = CommittedInstanceVar; @@ -132,7 +122,7 @@ where u_i.clone(), None, )?; - let r = Boolean::le_bits_to_fp_var(&r_bits)?; + let r = Boolean::le_bits_to_fp(&r_bits)?; Ok(( Self::CommittedInstanceVar { @@ -170,9 +160,10 @@ pub mod tests { test_nifs_gadget_opt, }, }; + use crate::Error; #[test] - fn test_nifs_gadget() { + fn test_nifs_gadget() -> Result<(), Error> { let mut rng = ark_std::test_rng(); // prepare the committed instances to test in-circuit let ci: Vec> = (0..2) @@ -187,14 +178,14 @@ pub mod tests { let (ci_out, ciVar_out) = test_nifs_gadget_opt::< NIFS, PoseidonSponge>, NIFSGadget, PoseidonSpongeVar>, - >(ci, Fr::zero()) - .unwrap(); - assert_eq!(ciVar_out.u.value().unwrap(), ci_out.u); - assert_eq!(ciVar_out.x.value().unwrap(), ci_out.x); + >(ci, Fr::zero())?; + assert_eq!(ciVar_out.u.value()?, ci_out.u); + assert_eq!(ciVar_out.x.value()?, ci_out.x); + Ok(()) } #[test] - fn test_committed_instance_to_sponge_preimage() { + fn test_committed_instance_to_sponge_preimage() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let ci = CommittedInstance:: { u: Fr::rand(&mut rng), @@ -205,11 +196,12 @@ pub mod tests { test_committed_instance_to_sponge_preimage_opt::< NIFS, PoseidonSponge>, NIFSGadget, PoseidonSpongeVar>, - >(ci); + >(ci)?; + Ok(()) } #[test] - fn test_committed_instance_hash() { + fn test_committed_instance_hash() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let ci = CommittedInstance:: { u: Fr::rand(&mut rng), @@ -219,6 +211,7 @@ pub mod tests { test_committed_instance_hash_opt::< NIFS, PoseidonSponge>, NIFSGadget, PoseidonSpongeVar>, - >(ci); + >(ci)?; + Ok(()) } } diff --git a/folding-schemes/src/folding/nova/nifs/pointvsline.rs b/folding-schemes/src/folding/nova/nifs/pointvsline.rs index a41c7f64..62573796 100644 --- a/folding-schemes/src/folding/nova/nifs/pointvsline.rs +++ b/folding-schemes/src/folding/nova/nifs/pointvsline.rs @@ -1,5 +1,3 @@ -use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; use ark_ff::{One, PrimeField}; use ark_poly::univariate::DensePolynomial; use ark_poly::{DenseMultilinearExtension, DenseUVPolynomial, Polynomial}; @@ -9,35 +7,32 @@ use ark_std::{log2, Zero}; use super::mova::{CommittedInstance, Witness}; use crate::transcript::Transcript; use 
crate::utils::mle::dense_vec_to_dense_mle; -use crate::Error; +use crate::{Curve, Error}; /// Implements the Points vs Line as described in /// [Mova](https://eprint.iacr.org/2024/1220.pdf) and Section 4.5.2 from Thaler’s book /// Claim from step 3 protocol 6 -pub struct PointvsLineEvaluationClaim { +pub struct PointvsLineEvaluationClaim { pub mleE1_prime: C::ScalarField, pub mleE2_prime: C::ScalarField, pub rE_prime: Vec, } /// Proof from step 1 protocol 6 #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct PointVsLineProof { +pub struct PointVsLineProof { pub h1: DensePolynomial, pub h2: DensePolynomial, } #[derive(Clone, Debug, Default)] -pub struct PointVsLine> { +pub struct PointVsLine> { _phantom_C: std::marker::PhantomData, _phantom_T: std::marker::PhantomData, } /// Protocol 6 from Mova -impl> PointVsLine -where - ::ScalarField: Absorb, -{ +impl> PointVsLine { pub fn prove( transcript: &mut T, ci1: &CommittedInstance, @@ -179,17 +174,18 @@ fn compute_l(r1: &[F], r1_sub_r2: &[F], x: F) -> Result, E #[cfg(test)] mod tests { use super::{compute_h, compute_l}; + use crate::Error; use ark_pallas::Fq; use ark_poly::{DenseMultilinearExtension, DenseUVPolynomial}; #[test] - fn test_compute_h() { + fn test_compute_h() -> Result<(), Error> { let mle = DenseMultilinearExtension::from_evaluations_slice(1, &[Fq::from(1), Fq::from(2)]); let r0 = [Fq::from(5)]; let r1 = [Fq::from(6)]; let r1_sub_r0: Vec = r1.iter().zip(&r0).map(|(&x, y)| x - y).collect(); - let result = compute_h(&mle, &r0, &r1_sub_r0).unwrap(); + let result = compute_h(&mle, &r0, &r1_sub_r0)?; assert_eq!( result, DenseUVPolynomial::from_coefficients_slice(&[Fq::from(6), Fq::from(1)]) @@ -200,7 +196,7 @@ mod tests { let r1 = [Fq::from(7)]; let r1_sub_r0: Vec = r1.iter().zip(&r0).map(|(&x, y)| x - y).collect(); - let result = compute_h(&mle, &r0, &r1_sub_r0).unwrap(); + let result = compute_h(&mle, &r0, &r1_sub_r0)?; assert_eq!( result, DenseUVPolynomial::from_coefficients_slice(&[Fq::from(5), Fq::from(3)]) @@ -214,7 +210,7 @@ mod tests { let r1 = [Fq::from(2), Fq::from(7)]; let r1_sub_r0: Vec = r1.iter().zip(&r0).map(|(&x, y)| x - y).collect(); - let result = compute_h(&mle, &r0, &r1_sub_r0).unwrap(); + let result = compute_h(&mle, &r0, &r1_sub_r0)?; assert_eq!( result, DenseUVPolynomial::from_coefficients_slice(&[Fq::from(14), Fq::from(3)]) @@ -236,11 +232,12 @@ mod tests { let r1 = [Fq::from(5), Fq::from(6), Fq::from(7)]; let r1_sub_r0: Vec = r1.iter().zip(&r0).map(|(&x, y)| x - y).collect(); - let result = compute_h(&mle, &r0, &r1_sub_r0).unwrap(); + let result = compute_h(&mle, &r0, &r1_sub_r0)?; assert_eq!( result, DenseUVPolynomial::from_coefficients_slice(&[Fq::from(18), Fq::from(28)]) ); + Ok(()) } #[test] @@ -264,7 +261,7 @@ mod tests { } #[test] - fn test_compute_l() { + fn test_compute_l() -> Result<(), Error> { // Test with simple non-zero values let r1 = vec![Fq::from(1), Fq::from(2), Fq::from(3)]; let r1_sub_r2 = vec![Fq::from(4), Fq::from(5), Fq::from(6)]; @@ -276,7 +273,8 @@ mod tests { Fq::from(3) + Fq::from(2) * Fq::from(6), ]; - let result = compute_l(&r1, &r1_sub_r2, x).unwrap(); + let result = compute_l(&r1, &r1_sub_r2, x)?; assert_eq!(result, expected); + Ok(()) } } diff --git a/folding-schemes/src/folding/nova/traits.rs b/folding-schemes/src/folding/nova/traits.rs index 2f13464b..0b6f294f 100644 --- a/folding-schemes/src/folding/nova/traits.rs +++ b/folding-schemes/src/folding/nova/traits.rs @@ -1,4 +1,3 @@ -use ark_ec::CurveGroup; use 
ark_r1cs_std::fields::fp::FpVar; use ark_relations::r1cs::SynthesisError; use ark_std::{rand::RngCore, UniformRand}; @@ -8,15 +7,15 @@ use super::nifs::nova_circuits::CommittedInstanceVar; use super::{CommittedInstance, Witness}; use crate::arith::{ r1cs::{circuits::R1CSMatricesVar, R1CS}, - Arith, ArithGadget, ArithSampler, + Arith, ArithRelation, ArithRelationGadget, ArithSampler, }; use crate::commitment::CommitmentScheme; use crate::folding::circuits::CF1; use crate::utils::gadgets::{EquivalenceGadget, VectorGadget}; -use crate::Error; +use crate::{Curve, Error}; -/// Implements `Arith` for R1CS, where the witness is of type [`Witness`], and -/// the committed instance is of type [`CommittedInstance`]. +/// Implements [`ArithRelation`] for R1CS, where the witness is of type +/// [`Witness`], and the committed instance is of type [`CommittedInstance`]. /// /// Due to the error terms `Witness.E` and `CommittedInstance.u`, R1CS here is /// considered as a relaxed R1CS. @@ -29,20 +28,20 @@ use crate::Error; /// struct, but are part of the witness and committed instance. /// /// As a follow-up, one may further ask why not providing a trait for relaxed -/// R1CS and implement it for the `R1CS` struct, where the relaxed R1CS trait -/// has methods for relaxed satisfiability check, while the `Arith` trait that -/// `R1CS` implements has methods for plain satisfiability check. +/// R1CS and implement it for the [`R1CS`] struct, where the relaxed R1CS trait +/// has methods for relaxed satisfiability check, while the [`ArithRelation`] +/// trait that [`R1CS`] implements has methods for plain satisfiability check. /// However, it would be more ideal if we have a single method that can smartly /// choose the type of satisfiability check, which would make the code more /// generic and easier to maintain. /// -/// This is achieved thanks to the new design of the [`Arith`] trait, where we -/// can implement the trait for the same constraint system with different types -/// of witnesses and committed instances. +/// This is achieved thanks to the new design of the [`ArithRelation`] trait, +/// where we can implement the trait for the same constraint system with +/// different types of witnesses and committed instances. /// For R1CS, whether it is relaxed or not is now determined by the types of `W` /// and `U`: the satisfiability check is relaxed if `W` and `U` are defined by /// folding schemes, and plain if they are vectors of field elements. -impl Arith, CommittedInstance> for R1CS> { +impl ArithRelation, CommittedInstance> for R1CS> { type Evaluation = Vec>; fn eval_relation( @@ -62,7 +61,7 @@ impl Arith, CommittedInstance> for R1CS> { } } -impl ArithSampler, CommittedInstance> for R1CS> { +impl ArithSampler, CommittedInstance> for R1CS> { fn sample_witness_instance>( &self, params: &CS::ProverParams, @@ -74,10 +73,10 @@ impl ArithSampler, CommittedInstance> for R1CS>(); let mut z = vec![u]; @@ -103,7 +102,7 @@ impl ArithSampler, CommittedInstance> for R1CS ArithGadget, CommittedInstanceVar> +impl ArithRelationGadget, CommittedInstanceVar> for R1CSMatricesVar> { type Evaluation = (Vec>, Vec>); diff --git a/folding-schemes/src/folding/nova/zk.rs b/folding-schemes/src/folding/nova/zk.rs index a560396a..fb8fd5c5 100644 --- a/folding-schemes/src/folding/nova/zk.rs +++ b/folding-schemes/src/folding/nova/zk.rs @@ -30,29 +30,25 @@ /// paper). /// And the Use-case-2 would require a modified version of the Decider circuits. 
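(High-level, editorial summary of the randomized-IVC flow implemented below, hedged rather than normative: the prover folds the running and incoming instances with the usual Nova NIFS, then samples a fresh satisfying relaxed R1CS instance-witness pair via ArithSampler::sample_witness_instance and folds it in as well; only the resulting randomized instance U_r and its witness are revealed, which is what makes the published proof hiding (the "lemma 9" argument referenced in RandomizedIVCProof::new), and the verifier replays the folding steps before checking the final relaxed relation.)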
/// -use ark_ff::PrimeField; -use ark_std::{One, Zero}; - -use crate::{ - arith::{r1cs::R1CS, Arith, ArithSampler}, - folding::traits::CommittedInstanceOps, - RngCore, -}; use ark_crypto_primitives::sponge::{ poseidon::{PoseidonConfig, PoseidonSponge}, - Absorb, CryptographicSponge, + CryptographicSponge, }; -use ark_ec::{CurveGroup, Group}; -use ark_r1cs_std::{groups::CurveVar, ToConstraintFieldGadget}; - -use crate::{commitment::CommitmentScheme, folding::circuits::CF2, frontend::FCircuit, Error}; +use ark_std::{rand::RngCore, One, Zero}; use super::{ nifs::{nova::NIFS, NIFSTrait}, CommittedInstance, Nova, Witness, }; +use crate::{ + arith::{r1cs::R1CS, ArithRelation, ArithSampler}, + commitment::CommitmentScheme, + folding::traits::CommittedInstanceOps, + frontend::FCircuit, + Curve, Error, +}; -pub struct RandomizedIVCProof { +pub struct RandomizedIVCProof { pub U_i: CommittedInstance, pub u_i: CommittedInstance, pub U_r: CommittedInstance, @@ -63,32 +59,18 @@ pub struct RandomizedIVCProof { pub cf_W_i: Witness, } -impl RandomizedIVCProof -where - ::ScalarField: Absorb, - ::BaseField: PrimeField, -{ +impl RandomizedIVCProof { /// Compute a zero-knowledge proof of a Nova IVC proof /// It implements the prover of appendix D.4.in https://eprint.iacr.org/2023/573.pdf /// For further details on why folding is hiding, see lemma 9 pub fn new< - GC1: CurveVar> + ToConstraintFieldGadget>, - GC2: CurveVar> + ToConstraintFieldGadget>, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, >( - nova: &Nova, + nova: &Nova, mut rng: impl RngCore, - ) -> Result, Error> - where - ::ScalarField: Absorb, - ::ScalarField: Absorb, - ::ScalarField: PrimeField, - ::BaseField: PrimeField, - ::BaseField: Absorb, - C1: CurveGroup, - { + ) -> Result, Error> { let mut transcript = PoseidonSponge::::new(&nova.poseidon_config); // I. Compute proof for 'regular' instances @@ -137,11 +119,7 @@ where /// Verify a zero-knowledge proof of a Nova IVC proof /// It implements the verifier of appendix D.4. 
in https://eprint.iacr.org/2023/573.pdf #[allow(clippy::too_many_arguments)] - pub fn verify< - CS1: CommitmentScheme, - GC2: CurveVar> + ToConstraintFieldGadget>, - CS2: CommitmentScheme, - >( + pub fn verify, CS2: CommitmentScheme>( r1cs: &R1CS, cf_r1cs: &R1CS, pp_hash: C1::ScalarField, @@ -152,11 +130,7 @@ where proof: &RandomizedIVCProof, ) -> Result<(), Error> where - ::ScalarField: Absorb, - ::ScalarField: Absorb, - ::BaseField: PrimeField, - ::BaseField: Absorb, - C1: CurveGroup, + C1: Curve, { // Handles case where i=0 if i == C1::ScalarField::zero() { @@ -227,139 +201,129 @@ pub mod tests { use crate::frontend::utils::CubicFCircuit; use crate::transcript::poseidon::poseidon_canonical_config; use ark_bn254::{Fr, G1Projective as Projective}; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_grumpkin::Projective as Projective2; use rand::rngs::OsRng; // Tests zk proof generation and verification for a valid nova IVC proof #[test] - fn test_zk_nova_ivc() { + fn test_zk_nova_ivc() -> Result<(), Error> { let mut rng = OsRng; let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); - let (_, nova) = test_ivc_opt::, Pedersen, true>( - poseidon_config.clone(), - F_circuit, - 3, - ); - - let proof = RandomizedIVCProof::new(&nova, &mut rng).unwrap(); - let verify = RandomizedIVCProof::verify::< + let F_circuit = CubicFCircuit::::new(())?; + let (_, nova) = test_ivc_opt::< Pedersen, - GVar2, Pedersen, - >( - &nova.r1cs, - &nova.cf_r1cs, - nova.pp_hash, - &nova.poseidon_config, - nova.i, - nova.z_0, - nova.z_i, - &proof, - ); + true, + >(poseidon_config.clone(), F_circuit, 3)?; + + let proof = RandomizedIVCProof::new(&nova, &mut rng)?; + let verify = + RandomizedIVCProof::verify::, Pedersen>( + &nova.r1cs, + &nova.cf_r1cs, + nova.pp_hash, + &nova.poseidon_config, + nova.i, + nova.z_0, + nova.z_i, + &proof, + ); assert!(verify.is_ok()); + Ok(()) } #[test] - fn test_zk_nova_when_i_is_zero() { + fn test_zk_nova_when_i_is_zero() -> Result<(), Error> { let mut rng = OsRng; let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); - let (_, nova) = test_ivc_opt::, Pedersen, true>( - poseidon_config.clone(), - F_circuit, - 0, - ); - - let proof = RandomizedIVCProof::new(&nova, &mut rng).unwrap(); - let verify = RandomizedIVCProof::verify::< + let F_circuit = CubicFCircuit::::new(())?; + let (_, nova) = test_ivc_opt::< Pedersen, - GVar2, Pedersen, - >( - &nova.r1cs, - &nova.cf_r1cs, - nova.pp_hash, - &nova.poseidon_config, - nova.i, - nova.z_0, - nova.z_i, - &proof, - ); + true, + >(poseidon_config.clone(), F_circuit, 0)?; + + let proof = RandomizedIVCProof::new(&nova, &mut rng)?; + let verify = + RandomizedIVCProof::verify::, Pedersen>( + &nova.r1cs, + &nova.cf_r1cs, + nova.pp_hash, + &nova.poseidon_config, + nova.i, + nova.z_0, + nova.z_i, + &proof, + ); assert!(verify.is_ok()); + Ok(()) } #[test] - fn test_zk_nova_verification_fails_with_wrong_running_instance() { + fn test_zk_nova_verification_fails_with_wrong_running_instance() -> Result<(), Error> { let mut rng = OsRng; let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); - let (_, nova) = test_ivc_opt::, Pedersen, true>( - poseidon_config.clone(), - F_circuit, - 3, - ); + let F_circuit = CubicFCircuit::::new(())?; + let (_, nova) = test_ivc_opt::< + Pedersen, + Pedersen, + true, + >(poseidon_config.clone(), F_circuit, 3)?; let (_, sampled_committed_instance) = 
nova .r1cs - .sample_witness_instance::>(&nova.cs_pp, rng) - .unwrap(); + .sample_witness_instance::>(&nova.cs_pp, rng)?; // proof verification fails with incorrect running instance let mut nova_with_incorrect_running_instance = nova.clone(); nova_with_incorrect_running_instance.U_i = sampled_committed_instance; let incorrect_proof = - RandomizedIVCProof::new(&nova_with_incorrect_running_instance, &mut rng).unwrap(); - let verify = RandomizedIVCProof::verify::< - Pedersen, - GVar2, - Pedersen, - >( - &nova_with_incorrect_running_instance.r1cs, - &nova_with_incorrect_running_instance.cf_r1cs, - nova_with_incorrect_running_instance.pp_hash, - &nova_with_incorrect_running_instance.poseidon_config, - nova_with_incorrect_running_instance.i, - nova_with_incorrect_running_instance.z_0, - nova_with_incorrect_running_instance.z_i, - &incorrect_proof, - ); + RandomizedIVCProof::new(&nova_with_incorrect_running_instance, &mut rng)?; + let verify = + RandomizedIVCProof::verify::, Pedersen>( + &nova_with_incorrect_running_instance.r1cs, + &nova_with_incorrect_running_instance.cf_r1cs, + nova_with_incorrect_running_instance.pp_hash, + &nova_with_incorrect_running_instance.poseidon_config, + nova_with_incorrect_running_instance.i, + nova_with_incorrect_running_instance.z_0, + nova_with_incorrect_running_instance.z_i, + &incorrect_proof, + ); assert!(verify.is_err()); + Ok(()) } #[test] - fn test_zk_nova_verification_fails_with_wrong_running_witness() { + fn test_zk_nova_verification_fails_with_wrong_running_witness() -> Result<(), Error> { let mut rng = OsRng; let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); - let (_, nova) = test_ivc_opt::, Pedersen, true>( - poseidon_config.clone(), - F_circuit, - 3, - ); + let F_circuit = CubicFCircuit::::new(())?; + let (_, nova) = test_ivc_opt::< + Pedersen, + Pedersen, + true, + >(poseidon_config.clone(), F_circuit, 3)?; let (sampled_committed_witness, _) = nova .r1cs - .sample_witness_instance::>(&nova.cs_pp, rng) - .unwrap(); + .sample_witness_instance::>(&nova.cs_pp, rng)?; // proof generation fails with incorrect running witness let mut nova_with_incorrect_running_witness = nova.clone(); nova_with_incorrect_running_witness.W_i = sampled_committed_witness; let incorrect_proof = - RandomizedIVCProof::new(&nova_with_incorrect_running_witness, &mut rng).unwrap(); - let verify = RandomizedIVCProof::verify::< - Pedersen, - GVar2, - Pedersen, - >( - &nova_with_incorrect_running_witness.r1cs, - &nova_with_incorrect_running_witness.cf_r1cs, - nova_with_incorrect_running_witness.pp_hash, - &nova_with_incorrect_running_witness.poseidon_config, - nova_with_incorrect_running_witness.i, - nova_with_incorrect_running_witness.z_0, - nova_with_incorrect_running_witness.z_i, - &incorrect_proof, - ); + RandomizedIVCProof::new(&nova_with_incorrect_running_witness, &mut rng)?; + let verify = + RandomizedIVCProof::verify::, Pedersen>( + &nova_with_incorrect_running_witness.r1cs, + &nova_with_incorrect_running_witness.cf_r1cs, + nova_with_incorrect_running_witness.pp_hash, + &nova_with_incorrect_running_witness.poseidon_config, + nova_with_incorrect_running_witness.i, + nova_with_incorrect_running_witness.z_0, + nova_with_incorrect_running_witness.z_i, + &incorrect_proof, + ); assert!(verify.is_err()); + Ok(()) } } diff --git a/folding-schemes/src/folding/protogalaxy/circuits.rs b/folding-schemes/src/folding/protogalaxy/circuits.rs index 5eadd98a..3bfac847 100644 --- a/folding-schemes/src/folding/protogalaxy/circuits.rs +++ 
b/folding-schemes/src/folding/protogalaxy/circuits.rs @@ -1,22 +1,22 @@ use ark_crypto_primitives::sponge::{ constraints::CryptographicSpongeVar, poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge}, - Absorb, CryptographicSponge, + CryptographicSponge, }; -use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_poly::{univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain}; use ark_r1cs_std::{ alloc::AllocVar, boolean::Boolean, + convert::ToBitsGadget, eq::EqGadget, fields::{fp::FpVar, FieldVar}, groups::CurveVar, poly::polynomial::univariate::dense::DensePolynomialVar, - R1CSVar, ToBitsGadget, ToConstraintFieldGadget, + R1CSVar, }; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; -use ark_std::{fmt::Debug, marker::PhantomData, One, Zero}; +use ark_std::{fmt::Debug, One, Zero}; use super::{ folding::lagrange_polys, @@ -31,20 +31,21 @@ use crate::{ CycleFoldCommittedInstanceVar, CycleFoldConfig, NIFSFullGadget, }, nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar}, - CF1, CF2, + CF1, }, traits::{CommittedInstanceVarOps, Dummy}, }, frontend::FCircuit, transcript::{AbsorbNonNativeGadget, TranscriptVar}, utils::gadgets::VectorGadget, + Curve, }; pub struct FoldingGadget {} impl FoldingGadget { #[allow(clippy::type_complexity)] - pub fn fold_committed_instance( + pub fn fold_committed_instance( transcript: &mut impl TranscriptVar, // running instance instance: &CommittedInstanceVar, @@ -134,7 +135,7 @@ pub struct AugmentationGadget; impl AugmentationGadget { #[allow(clippy::type_complexity)] - pub fn prepare_and_fold_primary( + pub fn prepare_and_fold_primary( transcript: &mut impl TranscriptVar, S>, U: CommittedInstanceVar, u_phis: Vec>, @@ -170,21 +171,17 @@ impl AugmentationGadget { } pub fn prepare_and_fold_cyclefold< - C1: CurveGroup, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, S: CryptographicSponge, >( transcript: &mut PoseidonSpongeVar>, pp_hash: FpVar>, - mut cf_U: CycleFoldCommittedInstanceVar, - cf_u_cmWs: Vec, + mut cf_U: CycleFoldCommittedInstanceVar, + cf_u_cmWs: Vec, cf_u_xs: Vec>>>, - cf_cmTs: Vec, - ) -> Result, SynthesisError> - where - C2::BaseField: PrimeField + Absorb, - { + cf_cmTs: Vec, + ) -> Result, SynthesisError> { assert_eq!(cf_u_cmWs.len(), cf_u_xs.len()); assert_eq!(cf_u_xs.len(), cf_cmTs.len()); @@ -197,7 +194,7 @@ impl AugmentationGadget { // For each CycleFold instance `cf_u`, we have `cf_u.cmE = 0`, and // `cf_u.u = 1`. let cf_u = CycleFoldCommittedInstanceVar { - cmE: GC2::zero(), + cmE: C2::Var::zero(), u: NonNativeUintVar::new_constant(ConstraintSystemRef::None, C1::BaseField::one())?, cmW, x, @@ -234,27 +231,20 @@ impl AugmentationGadget { /// defined in [CycleFold](https://eprint.iacr.org/2023/1192.pdf). These extra /// constraints verify the correct folding of CycleFold instances. 
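(In broad strokes, a hedged reading of compute_next_state below, not an addition to the circuit: at each step the augmented circuit (i) runs one invocation of the user's FCircuit to obtain z_{i+1} from z_i and the external inputs, (ii) verifies the ProtoGalaxy folding of the running instance U_i with the incoming instance, (iii) folds the two CycleFold instances that carry the group operations on the other curve, and (iv) exposes exactly two public inputs, the hash binding (i+1, z_0, z_{i+1}, U_{i+1}) and the hash of cf_U_{i+1}, using the witness-to-public-input pattern documented inline further down.)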
#[derive(Debug, Clone)] -pub struct AugmentedFCircuit< - C1: CurveGroup, - C2: CurveGroup, - GC2: CurveVar>, - FC: FCircuit>, -> { - pub(super) _gc2: PhantomData, +pub struct AugmentedFCircuit>> { pub(super) poseidon_config: PoseidonConfig>, pub(super) pp_hash: CF1, pub(super) i: CF1, pub(super) i_usize: usize, pub(super) z_0: Vec>, pub(super) z_i: Vec>, - pub(super) external_inputs: Vec>, + pub(super) external_inputs: FC::ExternalInputs, pub(super) F: FC, // F circuit pub(super) u_i_phi: C1, pub(super) U_i: CommittedInstance, pub(super) U_i1_phi: C1, pub(super) F_coeffs: Vec>, pub(super) K_coeffs: Vec>, - pub(super) x: Option>, // public input (u_{i+1}.x[0]) pub(super) phi_stars: Vec, @@ -263,12 +253,9 @@ pub struct AugmentedFCircuit< pub(super) cf_U_i: CycleFoldCommittedInstance, // input pub(super) cf1_cmT: C2, pub(super) cf2_cmT: C2, - pub(super) cf_x: Option>, // public input (u_{i+1}.x[1]) } -impl>, FC: FCircuit>> - AugmentedFCircuit -{ +impl>> AugmentedFCircuit { pub fn empty( poseidon_config: &PoseidonConfig>, F_circuit: FC, @@ -281,14 +268,13 @@ impl>, FC: FCircuit::IO_LEN); Self { - _gc2: PhantomData, poseidon_config: poseidon_config.clone(), pp_hash: CF1::::zero(), i: CF1::::zero(), i_usize: 0, z_0: vec![CF1::::zero(); F_circuit.state_len()], z_i: vec![CF1::::zero(); F_circuit.state_len()], - external_inputs: vec![CF1::::zero(); F_circuit.external_inputs_len()], + external_inputs: FC::ExternalInputs::default(), u_i_phi: C1::zero(), U_i: u_dummy, U_i1_phi: C1::zero(), @@ -296,33 +282,32 @@ impl>, FC: FCircuit::zero(); d * k + 1], phi_stars: vec![C1::zero(); k], F: F_circuit, - x: None, // cyclefold values cf1_u_i_cmW: C2::zero(), cf2_u_i_cmW: C2::zero(), cf_U_i: cf_u_dummy, cf1_cmT: C2::zero(), cf2_cmT: C2::zero(), - cf_x: None, } } } -impl ConstraintSynthesizer> for AugmentedFCircuit +impl AugmentedFCircuit where - C1: CurveGroup, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit>, - C2::BaseField: PrimeField + Absorb, { - fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { + pub fn compute_next_state( + self, + cs: ConstraintSystemRef>, + ) -> Result>>, SynthesisError> { let pp_hash = FpVar::>::new_witness(cs.clone(), || Ok(self.pp_hash))?; let i = FpVar::>::new_witness(cs.clone(), || Ok(self.i))?; let z_0 = Vec::>>::new_witness(cs.clone(), || Ok(self.z_0))?; let z_i = Vec::>>::new_witness(cs.clone(), || Ok(self.z_i))?; let external_inputs = - Vec::>>::new_witness(cs.clone(), || Ok(self.external_inputs))?; + FC::ExternalInputsVar::new_witness(cs.clone(), || Ok(self.external_inputs))?; let u_dummy = CommittedInstance::::dummy((2, self.U_i.betas.len())); let U_i = CommittedInstanceVar::::new_witness(cs.clone(), || Ok(self.U_i))?; @@ -334,9 +319,9 @@ where let cf_u_dummy = CycleFoldCommittedInstance::dummy(ProtoGalaxyCycleFoldConfig::::IO_LEN); let cf_U_i = - CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || Ok(self.cf_U_i))?; - let cf1_cmT = GC2::new_witness(cs.clone(), || Ok(self.cf1_cmT))?; - let cf2_cmT = GC2::new_witness(cs.clone(), || Ok(self.cf2_cmT))?; + CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || Ok(self.cf_U_i))?; + let cf1_cmT = C2::Var::new_witness(cs.clone(), || Ok(self.cf1_cmT))?; + let cf2_cmT = C2::Var::new_witness(cs.clone(), || Ok(self.cf2_cmT))?; let F_coeffs = Vec::new_witness(cs.clone(), || Ok(self.F_coeffs))?; let K_coeffs = Vec::new_witness(cs.clone(), || Ok(self.K_coeffs))?; @@ -390,8 +375,17 @@ where &z_0, &z_i1, )?; - let x = 
FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?; - x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?; + let x = is_basecase.select(&u_i1_x_base, &u_i1_x)?; + // This line "converts" `x` from a witness to a public input. + // Instead of directly modifying the constraint system, we explicitly + // allocate a public input and enforce that its value is indeed `x`. + // While comparing `x` with itself seems redundant, this is necessary + // because: + // - `.value()` allows an honest prover to extract public inputs without + // computing them outside the circuit. + // - `.enforce_equal()` prevents a malicious prover from claiming wrong + // public inputs that are not the honest `x` computed in-circuit. + FpVar::new_input(cs.clone(), || x.value())?.enforce_equal(&x)?; // CycleFold part // C.1. Compute cf1_u_i.x and cf2_u_i.x @@ -441,13 +435,13 @@ where // C.2. Prepare incoming CycleFold instances // C.3. Fold incoming CycleFold instances into the running instance let cf_U_i1 = - AugmentationGadget::prepare_and_fold_cyclefold::>>( + AugmentationGadget::prepare_and_fold_cyclefold::>>( &mut transcript, pp_hash.clone(), cf_U_i, vec![ - GC2::new_witness(cs.clone(), || Ok(self.cf1_u_i_cmW))?, - GC2::new_witness(cs.clone(), || Ok(self.cf2_u_i_cmW))?, + C2::Var::new_witness(cs.clone(), || Ok(self.cf1_u_i_cmW))?, + C2::Var::new_witness(cs.clone(), || Ok(self.cf2_u_i_cmW))?, ], vec![cf1_x, cf2_x], vec![cf1_cmT, cf2_cmT], @@ -459,35 +453,53 @@ where // Non-base case: u_{i+1}.x[1] == H(cf_U_{i+1}) let (cf_u_i1_x, _) = cf_U_i1.clone().hash(&sponge, pp_hash.clone())?; let (cf_u_i1_x_base, _) = - CycleFoldCommittedInstanceVar::::new_constant(cs.clone(), cf_u_dummy)? + CycleFoldCommittedInstanceVar::::new_constant(cs.clone(), cf_u_dummy)? .hash(&sponge, pp_hash.clone())?; - let cf_x = FpVar::new_input(cs.clone(), || { - Ok(self.cf_x.unwrap_or(cf_u_i1_x_base.value()?)) - })?; - cf_x.enforce_equal(&is_basecase.select(&cf_u_i1_x_base, &cf_u_i1_x)?)?; + let cf_x = is_basecase.select(&cf_u_i1_x_base, &cf_u_i1_x)?; + // This line "converts" `cf_x` from a witness to a public input. + // Instead of directly modifying the constraint system, we explicitly + // allocate a public input and enforce that its value is indeed `cf_x`. + // While comparing `cf_x` with itself seems redundant, this is necessary + // because: + // - `.value()` allows an honest prover to extract public inputs without + // computing them outside the circuit. + // - `.enforce_equal()` prevents a malicious prover from claiming wrong + // public inputs that are not the honest `cf_x` computed in-circuit. 
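// NOTE (illustration only, not part of this patch): a minimal standalone sketch
// of the witness-to-public-input pattern explained in the comments above, using
// arkworks' r1cs-std directly. The function name and the constant 42 are
// arbitrary; it mirrors the `FpVar::new_input(...)?.enforce_equal(...)` call
// that follows in the patch.
use ark_bn254::Fr;
use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget, fields::fp::FpVar, R1CSVar};
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};

fn expose_witness_as_public_input() -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    // Some value computed in-circuit, allocated as a witness.
    let x = FpVar::new_witness(cs.clone(), || Ok(Fr::from(42u64)))?;
    // Re-allocate it as a public input whose assignment is read from `x` itself,
    // then bind both allocations with an equality constraint.
    let x_pub = FpVar::new_input(cs.clone(), || x.value())?;
    x_pub.enforce_equal(&x)?;
    assert!(cs.is_satisfied()?);
    Ok(())
}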
+ FpVar::new_input(cs.clone(), || cf_x.value())?.enforce_equal(&cf_x)?; + + Ok(z_i1) + } +} - Ok(()) +impl ConstraintSynthesizer> for AugmentedFCircuit +where + C1: Curve, + C2: Curve, + FC: FCircuit>, +{ + fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { + self.compute_next_state(cs).map(|_| ()) } } #[cfg(test)] mod tests { - use std::error::Error; use super::*; use crate::{ arith::r1cs::tests::get_test_r1cs, folding::protogalaxy::folding::{tests::prepare_inputs, Folding}, transcript::poseidon::poseidon_canonical_config, + Error, }; use ark_bn254::{Fr, G1Projective as Projective}; use ark_relations::r1cs::ConstraintSystem; #[test] - fn test_folding_gadget() -> Result<(), Box> { + fn test_folding_gadget() -> Result<(), Error> { let k = 7; - let (witness, instance, witnesses, instances) = prepare_inputs(k); + let (witness, instance, witnesses, instances) = prepare_inputs(k)?; let r1cs = get_test_r1cs::(); // init Prover & Verifier's transcript @@ -525,7 +537,6 @@ mod tests { assert_eq!(folded_instance.e, folded_instance_var.e.value()?); assert_eq!(folded_instance.x, folded_instance_var.x.value()?); assert!(cs.is_satisfied()?); - Ok(()) } } diff --git a/folding-schemes/src/folding/protogalaxy/decider_eth.rs b/folding-schemes/src/folding/protogalaxy/decider_eth.rs index c6c2ff48..54613e24 100644 --- a/folding-schemes/src/folding/protogalaxy/decider_eth.rs +++ b/folding-schemes/src/folding/protogalaxy/decider_eth.rs @@ -2,13 +2,10 @@ /// the Decider from decider.rs file will be more efficient. /// More details can be found at the documentation page: /// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html -use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; -use ark_ff::PrimeField; -use ark_r1cs_std::{prelude::CurveVar, ToConstraintFieldGadget}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_snark::SNARK; use ark_std::{ + log2, marker::PhantomData, rand::{CryptoRng, RngCore}, One, Zero, @@ -17,20 +14,21 @@ use ark_std::{ pub use super::decider_eth_circuit::DeciderEthCircuit; use super::decider_eth_circuit::DeciderProtoGalaxyGadget; use super::ProtoGalaxy; -use crate::commitment::{ - kzg::Proof as KZGProof, pedersen::Params as PedersenParams, CommitmentScheme, -}; -use crate::folding::circuits::decider::DeciderEnabledNIFS; -use crate::folding::circuits::CF2; -use crate::folding::traits::{Inputize, WitnessOps}; +use crate::arith::Arith; +use crate::folding::traits::{InputizeNonNative, WitnessOps}; +use crate::folding::{circuits::decider::DeciderEnabledNIFS, traits::Dummy}; use crate::frontend::FCircuit; use crate::Error; +use crate::{ + commitment::{kzg::Proof as KZGProof, pedersen::Params as PedersenParams, CommitmentScheme}, + Curve, +}; use crate::{Decider as DeciderTrait, FoldingScheme}; #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] pub struct Proof where - C: CurveGroup, + C: Curve, CS: CommitmentScheme, S: SNARK, { @@ -45,7 +43,7 @@ where #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] pub struct VerifierParam where - C1: CurveGroup, + C1: Curve, CS_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize, S_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize, { @@ -56,11 +54,9 @@ where /// Onchain Decider, for ethereum use cases #[derive(Clone, Debug)] -pub struct Decider { +pub struct Decider { _c1: PhantomData, - _gc1: PhantomData, _c2: PhantomData, - _gc2: PhantomData, _fc: 
PhantomData, _cs1: PhantomData, _cs2: PhantomData, @@ -68,13 +64,11 @@ pub struct Decider { _fs: PhantomData, } -impl DeciderTrait - for Decider +impl DeciderTrait + for Decider where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, // CS1 is a KZG commitment, where challenge is C1::Fr elem CS1: CommitmentScheme< @@ -87,19 +81,14 @@ where CS2: CommitmentScheme>, S: SNARK, FS: FoldingScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, // constrain FS into ProtoGalaxy, since this is a Decider specifically for ProtoGalaxy - ProtoGalaxy: From, + ProtoGalaxy: From, crate::folding::protogalaxy::ProverParams: From<>::ProverParam>, crate::folding::protogalaxy::VerifierParams: From<>::VerifierParam>, { - type PreprocessorParam = (FS::ProverParam, FS::VerifierParam); + type PreprocessorParam = ((FS::ProverParam, FS::VerifierParam), usize); type ProverParam = (S::ProvingKey, CS1::ProverParams); type Proof = Proof; type VerifierParam = VerifierParam; @@ -108,29 +97,45 @@ where fn preprocess( mut rng: impl RngCore + CryptoRng, - prep_param: Self::PreprocessorParam, - fs: FS, + ((pp, vp), state_len): Self::PreprocessorParam, ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { - let circuit = DeciderEthCircuit::::try_from(ProtoGalaxy::from(fs))?; - - // get the Groth16 specific setup for the circuit - let (g16_pk, g16_vk) = S::circuit_specific_setup(circuit, &mut rng).unwrap(); - // get the FoldingScheme prover & verifier params from ProtoGalaxy - #[allow(clippy::type_complexity)] - let protogalaxy_pp: as FoldingScheme< + let protogalaxy_pp: as FoldingScheme< C1, C2, FC, - >>::ProverParam = prep_param.0.clone().into(); - #[allow(clippy::type_complexity)] - let protogalaxy_vp: as FoldingScheme< + >>::ProverParam = pp.into(); + let protogalaxy_vp: as FoldingScheme< C1, C2, FC, - >>::VerifierParam = prep_param.1.clone().into(); + >>::VerifierParam = vp.into(); let pp_hash = protogalaxy_vp.pp_hash()?; + // We fix `k`, the number of incoming instances, to 1, because + // multi-instances folding is not supported yet. + // TODO: Support multi-instances folding and make `k` a constant generic parameter (as in + // HyperNova). 
Tracking issue: + // https://github.com/privacy-scaling-explorations/sonobe/issues/82 + let k = 1; + let d = protogalaxy_vp.r1cs.degree(); + let t = log2(protogalaxy_vp.r1cs.n_constraints()) as usize; + + let circuit = DeciderEthCircuit::::dummy(( + protogalaxy_vp.r1cs, + protogalaxy_vp.cf_r1cs, + protogalaxy_pp.cf_cs_params, + protogalaxy_pp.poseidon_config, + (t, d, k), + k + 1, // `k + 1` is the length of `L_X_evals` + state_len, + 1, // ProtoGalaxy's running CommittedInstance contains 1 commitment + )); + + // get the Groth16 specific setup for the circuit + let (g16_pk, g16_vk) = S::circuit_specific_setup(circuit, &mut rng) + .map_err(|e| Error::SNARKSetupFail(e.to_string()))?; + let pp = (g16_pk, protogalaxy_pp.cs_params); let vp = Self::VerifierParam { pp_hash, @@ -147,8 +152,7 @@ where ) -> Result { let (snark_pk, cs_pk): (S::ProvingKey, CS1::ProverParams) = pp; - let circuit = - DeciderEthCircuit::::try_from(ProtoGalaxy::from(folding_scheme))?; + let circuit = DeciderEthCircuit::::try_from(ProtoGalaxy::from(folding_scheme))?; let L_X_evals = circuit.randomness.clone(); @@ -173,8 +177,20 @@ where Ok(Self::Proof { snark_proof, L_X_evals, - kzg_proofs: kzg_proofs.try_into().unwrap(), - kzg_challenges: kzg_challenges.try_into().unwrap(), + kzg_proofs: kzg_proofs.try_into().map_err(|_| { + Error::ConversionError( + "Vec<_>".to_string(), + "[_; 1]".to_string(), + "variable name: kzg_proofs".to_string(), + ) + })?, + kzg_challenges: kzg_challenges.try_into().map_err(|_| { + Error::ConversionError( + "Vec<_>".to_string(), + "[_; 1]".to_string(), + "variable name: kzg_challenges".to_string(), + ) + })?, }) } @@ -210,10 +226,7 @@ where &[pp_hash, i][..], &z_0, &z_i, - &U_final_commitments - .iter() - .flat_map(|c| c.inputize()) - .collect::>(), + &U_final_commitments.inputize_nonnative(), &proof.kzg_challenges, &proof.kzg_proofs.iter().map(|p| p.eval).collect::>(), &proof.L_X_evals, @@ -243,9 +256,9 @@ where #[cfg(test)] pub mod tests { use ark_bn254::Bn254; - use ark_bn254::{constraints::GVar, Fr, G1Projective as Projective}; + use ark_bn254::{Fr, G1Projective as Projective}; use ark_groth16::Groth16; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_grumpkin::Projective as Projective2; use std::time::Instant; use super::*; @@ -255,24 +268,21 @@ pub mod tests { use crate::folding::traits::CommittedInstanceOps; use crate::frontend::utils::CubicFCircuit; use crate::transcript::poseidon::poseidon_canonical_config; + use crate::Error; #[test] - fn test_decider() { + fn test_decider() -> Result<(), Error> { // use ProtoGalaxy as FoldingScheme type PG = ProtoGalaxy< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, >; type D = Decider< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, @@ -283,25 +293,25 @@ pub mod tests { let mut rng = rand::rngs::OsRng; let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let z_0 = vec![Fr::from(3_u32)]; let preprocessor_param = (poseidon_config, F_circuit); - let protogalaxy_params = PG::preprocess(&mut rng, &preprocessor_param).unwrap(); + let protogalaxy_params = PG::preprocess(&mut rng, &preprocessor_param)?; let start = Instant::now(); - let mut protogalaxy = PG::init(&protogalaxy_params, F_circuit, z_0.clone()).unwrap(); + let mut protogalaxy = PG::init(&protogalaxy_params, F_circuit, z_0.clone())?; println!("ProtoGalaxy initialized, {:?}", 
start.elapsed()); - protogalaxy.prove_step(&mut rng, vec![], None).unwrap(); - protogalaxy.prove_step(&mut rng, vec![], None).unwrap(); // do a 2nd step + protogalaxy.prove_step(&mut rng, (), None)?; + protogalaxy.prove_step(&mut rng, (), None)?; // do a 2nd step // prepare the Decider prover & verifier params let (decider_pp, decider_vp) = - D::preprocess(&mut rng, protogalaxy_params, protogalaxy.clone()).unwrap(); + D::preprocess(&mut rng, (protogalaxy_params, F_circuit.state_len()))?; // decider proof generation let start = Instant::now(); - let proof = D::prove(rng, decider_pp, protogalaxy.clone()).unwrap(); + let proof = D::prove(rng, decider_pp, protogalaxy.clone())?; println!("Decider prove, {:?}", start.elapsed()); // decider proof verification @@ -314,8 +324,7 @@ pub mod tests { &protogalaxy.U_i.get_commitments(), &protogalaxy.u_i.get_commitments(), &proof, - ) - .unwrap(); + )?; assert!(verified); println!("Decider verify, {:?}", start.elapsed()); @@ -328,31 +337,27 @@ pub mod tests { &protogalaxy.U_i.get_commitments(), &protogalaxy.u_i.get_commitments(), &proof, - ) - .unwrap(); + )?; assert!(verified); + Ok(()) } // Test to check the serialization and deserialization of diverse Decider related parameters. // This test is the same test as `test_decider` but it serializes values and then uses the // deserialized values to continue the checks. #[test] - fn test_decider_serialization() { + fn test_decider_serialization() -> Result<(), Error> { // use ProtoGalaxy as FoldingScheme type PG = ProtoGalaxy< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, >; type D = Decider< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, KZG<'static, Bn254>, Pedersen, @@ -363,34 +368,28 @@ pub mod tests { let mut rng = rand::rngs::OsRng; let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let z_0 = vec![Fr::from(3_u32)]; let preprocessor_param = (poseidon_config, F_circuit); - let protogalaxy_params = PG::preprocess(&mut rng, &preprocessor_param).unwrap(); - - let start = Instant::now(); - let mut protogalaxy = PG::init(&protogalaxy_params, F_circuit, z_0.clone()).unwrap(); - println!("ProtoGalaxy initialized, {:?}", start.elapsed()); - protogalaxy.prove_step(&mut rng, vec![], None).unwrap(); - protogalaxy.prove_step(&mut rng, vec![], None).unwrap(); // do a 2nd step + let protogalaxy_params = PG::preprocess(&mut rng, &preprocessor_param)?; // prepare the Decider prover & verifier params - let (decider_pp, decider_vp) = - D::preprocess(&mut rng, protogalaxy_params.clone(), protogalaxy.clone()).unwrap(); + let (decider_pp, decider_vp) = D::preprocess( + &mut rng, + (protogalaxy_params.clone(), F_circuit.state_len()), + )?; // serialize the Nova params. These params are the trusted setup of the commitment schemes used // (ie. KZG & Pedersen in this case) let mut protogalaxy_pp_serialized = vec![]; protogalaxy_params .0 - .serialize_compressed(&mut protogalaxy_pp_serialized) - .unwrap(); + .serialize_compressed(&mut protogalaxy_pp_serialized)?; let mut protogalaxy_vp_serialized = vec![]; protogalaxy_params .1 - .serialize_compressed(&mut protogalaxy_vp_serialized) - .unwrap(); + .serialize_compressed(&mut protogalaxy_vp_serialized)?; // deserialize the Nova params. 
This would be done by the client reading from a file let protogalaxy_pp_deserialized = ProverParams::< Projective, @@ -399,8 +398,7 @@ pub mod tests { Pedersen, >::deserialize_compressed( &mut protogalaxy_pp_serialized.as_slice() - ) - .unwrap(); + )?; let protogalaxy_vp_deserialized = as CommitmentScheme>::VerifierParams, as SNARK>::VerifyingKey, - >::deserialize_compressed(&mut decider_vp_serialized.as_slice()) - .unwrap(); + >::deserialize_compressed(&mut decider_vp_serialized.as_slice())?; let proof_deserialized = Proof::, Groth16>::deserialize_compressed( &mut proof_serialized.as_slice(), - ) - .unwrap(); + )?; // deserialize the public inputs from the single packet 'public_inputs_serialized' let mut reader = public_inputs_serialized.as_slice(); - let i_deserialized = Fr::deserialize_compressed(&mut reader).unwrap(); - let z_0_deserialized = Vec::::deserialize_compressed(&mut reader).unwrap(); - let z_i_deserialized = Vec::::deserialize_compressed(&mut reader).unwrap(); + let i_deserialized = Fr::deserialize_compressed(&mut reader)?; + let z_0_deserialized = Vec::::deserialize_compressed(&mut reader)?; + let z_i_deserialized = Vec::::deserialize_compressed(&mut reader)?; // decider proof verification using the deserialized data let verified = D::verify( @@ -496,8 +485,8 @@ pub mod tests { &protogalaxy.U_i.get_commitments(), &protogalaxy.u_i.get_commitments(), &proof_deserialized, - ) - .unwrap(); + )?; assert!(verified); + Ok(()) } } diff --git a/folding-schemes/src/folding/protogalaxy/decider_eth_circuit.rs b/folding-schemes/src/folding/protogalaxy/decider_eth_circuit.rs index 57d69658..da9ce4e3 100644 --- a/folding-schemes/src/folding/protogalaxy/decider_eth_circuit.rs +++ b/folding-schemes/src/folding/protogalaxy/decider_eth_circuit.rs @@ -3,16 +3,13 @@ use ark_crypto_primitives::sponge::{ constraints::CryptographicSpongeVar, poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge}, - Absorb, CryptographicSponge, + CryptographicSponge, }; -use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, eq::EqGadget, fields::fp::FpVar, - groups::CurveVar, - ToConstraintFieldGadget, }; use ark_relations::r1cs::{Namespace, SynthesisError}; use ark_std::{borrow::Borrow, marker::PhantomData}; @@ -26,12 +23,12 @@ use crate::{ on_chain::GenericOnchainDeciderCircuit, DeciderEnabledNIFS, EvalGadget, KZGChallengesGadget, }, - CF1, CF2, + CF1, }, traits::{WitnessOps, WitnessVarOps}, }, frontend::FCircuit, - Error, + Curve, Error, }; use super::{ @@ -71,10 +68,9 @@ impl WitnessVarOps for WitnessVar { } } -pub type DeciderEthCircuit = GenericOnchainDeciderCircuit< +pub type DeciderEthCircuit = GenericOnchainDeciderCircuit< C1, C2, - GC2, CommittedInstance, CommittedInstance, Witness>, @@ -85,21 +81,17 @@ pub type DeciderEthCircuit = GenericOnchainDeciderCircuit< /// returns an instance of the DeciderEthCircuit from the given ProtoGalaxy struct impl< - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, // enforce that the CS2 is Pedersen commitment scheme, since we're at Ethereum's EVM decider CS2: CommitmentScheme>, - > TryFrom> for DeciderEthCircuit -where - CF1: Absorb, + > TryFrom> for DeciderEthCircuit { type Error = Error; - fn try_from(protogalaxy: ProtoGalaxy) -> Result { + fn try_from(protogalaxy: ProtoGalaxy) -> Result { let mut transcript = PoseidonSponge::::new(&protogalaxy.poseidon_config); let (U_i1, W_i1, 
proof, aux) = Folding::prove( @@ -123,7 +115,6 @@ where .collect::, _>>()?; Ok(Self { - _gc2: PhantomData, _avar: PhantomData, arith: protogalaxy.r1cs, cf_arith: protogalaxy.cf_r1cs, @@ -151,7 +142,7 @@ where pub struct DeciderProtoGalaxyGadget; -impl +impl DeciderEnabledNIFS< C, CommittedInstance, @@ -201,8 +192,8 @@ impl #[cfg(test)] pub mod tests { - use ark_bn254::{constraints::GVar, Fr, G1Projective as Projective}; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_bn254::{Fr, G1Projective as Projective}; + use ark_grumpkin::Projective as Projective2; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; use super::*; @@ -213,40 +204,38 @@ pub mod tests { use crate::FoldingScheme; #[test] - fn test_decider_circuit() { + fn test_decider_circuit() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let z_0 = vec![Fr::from(3_u32)]; type PG = ProtoGalaxy< Projective, - GVar, Projective2, - GVar2, CubicFCircuit, Pedersen, Pedersen, >; - let pg_params = PG::preprocess(&mut rng, &(poseidon_config, F_circuit)).unwrap(); + let pg_params = PG::preprocess(&mut rng, &(poseidon_config, F_circuit))?; // generate a Nova instance and do a step of it - let mut protogalaxy = PG::init(&pg_params, F_circuit, z_0.clone()).unwrap(); - protogalaxy.prove_step(&mut rng, vec![], None).unwrap(); + let mut protogalaxy = PG::init(&pg_params, F_circuit, z_0.clone())?; + protogalaxy.prove_step(&mut rng, (), None)?; let ivc_proof = protogalaxy.ivc_proof(); - PG::verify(pg_params.1, ivc_proof).unwrap(); + PG::verify(pg_params.1, ivc_proof)?; // load the DeciderEthCircuit from the generated Nova instance - let decider_circuit = - DeciderEthCircuit::::try_from(protogalaxy).unwrap(); + let decider_circuit = DeciderEthCircuit::::try_from(protogalaxy)?; let cs = ConstraintSystem::::new_ref(); // generate the constraints and check that are satisfied by the inputs - decider_circuit.generate_constraints(cs.clone()).unwrap(); - assert!(cs.is_satisfied().unwrap()); + decider_circuit.generate_constraints(cs.clone())?; + assert!(cs.is_satisfied()?); dbg!(cs.num_constraints()); + Ok(()) } } diff --git a/folding-schemes/src/folding/protogalaxy/folding.rs b/folding-schemes/src/folding/protogalaxy/folding.rs index 1e098358..9cd10021 100644 --- a/folding-schemes/src/folding/protogalaxy/folding.rs +++ b/folding-schemes/src/folding/protogalaxy/folding.rs @@ -1,6 +1,4 @@ /// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf) -use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; use ark_ff::PrimeField; use ark_poly::{ univariate::{DensePolynomial, SparsePolynomial}, @@ -14,11 +12,11 @@ use super::utils::{all_powers, betas_star, exponential_powers, pow_i}; use super::ProtoGalaxyError; use super::{CommittedInstance, Witness}; -use crate::arith::r1cs::R1CS; -use crate::folding::traits::Dummy; use crate::transcript::Transcript; use crate::utils::vec::*; use crate::Error; +use crate::{arith::r1cs::R1CS, Curve}; +use crate::{arith::Arith, folding::traits::Dummy}; #[derive(Debug, Clone)] pub struct ProtoGalaxyProof { @@ -36,7 +34,7 @@ impl Dummy<(usize, usize, usize)> for ProtoGalaxyProof { } #[derive(Debug, Clone)] -pub struct ProtoGalaxyAux { +pub struct ProtoGalaxyAux { pub L_X_evals: Vec, pub phi_stars: Vec, } @@ -44,13 +42,10 @@ pub struct ProtoGalaxyAux { #[derive(Clone, 
Debug)] /// Implements the protocol described in section 4 of /// [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf) -pub struct Folding { +pub struct Folding { _phantom: PhantomData, } -impl Folding -where - ::ScalarField: Absorb, -{ +impl Folding { #![allow(clippy::type_complexity)] /// implements the non-interactive Prover from the folding scheme described in section 4 pub fn prove( @@ -79,11 +74,11 @@ where vec_w.len(), )); } - let d = 2; // for the moment hardcoded to 2 since it only supports R1CS + let d = r1cs.degree(); let k = vec_instances.len(); let t = instance.betas.len(); - let n = r1cs.A.n_cols; - let m = r1cs.A.n_rows; + let n = r1cs.n_variables(); + let m = r1cs.n_constraints(); let z = [vec![C::ScalarField::one()], instance.x.clone(), w.w.clone()].concat(); @@ -145,7 +140,7 @@ where // 'refreshed' randomness) satisfies the relation. #[cfg(test)] { - use crate::arith::Arith; + use crate::arith::ArithRelation; r1cs.check_relation( w, &CommittedInstance::<_, true> { @@ -214,9 +209,7 @@ where let L0_e = &L_X[0] * F_alpha; let G_L0e = &G_X - &L0_e; // Pending optimization: move division by Z_X to the prev loop - let (K_X, remainder) = G_L0e.divide_by_vanishing_poly(H).ok_or(Error::ProtoGalaxy( - ProtoGalaxyError::CouldNotDivideByVanishing, - ))?; + let (K_X, remainder) = G_L0e.divide_by_vanishing_poly(H); if !remainder.is_zero() { return Err(Error::ProtoGalaxy(ProtoGalaxyError::RemainderNotZero)); } @@ -415,7 +408,7 @@ pub mod tests { use ark_std::{rand::Rng, UniformRand}; use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z_split}; - use crate::arith::Arith; + use crate::arith::ArithRelation; use crate::commitment::{pedersen::Pedersen, CommitmentScheme}; use crate::transcript::poseidon::poseidon_canonical_config; @@ -436,21 +429,24 @@ pub mod tests { // k represents the number of instances to be fold, apart from the running instance #[allow(clippy::type_complexity)] - pub fn prepare_inputs( + pub fn prepare_inputs( k: usize, - ) -> ( - Witness, - CommittedInstance, - Vec>, - Vec>, - ) { + ) -> Result< + ( + Witness, + CommittedInstance, + Vec>, + Vec>, + ), + Error, + > { let mut rng = ark_std::test_rng(); let (_, x, w) = get_test_z_split::(rng.gen::() as usize); - let (pedersen_params, _) = Pedersen::::setup(&mut rng, w.len()).unwrap(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, w.len())?; - let t = log2(get_test_r1cs::().A.n_rows) as usize; + let t = log2(get_test_r1cs::().n_constraints()) as usize; let beta = C::ScalarField::rand(&mut rng); let betas = exponential_powers(beta, t); @@ -459,7 +455,7 @@ pub mod tests { w, r_w: C::ScalarField::zero(), }; - let phi = Pedersen::::commit(&pedersen_params, &witness.w, &witness.r_w).unwrap(); + let phi = Pedersen::::commit(&pedersen_params, &witness.w, &witness.r_w)?; let instance = CommittedInstance:: { phi, betas: betas.clone(), @@ -476,8 +472,7 @@ pub mod tests { w: w_i, r_w: C::ScalarField::zero(), }; - let phi_i = - Pedersen::::commit(&pedersen_params, &witness_i.w, &witness_i.r_w).unwrap(); + let phi_i = Pedersen::::commit(&pedersen_params, &witness_i.w, &witness_i.r_w)?; let instance_i = CommittedInstance:: { phi: phi_i, betas: vec![], @@ -488,13 +483,13 @@ pub mod tests { instances.push(instance_i); } - (witness, instance, witnesses, instances) + Ok((witness, instance, witnesses, instances)) } #[test] - fn test_fold() { + fn test_fold() -> Result<(), Error> { let k = 7; - let (witness, instance, witnesses, instances) = prepare_inputs(k); + let (witness, instance, witnesses, instances) = 
prepare_inputs(k)?; let r1cs = get_test_r1cs::(); // init Prover & Verifier's transcript @@ -509,12 +504,11 @@ pub mod tests { &witness, &instances, &witnesses, - ) - .unwrap(); + )?; // verifier let folded_instance_v = - Folding::::verify(&mut transcript_v, &instance, &instances, proof).unwrap(); + Folding::::verify(&mut transcript_v, &instance, &instances, proof)?; // check that prover & verifier folded instances are the same values assert_eq!(folded_instance.phi, folded_instance_v.phi); @@ -523,12 +517,12 @@ pub mod tests { assert!(!folded_instance.e.is_zero()); // check that the folded instance satisfies the relation - r1cs.check_relation(&folded_witness, &folded_instance) - .unwrap(); + r1cs.check_relation(&folded_witness, &folded_instance)?; + Ok(()) } #[test] - fn test_fold_various_iterations() { + fn test_fold_various_iterations() -> Result<(), Error> { let r1cs = get_test_r1cs::(); // init Prover & Verifier's transcript @@ -536,14 +530,14 @@ pub mod tests { let mut transcript_p = PoseidonSponge::::new(&poseidon_config); let mut transcript_v = PoseidonSponge::::new(&poseidon_config); - let (mut running_witness, mut running_instance, _, _) = prepare_inputs(0); + let (mut running_witness, mut running_instance, _, _) = prepare_inputs(0)?; // fold k instances on each of num_iters iterations let k = 7; let num_iters = 10; for _ in 0..num_iters { // generate the instances to be fold - let (_, _, witnesses, instances) = prepare_inputs(k); + let (_, _, witnesses, instances) = prepare_inputs(k)?; let (folded_instance, folded_witness, proof, _) = Folding::::prove( &mut transcript_p, @@ -552,8 +546,7 @@ pub mod tests { &running_witness, &instances, &witnesses, - ) - .unwrap(); + )?; // verifier let folded_instance_v = Folding::::verify( @@ -561,8 +554,7 @@ pub mod tests { &running_instance, &instances, proof, - ) - .unwrap(); + )?; // check that prover & verifier folded instances are the same values assert_eq!(folded_instance, folded_instance_v); @@ -570,11 +562,11 @@ pub mod tests { assert!(!folded_instance.e.is_zero()); // check that the folded instance satisfies the relation - r1cs.check_relation(&folded_witness, &folded_instance) - .unwrap(); + r1cs.check_relation(&folded_witness, &folded_instance)?; running_witness = folded_witness; running_instance = folded_instance; } + Ok(()) } } diff --git a/folding-schemes/src/folding/protogalaxy/mod.rs b/folding-schemes/src/folding/protogalaxy/mod.rs index e251d72a..ffc72f32 100644 --- a/folding-schemes/src/folding/protogalaxy/mod.rs +++ b/folding-schemes/src/folding/protogalaxy/mod.rs @@ -1,16 +1,14 @@ /// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf) use ark_crypto_primitives::sponge::{ poseidon::{PoseidonConfig, PoseidonSponge}, - Absorb, CryptographicSponge, + CryptographicSponge, }; -use ark_ec::{CurveGroup, Group}; use ark_ff::{BigInteger, PrimeField}; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, eq::EqGadget, fields::{fp::FpVar, FieldVar}, - groups::CurveVar, - R1CSVar, ToConstraintFieldGadget, + R1CSVar, }; use ark_relations::r1cs::{ ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, Namespace, SynthesisError, @@ -25,7 +23,7 @@ use num_bigint::BigUint; use crate::{ arith::{ r1cs::{extract_r1cs, extract_w_x, R1CS}, - Arith, + Arith, ArithRelation, }, commitment::CommitmentScheme, folding::circuits::{ @@ -34,12 +32,12 @@ use crate::{ CycleFoldWitness, }, nonnative::affine::NonNativeAffineVar, - CF1, CF2, + CF1, }, frontend::{utils::DummyCircuit, FCircuit}, 
transcript::poseidon::poseidon_canonical_config, - utils::{get_cm_coordinates, pp_hash}, - Error, FoldingScheme, + utils::pp_hash, + Curve, Error, FoldingScheme, }; pub mod circuits; @@ -58,20 +56,19 @@ use super::traits::{ }; /// Configuration for ProtoGalaxy's CycleFold circuit -pub struct ProtoGalaxyCycleFoldConfig { +pub struct ProtoGalaxyCycleFoldConfig { _c: PhantomData, } -impl CycleFoldConfig for ProtoGalaxyCycleFoldConfig { +impl CycleFoldConfig for ProtoGalaxyCycleFoldConfig { const RANDOMNESS_BIT_LENGTH: usize = C::ScalarField::MODULUS_BIT_SIZE as usize; const N_INPUT_POINTS: usize = 2; type C = C; - type F = C::BaseField; } /// CycleFold circuit for computing random linear combinations of group elements /// in ProtoGalaxy instances. -pub type ProtoGalaxyCycleFoldCircuit = CycleFoldCircuit, GC>; +pub type ProtoGalaxyCycleFoldCircuit = CycleFoldCircuit>; /// The committed instance of ProtoGalaxy. /// @@ -79,14 +76,14 @@ pub type ProtoGalaxyCycleFoldCircuit = CycleFoldCircuit { +pub struct CommittedInstance { phi: C, betas: Vec, e: C::ScalarField, x: Vec, } -impl Dummy<(usize, usize)> for CommittedInstance { +impl Dummy<(usize, usize)> for CommittedInstance { fn dummy((io_len, t): (usize, usize)) -> Self { if TYPE == INCOMING { assert_eq!(t, 0); @@ -100,18 +97,18 @@ impl Dummy<(usize, usize)> for CommittedInstanc } } -impl Dummy<&R1CS>> for CommittedInstance { +impl Dummy<&R1CS>> for CommittedInstance { fn dummy(r1cs: &R1CS>) -> Self { let t = if TYPE == RUNNING { - log2(r1cs.num_constraints()) as usize + log2(r1cs.n_constraints()) as usize } else { 0 }; - Self::dummy((r1cs.num_public_inputs(), t)) + Self::dummy((r1cs.n_public_inputs(), t)) } } -impl CommittedInstanceOps for CommittedInstance { +impl CommittedInstanceOps for CommittedInstance { type Var = CommittedInstanceVar; fn get_commitments(&self) -> Vec { @@ -123,23 +120,29 @@ impl CommittedInstanceOps for CommittedInsta } } -impl Inputize> - for CommittedInstance -{ - fn inputize(&self) -> Vec { - [&self.phi.inputize(), &self.betas, &[self.e][..], &self.x].concat() +impl Inputize> for CommittedInstance { + /// Returns the internal representation in the same order as how the value + /// is allocated in `CommittedInstanceVar::new_input`. 
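// NOTE (illustration only, not part of this patch): why the ordering contract
// of `Inputize` matters. The decider verifier rebuilds the public-input vector
// natively and hands it to the SNARK verifier, so the native serialization must
// follow exactly the order in which the corresponding `*Var::new_input` method
// allocates public inputs. The toy types below are hypothetical.
use ark_bn254::Fr;

struct ToyInstance {
    e: Fr,
    x: Vec<Fr>,
}

impl ToyInstance {
    // Mirrors a hypothetical `ToyInstanceVar::new_input` that allocates `e`
    // first and then each element of `x`.
    fn inputize(&self) -> Vec<Fr> {
        [&[self.e][..], &self.x[..]].concat()
    }
}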
+ fn inputize(&self) -> Vec> { + [ + &self.phi.inputize_nonnative(), + &self.betas, + &[self.e][..], + &self.x, + ] + .concat() } } #[derive(Clone, Debug)] -pub struct CommittedInstanceVar { +pub struct CommittedInstanceVar { phi: NonNativeAffineVar, betas: Vec>, e: FpVar, x: Vec>, } -impl AllocVar, C::ScalarField> +impl AllocVar, C::ScalarField> for CommittedInstanceVar { fn new_variable>>( @@ -166,7 +169,7 @@ impl AllocVar, C::Sc } } -impl R1CSVar for CommittedInstanceVar { +impl R1CSVar for CommittedInstanceVar { type Value = CommittedInstance; fn cs(&self) -> ConstraintSystemRef { @@ -191,7 +194,7 @@ impl R1CSVar for CommittedInsta } } -impl CommittedInstanceVarOps for CommittedInstanceVar { +impl CommittedInstanceVarOps for CommittedInstanceVar { type PointVar = NonNativeAffineVar; fn get_commitments(&self) -> Vec { @@ -233,7 +236,7 @@ impl Witness { Self { w, r_w: F::zero() } } - pub fn commit, C: CurveGroup>( + pub fn commit, C: Curve>( &self, params: &CS::ProverParams, x: Vec, @@ -251,7 +254,7 @@ impl Witness { impl Dummy<&R1CS> for Witness { fn dummy(r1cs: &R1CS) -> Self { Self { - w: vec![F::zero(); r1cs.num_witnesses()], + w: vec![F::zero(); r1cs.n_witnesses()], r_w: F::zero(), } } @@ -313,8 +316,8 @@ pub enum ProtoGalaxyError { #[derive(Debug, Clone)] pub struct ProverParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -327,8 +330,8 @@ where } impl CanonicalSerialize for ProverParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -347,8 +350,8 @@ where } impl Valid for ProverParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -367,8 +370,8 @@ where } impl CanonicalDeserialize for ProverParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -392,8 +395,8 @@ where #[derive(Debug, Clone)] pub struct VerifierParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -411,8 +414,8 @@ where impl Valid for VerifierParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -424,8 +427,8 @@ where } impl CanonicalSerialize for VerifierParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { @@ -445,16 +448,15 @@ where impl VerifierParams where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { /// returns the hash of the public parameters of ProtoGalaxy pub fn pp_hash(&self) -> Result { - // TODO (@winderica): support hiding commitments in ProtoGalaxy. - // For now, `H` is set to false. - // Tracking issue: https://github.com/privacy-scaling-explorations/sonobe/issues/82 + // TODO: support hiding commitments in ProtoGalaxy. For now, `H` is set to false. 
Tracking + // issue: https://github.com/privacy-scaling-explorations/sonobe/issues/82 pp_hash::( &self.r1cs, &self.cf_r1cs, @@ -466,11 +468,7 @@ where } #[derive(PartialEq, Eq, Debug, Clone, CanonicalSerialize, CanonicalDeserialize)] -pub struct IVCProof -where - C1: CurveGroup, - C2: CurveGroup, -{ +pub struct IVCProof { pub i: C1::ScalarField, pub z_0: Vec, pub z_i: Vec, @@ -488,19 +486,14 @@ where /// [ProtoGalaxy]: https://eprint.iacr.org/2023/1106.pdf /// [CycleFold]: https://eprint.iacr.org/2023/1192.pdf #[derive(Clone, Debug)] -pub struct ProtoGalaxy +pub struct ProtoGalaxy where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, { - _gc1: PhantomData, - _c2: PhantomData, - _gc2: PhantomData, /// R1CS of the Augmented Function circuit pub r1cs: R1CS, /// R1CS of the CycleFold circuit @@ -530,20 +523,13 @@ where pub cf_U_i: CycleFoldCommittedInstance, } -impl ProtoGalaxy +impl ProtoGalaxy where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - C1::ScalarField: Absorb, - C2::ScalarField: Absorb, - C1: CurveGroup, { /// This method computes the parameter `t` in ProtoGalaxy for folding `F'`, /// the augmented circuit of `F` @@ -570,7 +556,6 @@ where // For `t_lower_bound`, we configure `F'` with `t = 1` and compute log2 // of the size of `F'`. let state_len = F.state_len(); - let external_inputs_len = F.external_inputs_len(); // `F'` includes `F` and `ProtoGalaxy.V`, where `F` might be costly. // Observing that the cost of `F` is constant with respect to `t`, we @@ -582,18 +567,17 @@ where cs.clone(), 0, Vec::new_witness(cs.clone(), || Ok(vec![Zero::zero(); state_len]))?, - Vec::new_witness(cs.clone(), || Ok(vec![Zero::zero(); external_inputs_len]))?, + FC::ExternalInputsVar::new_witness(cs.clone(), || Ok(FC::ExternalInputs::default()))?, )?; let step_constraints = cs.num_constraints(); // Create a dummy circuit with the same state length and external inputs // length as `F`, which replaces `F` in the augmented circuit `F'`. - let dummy_circuit: DummyCircuit = - FCircuit::::new((state_len, external_inputs_len))?; + let dummy_circuit: DummyCircuit = FCircuit::::new(state_len)?; // Compute `augmentation_constraints`, the size of `F'` without `F`. 
let cs = ConstraintSystem::::new_ref(); - AugmentedFCircuit::::empty( + AugmentedFCircuit::::empty( poseidon_config, dummy_circuit.clone(), 1, @@ -616,7 +600,7 @@ where for t in t_lower_bound..=t_upper_bound { let cs = ConstraintSystem::::new_ref(); - AugmentedFCircuit::::empty( + AugmentedFCircuit::::empty( poseidon_config, dummy_circuit.clone(), t, @@ -633,21 +617,13 @@ where } } -impl FoldingScheme - for ProtoGalaxy +impl FoldingScheme for ProtoGalaxy where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - C1::ScalarField: Absorb, - C2::ScalarField: Absorb, - C1: CurveGroup, { type PreprocessorParam = (PoseidonConfig>, FC); type ProverParam = ProverParams; @@ -683,18 +659,13 @@ where let f_circuit = FC::new(fc_params)?; let k = 1; - let d = 2; + let d = R1CS::>::empty().degree(); let t = Self::compute_t(&poseidon_config, &f_circuit, d, k)?; // main circuit R1CS: let cs = ConstraintSystem::::new_ref(); - let augmented_F_circuit = AugmentedFCircuit::::empty( - &poseidon_config, - f_circuit.clone(), - t, - d, - k, - ); + let augmented_F_circuit = + AugmentedFCircuit::::empty(&poseidon_config, f_circuit.clone(), t, d, k); augmented_F_circuit.generate_constraints(cs.clone())?; cs.finalize(); let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; @@ -702,7 +673,7 @@ where // CycleFold circuit R1CS let cs2 = ConstraintSystem::::new_ref(); - let cf_circuit = ProtoGalaxyCycleFoldCircuit::::empty(); + let cf_circuit = ProtoGalaxyCycleFoldCircuit::::empty(); cf_circuit.generate_constraints(cs2.clone())?; cs2.finalize(); let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; @@ -726,13 +697,11 @@ where ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { // We fix `k`, the number of incoming instances, to 1, because // multi-instances folding is not supported yet. - // TODO (@winderica): Support multi-instances folding and make `k` a - // constant generic parameter (as in HyperNova) - // Tracking issue: https://github.com/privacy-scaling-explorations/sonobe/issues/82 + // TODO: Support multi-instances folding and make `k` a constant generic parameter (as in + // HyperNova). Tracking issue: + // https://github.com/privacy-scaling-explorations/sonobe/issues/82 let k = 1; - // `d`, the degree of the constraint system, is set to 2, as we only - // support R1CS for now, whose highest degree is 2. - let d = 2; + let d = R1CS::>::empty().degree(); let t = Self::compute_t(poseidon_config, F, d, k)?; // prepare the circuit to obtain its R1CS @@ -740,8 +709,8 @@ where let cs2 = ConstraintSystem::::new_ref(); let augmented_F_circuit = - AugmentedFCircuit::::empty(poseidon_config, F.clone(), t, d, k); - let cf_circuit = ProtoGalaxyCycleFoldCircuit::::empty(); + AugmentedFCircuit::::empty(poseidon_config, F.clone(), t, d, k); + let cf_circuit = ProtoGalaxyCycleFoldCircuit::::empty(); augmented_F_circuit.generate_constraints(cs.clone())?; cs.finalize(); @@ -753,8 +722,16 @@ where let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cf_r1cs = extract_r1cs::(&cs2)?; - let (cs_pp, cs_vp) = CS1::setup(&mut rng, r1cs.A.n_rows)?; - let (cf_cs_pp, cf_cs_vp) = CS2::setup(&mut rng, max(cf_r1cs.A.n_rows, cf_r1cs.A.n_cols))?; + // `CS1` is for committing to ProtoGalaxy's witness vector `w`, so we + // set `len` to the number of witnesses in `r1cs`. 
+ let (cs_pp, cs_vp) = CS1::setup(&mut rng, r1cs.n_witnesses())?; + // `CS2` is for committing to CycleFold's witness vector `w` and error + // term `e`, where the length of `e` is the number of constraints, so we + // set `len` to the maximum of `e` and `w`'s lengths. + let (cf_cs_pp, cf_cs_vp) = CS2::setup( + &mut rng, + max(cf_r1cs.n_constraints(), cf_r1cs.n_witnesses()), + )?; Ok(( Self::ProverParam { @@ -790,9 +767,6 @@ where // W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the // R1CS that we're working with. Ok(Self { - _gc1: PhantomData, - _c2: PhantomData, - _gc2: PhantomData, r1cs: vp.r1cs.clone(), cf_r1cs: vp.cf_r1cs.clone(), poseidon_config: pp.poseidon_config.clone(), @@ -817,7 +791,7 @@ where fn prove_step( &mut self, mut rng: impl RngCore, - external_inputs: Vec, + external_inputs: FC::ExternalInputs, _other_instances: Option, ) -> Result<(), Error> { // Multi-instances folding is not supported yet. @@ -826,20 +800,18 @@ where } // We fix `k`, the number of incoming instances, to 1, because // multi-instances folding is not supported yet. - // TODO (@winderica): Support multi-instances folding and make `k` a - // constant generic parameter (as in HyperNova) - // Tracking issue: https://github.com/privacy-scaling-explorations/sonobe/issues/82 + // TODO: Support multi-instances folding and make `k` a constant generic parameter (as in + // HyperNova). Tracking issue: + // https://github.com/privacy-scaling-explorations/sonobe/issues/82 let k = 1; - // `d`, the degree of the constraint system, is set to 2, as we only - // support R1CS for now, whose highest degree is 2. - let d = 2; + let d = self.r1cs.degree(); // `sponge` is for digest computation. let sponge = PoseidonSponge::::new(&self.poseidon_config); // `transcript` is for challenge generation. let mut transcript_prover = sponge.clone(); - let mut augmented_F_circuit: AugmentedFCircuit; + let mut augmented_F_circuit: AugmentedFCircuit; if self.z_i.len() != self.F.state_len() { return Err(Error::NotSameLength( @@ -849,43 +821,11 @@ where self.F.state_len(), )); } - if external_inputs.len() != self.F.external_inputs_len() { - return Err(Error::NotSameLength( - "F.external_inputs_len()".to_string(), - self.F.external_inputs_len(), - "external_inputs.len()".to_string(), - external_inputs.len(), - )); - } let i_bn: BigUint = self.i.into(); let i_usize: usize = i_bn.try_into().map_err(|_| Error::MaxStep)?; - let z_i1 = self - .F - .step_native(i_usize, self.z_i.clone(), external_inputs.clone())?; - - // folded instance output (public input, x) - // u_{i+1}.x[0] = H(i+1, z_0, z_{i+1}, U_{i+1}) - let u_i1_x: C1::ScalarField; - // u_{i+1}.x[1] = H(cf_U_{i+1}) - let cf_u_i1_x: C1::ScalarField; - if self.i.is_zero() { - // Take extra care of the base case - // `U_{i+1}` (i.e., `U_1`) is fixed to `U_dummy`, so we just use - // `self.U_i = U_0 = U_dummy`. - u_i1_x = self.U_i.hash( - &sponge, - self.pp_hash, - self.i + C1::ScalarField::one(), - &self.z_0, - &z_i1, - ); - // `cf_U_{i+1}` (i.e., `cf_U_1`) is fixed to `cf_U_dummy`, so we - // just use `self.cf_U_i = cf_U_0 = cf_U_dummy`. - cf_u_i1_x = self.cf_U_i.hash_cyclefold(&sponge, self.pp_hash); - augmented_F_circuit = AugmentedFCircuit::empty( &self.poseidon_config, self.F.clone(), @@ -900,7 +840,7 @@ where .external_inputs .clone_from(&external_inputs); - // There is no need to update `self.U_i` etc. as they are unchanged. + // There is no need to update `self.U_i` etc. as they are unchanged. 
} else { // Primary part: // Compute `U_{i+1}` by folding `u_i` into `U_i`. @@ -914,7 +854,6 @@ where )?; // CycleFold part: - // get the vector used as public inputs 'x' in the CycleFold circuit let mut r0_bits = aux.L_X_evals[0].into_bigint().to_bits_le(); let mut r1_bits = aux.L_X_evals[1].into_bigint().to_bits_le(); r0_bits.resize(C1::ScalarField::MODULUS_BIT_SIZE as usize, false); @@ -922,76 +861,37 @@ where // cyclefold circuit for enforcing: // 0 + U_i.phi * L_evals[0] == phi_stars[0] - let cf1_u_i_x = [ - r0_bits - .chunks(C1::BaseField::MODULUS_BIT_SIZE as usize - 1) - .map(::BigInt::from_bits_le) - .map(C1::BaseField::from) - .collect::>(), - get_cm_coordinates(&C1::zero()), - get_cm_coordinates(&self.U_i.phi), - get_cm_coordinates(&aux.phi_stars[0]), - ] - .concat(); - let cf1_circuit = ProtoGalaxyCycleFoldCircuit:: { - _gc: PhantomData, + let cf1_circuit = ProtoGalaxyCycleFoldCircuit:: { r_bits: Some(r0_bits), points: Some(vec![C1::zero(), self.U_i.phi]), - x: Some(cf1_u_i_x.clone()), }; // cyclefold circuit for enforcing: // phi_stars[0] + u_i.phi * L_evals[1] == U_i1.phi // i.e., U_i.phi * L_evals[0] + u_i.phi * L_evals[1] == U_i1.phi - let cf2_u_i_x = [ - r1_bits - .chunks(C1::BaseField::MODULUS_BIT_SIZE as usize - 1) - .map(::BigInt::from_bits_le) - .map(C1::BaseField::from) - .collect::>(), - get_cm_coordinates(&aux.phi_stars[0]), - get_cm_coordinates(&self.u_i.phi), - get_cm_coordinates(&U_i1.phi), - ] - .concat(); - let cf2_circuit = ProtoGalaxyCycleFoldCircuit:: { - _gc: PhantomData, + let cf2_circuit = ProtoGalaxyCycleFoldCircuit:: { r_bits: Some(r1_bits), points: Some(vec![aux.phi_stars[0], self.u_i.phi]), - x: Some(cf2_u_i_x.clone()), }; // fold self.cf_U_i + cf1_U -> folded running with cf1 - let (_cf1_w_i, cf1_u_i, cf1_W_i1, cf1_U_i1, cf1_cmT, _) = self.fold_cyclefold_circuit( + let (cf1_u_i, cf1_W_i1, cf1_U_i1, cf1_cmT) = self.fold_cyclefold_circuit( &mut transcript_prover, self.cf_W_i.clone(), // CycleFold running instance witness self.cf_U_i.clone(), // CycleFold running instance - cf1_u_i_x, cf1_circuit, &mut rng, )?; // fold [the output from folding self.cf_U_i + cf1_U] + cf2_U = folded_running_with_cf1 + cf2 - let (_cf2_w_i, cf2_u_i, cf_W_i1, cf_U_i1, cf2_cmT, _) = self.fold_cyclefold_circuit( + let (cf2_u_i, cf_W_i1, cf_U_i1, cf2_cmT) = self.fold_cyclefold_circuit( &mut transcript_prover, cf1_W_i1, cf1_U_i1.clone(), - cf2_u_i_x, cf2_circuit, &mut rng, )?; - // Derive `u_{i+1}.x[0], u_{i+1}.x[1]` by hashing folded instances - u_i1_x = U_i1.hash( - &sponge, - self.pp_hash, - self.i + C1::ScalarField::one(), - &self.z_0, - &z_i1, - ); - cf_u_i1_x = cf_U_i1.hash_cyclefold(&sponge, self.pp_hash); - augmented_F_circuit = AugmentedFCircuit { - _gc2: PhantomData, poseidon_config: self.poseidon_config.clone(), pp_hash: self.pp_hash, i: self.i, @@ -1006,14 +906,12 @@ where K_coeffs: proof.K_coeffs.clone(), phi_stars: aux.phi_stars, F: self.F.clone(), - x: Some(u_i1_x), // cyclefold values cf1_u_i_cmW: cf1_u_i.cmW, cf2_u_i_cmW: cf2_u_i.cmW, cf_U_i: self.cf_U_i.clone(), cf1_cmT, cf2_cmT, - cf_x: Some(cf_u_i1_x), }; #[cfg(test)] @@ -1028,11 +926,6 @@ where )?, U_i1 ); - cf1_u_i.check_incoming()?; - cf2_u_i.check_incoming()?; - self.cf_r1cs.check_relation(&_cf1_w_i, &cf1_u_i)?; - self.cf_r1cs.check_relation(&_cf2_w_i, &cf2_u_i)?; - self.cf_r1cs.check_relation(&self.cf_W_i, &self.cf_U_i)?; } self.W_i = W_i1; @@ -1043,16 +936,15 @@ where let cs = ConstraintSystem::::new_ref(); - augmented_F_circuit.generate_constraints(cs.clone())?; + let z_i1 = augmented_F_circuit + 
.compute_next_state(cs.clone())? + .value()?; #[cfg(test)] - assert!(cs.is_satisfied().unwrap()); + assert!(cs.is_satisfied()?); let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let (w_i1, x_i1) = extract_w_x::(&cs); - if x_i1[0] != u_i1_x || x_i1[1] != cf_u_i1_x { - return Err(Error::NotEqual); - } #[cfg(test)] if x_i1.len() != 2 { @@ -1114,9 +1006,6 @@ where let f_circuit = FC::new(fcircuit_params)?; Ok(Self { - _gc1: PhantomData, - _c2: PhantomData, - _gc2: PhantomData, r1cs: vp.r1cs.clone(), cf_r1cs: vp.cf_r1cs.clone(), poseidon_config: pp.poseidon_config, @@ -1184,20 +1073,13 @@ where } } -impl ProtoGalaxy +impl ProtoGalaxy where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, + C1: Curve, + C2: Curve, FC: FCircuit, CS1: CommitmentScheme, CS2: CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, { // folds the given cyclefold circuit and its instances #[allow(clippy::type_complexity)] @@ -1206,28 +1088,24 @@ where transcript: &mut PoseidonSponge, cf_W_i: CycleFoldWitness, // witness of the running instance cf_U_i: CycleFoldCommittedInstance, // running instance - cf_u_i_x: Vec, - cf_circuit: ProtoGalaxyCycleFoldCircuit, + cf_circuit: ProtoGalaxyCycleFoldCircuit, rng: &mut impl RngCore, ) -> Result< ( - CycleFoldWitness, CycleFoldCommittedInstance, // u_i CycleFoldWitness, // W_i1 CycleFoldCommittedInstance, // U_i1 C2, // cmT - C2::ScalarField, // r_Fq ), Error, > { - fold_cyclefold_circuit::, C1, GC1, C2, GC2, CS2, false>( + fold_cyclefold_circuit::, C2, CS2, false>( transcript, self.cf_r1cs.clone(), self.cf_cs_params.clone(), self.pp_hash, cf_W_i, cf_U_i, - cf_u_i_x, cf_circuit, rng, ) @@ -1238,8 +1116,8 @@ where mod tests { use super::*; - use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_bn254::{Bn254, Fr, G1Projective as Projective}; + use ark_grumpkin::Projective as Projective2; use ark_std::test_rng; use rayon::prelude::*; @@ -1252,83 +1130,77 @@ mod tests { /// This test tests the ProtoGalaxy+CycleFold IVC, and by consequence it is /// also testing the AugmentedFCircuit #[test] - fn test_ivc() { + fn test_ivc() -> Result<(), Error> { let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; // run the test using Pedersen commitments on both sides of the curve cycle - test_ivc_opt::, Pedersen>( + let _ = test_ivc_opt::, Pedersen>( poseidon_config.clone(), F_circuit, - ); + )?; // run the test using KZG for the commitments on the main curve, and Pedersen for the // commitments on the secondary curve - test_ivc_opt::, Pedersen>(poseidon_config, F_circuit); + let _ = test_ivc_opt::, Pedersen>(poseidon_config, F_circuit)?; + Ok(()) } // test_ivc allowing to choose the CommitmentSchemes fn test_ivc_opt, CS2: CommitmentScheme>( poseidon_config: PoseidonConfig, F_circuit: CubicFCircuit, - ) { - type PG = - ProtoGalaxy, CS1, CS2>; + ) -> Result<(), Error> { + type PG = ProtoGalaxy, CS1, CS2>; - let params = - PG::::preprocess(&mut test_rng(), &(poseidon_config, F_circuit)).unwrap(); + let params = PG::::preprocess(&mut test_rng(), &(poseidon_config, F_circuit))?; let z_0 = vec![Fr::from(3_u32)]; - let mut protogalaxy = PG::init(¶ms, F_circuit, z_0.clone()).unwrap(); + let mut protogalaxy = PG::init(¶ms, 
F_circuit, z_0.clone())?; let num_steps: usize = 3; for _ in 0..num_steps { - protogalaxy - .prove_step(&mut test_rng(), vec![], None) - .unwrap(); + protogalaxy.prove_step(&mut test_rng(), (), None)?; } assert_eq!(Fr::from(num_steps as u32), protogalaxy.i); let ivc_proof = protogalaxy.ivc_proof(); - PG::::verify(params.1, ivc_proof).unwrap(); + PG::::verify(params.1, ivc_proof)?; + Ok(()) } #[ignore] #[test] - fn test_t_bounds() { - let d = 2; + fn test_t_bounds() -> Result<(), Error> { + let d = R1CS::::empty().degree(); let k = 1; let poseidon_config = poseidon_canonical_config::(); for state_len in [1, 10, 100] { - for external_inputs_len in [1, 10, 100] { - let dummy_circuit: DummyCircuit = - FCircuit::::new((state_len, external_inputs_len)).unwrap(); - - let costs = (1..32) - .into_par_iter() - .map(|t| { - let cs = ConstraintSystem::::new_ref(); - AugmentedFCircuit::::empty( - &poseidon_config, - dummy_circuit.clone(), - t, - d, - k, - ) - .generate_constraints(cs.clone()) - .unwrap(); - cs.num_constraints() - }) - .collect::>(); - - for t_lower_bound in log2(costs[0]) as usize..32 { - let num_constraints = - (1 << t_lower_bound) - costs[0] + costs[t_lower_bound - 1]; - let t = log2(num_constraints) as usize; - assert!(t == t_lower_bound || t == t_lower_bound + 1); - } + let dummy_circuit: DummyCircuit = FCircuit::::new(state_len)?; + + let costs: Vec = (1..32) + .into_par_iter() + .map(|t| { + let cs = ConstraintSystem::::new_ref(); + AugmentedFCircuit::::empty( + &poseidon_config, + dummy_circuit.clone(), + t, + d, + k, + ) + .generate_constraints(cs.clone())?; + Ok(cs.num_constraints()) + }) + .collect::, Error>>()?; + + for t_lower_bound in log2(costs[0]) as usize..32 { + let num_constraints = (1 << t_lower_bound) - costs[0] + costs[t_lower_bound - 1]; + let t = log2(num_constraints) as usize; + assert!(t == t_lower_bound || t == t_lower_bound + 1); } } + Ok(()) } } diff --git a/folding-schemes/src/folding/protogalaxy/traits.rs b/folding-schemes/src/folding/protogalaxy/traits.rs index a5b6dafa..741cb6e7 100644 --- a/folding-schemes/src/folding/protogalaxy/traits.rs +++ b/folding-schemes/src/folding/protogalaxy/traits.rs @@ -1,11 +1,9 @@ use ark_crypto_primitives::sponge::{constraints::AbsorbGadget, Absorb}; -use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_r1cs_std::{ eq::EqGadget, fields::{fp::FpVar, FieldVar}, uint8::UInt8, - ToConstraintFieldGadget, }; use ark_relations::r1cs::SynthesisError; use ark_std::{cfg_into_iter, log2, One}; @@ -19,27 +17,22 @@ use super::{ use crate::{ arith::{ r1cs::{circuits::R1CSMatricesVar, R1CS}, - Arith, ArithGadget, + ArithRelation, ArithRelationGadget, }, folding::circuits::CF1, - transcript::AbsorbNonNative, + transcript::AbsorbNonNativeGadget, utils::vec::is_zero_vec, - Error, + Curve, Error, }; // Implements the trait for absorbing ProtoGalaxy's CommittedInstance. -impl Absorb for CommittedInstance -where - C::ScalarField: Absorb, -{ +impl Absorb for CommittedInstance { fn to_sponge_bytes(&self, dest: &mut Vec) { C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest); } fn to_sponge_field_elements(&self, dest: &mut Vec) { - self.phi - .to_native_sponge_field_elements_as_vec() - .to_sponge_field_elements(dest); + self.phi.to_native_sponge_field_elements(dest); self.betas.to_sponge_field_elements(dest); self.e.to_sponge_field_elements(dest); self.x.to_sponge_field_elements(dest); @@ -47,16 +40,14 @@ where } // Implements the trait for absorbing ProtoGalaxy's CommittedInstanceVar in-circuit. 
-impl AbsorbGadget - for CommittedInstanceVar -{ +impl AbsorbGadget for CommittedInstanceVar { fn to_sponge_bytes(&self) -> Result>, SynthesisError> { FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?) } fn to_sponge_field_elements(&self) -> Result>, SynthesisError> { Ok([ - self.phi.to_constraint_field()?, + self.phi.to_native_sponge_field_elements()?, self.betas.to_sponge_field_elements()?, self.e.to_sponge_field_elements()?, self.x.to_sponge_field_elements()?, @@ -65,14 +56,14 @@ impl AbsorbGadget } } -/// Implements `Arith` for R1CS, where the witness is of type [`Witness`], and -/// the committed instance is of type [`CommittedInstance`]. +/// Implements [`ArithRelation`] for R1CS, where the witness is of type +/// [`Witness`], and the committed instance is of type [`CommittedInstance`]. /// /// Due to the error term `CommittedInstance.e`, R1CS here is considered as a /// relaxed R1CS. /// /// See `nova/traits.rs` for the rationale behind the design. -impl Arith>, CommittedInstance> +impl ArithRelation>, CommittedInstance> for R1CS> { type Evaluation = Vec>; @@ -113,7 +104,7 @@ impl Arith>, CommittedInstance ArithGadget>, CommittedInstanceVar> +impl ArithRelationGadget>, CommittedInstanceVar> for R1CSMatricesVar, FpVar>> { type Evaluation = (Vec>>, Vec>>); @@ -154,7 +145,7 @@ pub mod tests { /// test that checks the native CommittedInstance.to_sponge_{bytes,field_elements} /// vs the R1CS constraints version #[test] - pub fn test_committed_instance_to_sponge_preimage() { + pub fn test_committed_instance_to_sponge_preimage() -> Result<(), Error> { let mut rng = ark_std::test_rng(); let t = rng.gen::() as usize; @@ -173,15 +164,15 @@ pub mod tests { let cs = ConstraintSystem::::new_ref(); let ciVar = - CommittedInstanceVar::::new_witness(cs.clone(), || Ok(ci.clone())) - .unwrap(); - let bytes_var = ciVar.to_sponge_bytes().unwrap(); - let field_elements_var = ciVar.to_sponge_field_elements().unwrap(); + CommittedInstanceVar::::new_witness(cs.clone(), || Ok(ci.clone()))?; + let bytes_var = ciVar.to_sponge_bytes()?; + let field_elements_var = ciVar.to_sponge_field_elements()?; - assert!(cs.is_satisfied().unwrap()); + assert!(cs.is_satisfied()?); // check that the natively computed and in-circuit computed hashes match - assert_eq!(bytes_var.value().unwrap(), bytes); - assert_eq!(field_elements_var.value().unwrap(), field_elements); + assert_eq!(bytes_var.value()?, bytes); + assert_eq!(field_elements_var.value()?, field_elements); + Ok(()) } } diff --git a/folding-schemes/src/folding/protogalaxy/utils.rs b/folding-schemes/src/folding/protogalaxy/utils.rs index 596235ce..998216fd 100644 --- a/folding-schemes/src/folding/protogalaxy/utils.rs +++ b/folding-schemes/src/folding/protogalaxy/utils.rs @@ -107,7 +107,6 @@ pub fn pow_i_var(mut i: usize, betas: &[FpVar]) -> FpVar { #[cfg(test)] mod tests { - use std::error::Error; use ark_bn254::Fr; use ark_r1cs_std::{alloc::AllocVar, R1CSVar}; @@ -116,9 +115,10 @@ mod tests { use rand::Rng; use super::*; + use crate::Error; #[test] - fn test_exponential_powers() -> Result<(), Box> { + fn test_exponential_powers() -> Result<(), Error> { let rng = &mut test_rng(); for t in 1..10 { @@ -138,7 +138,7 @@ mod tests { } #[test] - fn test_all_powers() -> Result<(), Box> { + fn test_all_powers() -> Result<(), Error> { let rng = &mut test_rng(); for n in 1..10 { @@ -158,7 +158,7 @@ mod tests { } #[test] - fn test_betas_star() -> Result<(), Box> { + fn test_betas_star() -> Result<(), Error> { let rng = &mut test_rng(); for t in 1..10 { @@ -182,7 
+182,7 @@ mod tests { } #[test] - fn test_pow_i() -> Result<(), Box> { + fn test_pow_i() -> Result<(), Error> { let rng = &mut test_rng(); for t in 1..10 { diff --git a/folding-schemes/src/folding/traits.rs b/folding-schemes/src/folding/traits.rs index d451347b..74ab3b28 100644 --- a/folding-schemes/src/folding/traits.rs +++ b/folding-schemes/src/folding/traits.rs @@ -3,16 +3,18 @@ use ark_crypto_primitives::sponge::{ poseidon::constraints::PoseidonSpongeVar, Absorb, }; -use ark_ec::CurveGroup; use ark_ff::PrimeField; -use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, ToConstraintFieldGadget}; +use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar}; use ark_relations::r1cs::SynthesisError; -use crate::{transcript::Transcript, Error}; +use crate::{ + transcript::{AbsorbNonNativeGadget, Transcript}, + Curve, Error, +}; use super::circuits::CF1; -pub trait CommittedInstanceOps: Inputize, Self::Var> { +pub trait CommittedInstanceOps: Inputize> { /// The in-circuit representation of the committed instance. type Var: AllocVar> + CommittedInstanceVarOps; /// `hash` implements the committed instance hash compatible with the @@ -29,7 +31,6 @@ pub trait CommittedInstanceOps: Inputize, Self::Var> { z_i: &[CF1], ) -> CF1 where - CF1: Absorb, Self: Sized + Absorb, { let mut sponge = sponge.clone(); @@ -56,8 +57,8 @@ pub trait CommittedInstanceOps: Inputize, Self::Var> { } } -pub trait CommittedInstanceVarOps { - type PointVar: ToConstraintFieldGadget>; +pub trait CommittedInstanceVarOps { + type PointVar: AbsorbNonNativeGadget>; /// `hash` implements the in-circuit committed instance hash compatible with /// the native implementation from `CommittedInstanceOps::hash`. /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and @@ -141,8 +142,37 @@ impl Dummy<()> for T { } /// Converts a value `self` into a vector of field elements, ordered in the same -/// way as how a variable of type `Var` would be represented in the circuit. +/// way as how a variable of type `Var` would be represented *natively* in the +/// circuit. +/// /// This is useful for the verifier to compute the public inputs. -pub trait Inputize { +pub trait Inputize { fn inputize(&self) -> Vec; } + +/// Converts a value `self` into a vector of field elements, ordered in the same +/// way as how a variable of type `Var` would be represented *non-natively* in +/// the circuit. +/// +/// This is useful for the verifier to compute the public inputs. +/// +/// Note that we require this trait because we need to distinguish between some +/// data types that are represented both natively and non-natively in-circuit +/// (e.g., field elements can have type `FpVar` and `NonNativeUintVar`). 
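As an illustrative sketch of the `Inputize` contract described above (the `Pair`/`PairVar` names are hypothetical and not part of this diff): the native value must emit its field elements in exactly the order in which the corresponding circuit variable would allocate them, so the verifier can recompute the public inputs off-circuit.

use ark_ff::PrimeField;

// hypothetical example type; `PairVar` (not shown) would allocate `a` then `b`
// as two native field variables, so `inputize` must follow the same order
struct Pair<F: PrimeField> {
    a: F,
    b: F,
}

impl<F: PrimeField> Inputize<F> for Pair<F> {
    fn inputize(&self) -> Vec<F> {
        vec![self.a, self.b]
    }
}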
+pub trait InputizeNonNative { + fn inputize_nonnative(&self) -> Vec; +} + +impl> Inputize for [T] { + fn inputize(&self) -> Vec { + self.iter().flat_map(Inputize::::inputize).collect() + } +} + +impl> InputizeNonNative for [T] { + fn inputize_nonnative(&self) -> Vec { + self.iter() + .flat_map(InputizeNonNative::::inputize_nonnative) + .collect() + } +} diff --git a/folding-schemes/src/frontend/mod.rs b/folding-schemes/src/frontend/mod.rs index 8570c818..43191e61 100644 --- a/folding-schemes/src/frontend/mod.rs +++ b/folding-schemes/src/frontend/mod.rs @@ -1,6 +1,6 @@ use crate::Error; use ark_ff::PrimeField; -use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar}; use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; use ark_std::fmt::Debug; @@ -10,8 +10,14 @@ pub mod utils; /// inside the agmented F' function). /// The parameter z_i denotes the current state, and z_{i+1} denotes the next state after applying /// the step. +/// Note that the external inputs for the specific circuit are defined at the implementation of +/// both `FCircuit::ExternalInputs` and `FCircuit::ExternalInputsVar`, where the `Default` trait +/// implementation for the `ExternalInputs` returns the initialized data structure (ie. if the type +/// contains a vector, it is initialized at the expected length). pub trait FCircuit: Clone + Debug { type Params: Debug; + type ExternalInputs: Clone + Default + Debug; + type ExternalInputsVar: Clone + Debug + AllocVar; /// returns a new FCircuit instance fn new(params: Self::Params) -> Result; @@ -20,21 +26,6 @@ pub trait FCircuit: Clone + Debug { /// FCircuit inputs. fn state_len(&self) -> usize; - /// returns the number of elements in the external inputs used by the FCircuit. External inputs - /// are optional, and in case no external inputs are used, this method should return 0. - fn external_inputs_len(&self) -> usize; - - /// computes the next state values in place, assigning z_{i+1} into z_i, and computing the new - /// z_{i+1} - fn step_native( - // this method uses self, so that each FCircuit implementation (and different frontends) - // can hold a state if needed to store data to compute the next state. 
- &self, - i: usize, - z_i: Vec, - external_inputs: Vec, // inputs that are not part of the state - ) -> Result, Error>; - /// generates the constraints for the step of F for the given z_i fn generate_step_constraints( // this method uses self, so that each FCircuit implementation (and different frontends) @@ -43,7 +34,7 @@ pub trait FCircuit: Clone + Debug { cs: ConstraintSystemRef, i: usize, z_i: Vec>, - external_inputs: Vec>, // inputs that are not part of the state + external_inputs: Self::ExternalInputsVar, // inputs that are not part of the state ) -> Result>, SynthesisError>; } @@ -53,34 +44,36 @@ pub mod tests { use ark_bn254::Fr; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; - use utils::{CubicFCircuit, CustomFCircuit, WrapperCircuit}; + use utils::{custom_step_native, CubicFCircuit, CustomFCircuit, WrapperCircuit}; #[test] - fn test_testfcircuit() { + fn test_testfcircuit() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); + let F_circuit = CubicFCircuit::::new(())?; let wrapper_circuit = WrapperCircuit::> { FC: F_circuit, z_i: Some(vec![Fr::from(3_u32)]), z_i1: Some(vec![Fr::from(35_u32)]), }; - wrapper_circuit.generate_constraints(cs.clone()).unwrap(); + wrapper_circuit.generate_constraints(cs.clone())?; assert_eq!(cs.num_constraints(), 3); + Ok(()) } #[test] - fn test_customtestfcircuit() { + fn test_customtestfcircuit() -> Result<(), Error> { let cs = ConstraintSystem::::new_ref(); let n_constraints = 1000; - let custom_circuit = CustomFCircuit::::new(n_constraints).unwrap(); + let custom_circuit = CustomFCircuit::::new(n_constraints)?; let z_i = vec![Fr::from(5_u32)]; let wrapper_circuit = WrapperCircuit::> { FC: custom_circuit, z_i: Some(z_i.clone()), - z_i1: Some(custom_circuit.step_native(0, z_i, vec![]).unwrap()), + z_i1: Some(custom_step_native(z_i, n_constraints)), }; - wrapper_circuit.generate_constraints(cs.clone()).unwrap(); + wrapper_circuit.generate_constraints(cs.clone())?; assert_eq!(cs.num_constraints(), n_constraints); + Ok(()) } } diff --git a/folding-schemes/src/frontend/utils.rs b/folding-schemes/src/frontend/utils.rs index 27d4ec98..51bceacc 100644 --- a/folding-schemes/src/frontend/utils.rs +++ b/folding-schemes/src/frontend/utils.rs @@ -1,50 +1,38 @@ use ark_ff::PrimeField; -use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar}; -use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; -#[cfg(test)] +use ark_r1cs_std::{ + alloc::AllocVar, + fields::{fp::FpVar, FieldVar}, +}; +use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; use ark_std::marker::PhantomData; use ark_std::{fmt::Debug, Zero}; use super::FCircuit; use crate::Error; -/// DummyCircuit is a circuit that has dummy state and external inputs whose -/// lengths are specified in the `state_len` and `external_inputs_len` -/// parameters, without any constraints. +/// DummyCircuit is a circuit that has dummy state whose length is specified in the `state_len` +/// parameter, without any constraints. 
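Before the dummy and test circuits below, a minimal sketch of a user-defined `FCircuit` under the reworked trait: there is no `step_native` anymore, and external inputs are carried by the two associated types. `SquareFCircuit` is a hypothetical name, and it uses empty external inputs, just like the test circuits in this diff; `FCircuit` and `Error` are assumed to be in scope as in this file.

use ark_ff::PrimeField;
use ark_r1cs_std::fields::{fp::FpVar, FieldVar};
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use ark_std::marker::PhantomData;

#[derive(Clone, Copy, Debug)]
pub struct SquareFCircuit<F: PrimeField> {
    _f: PhantomData<F>,
}

impl<F: PrimeField> FCircuit<F> for SquareFCircuit<F> {
    type Params = ();
    // this circuit takes no external inputs; richer circuits define their own
    // struct here plus an `AllocVar` counterpart for the in-circuit variant
    type ExternalInputs = ();
    type ExternalInputsVar = ();

    fn new(_params: Self::Params) -> Result<Self, Error> {
        Ok(Self { _f: PhantomData })
    }

    fn state_len(&self) -> usize {
        1
    }

    fn generate_step_constraints(
        &self,
        _cs: ConstraintSystemRef<F>,
        _i: usize,
        z_i: Vec<FpVar<F>>,
        _external_inputs: Self::ExternalInputsVar,
    ) -> Result<Vec<FpVar<F>>, SynthesisError> {
        // z_{i+1} = z_i^2
        Ok(vec![z_i[0].square()?])
    }
}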
#[derive(Clone, Debug)] pub struct DummyCircuit { state_len: usize, - external_inputs_len: usize, } impl FCircuit for DummyCircuit { - type Params = (usize, usize); + type Params = usize; + type ExternalInputs = (); + type ExternalInputsVar = (); - fn new((state_len, external_inputs_len): Self::Params) -> Result { - Ok(Self { - state_len, - external_inputs_len, - }) + fn new(state_len: Self::Params) -> Result { + Ok(Self { state_len }) } fn state_len(&self) -> usize { self.state_len } - fn external_inputs_len(&self) -> usize { - self.external_inputs_len - } - fn step_native( - &self, - _i: usize, - _z_i: Vec, - _external_inputs: Vec, - ) -> Result, Error> { - Ok(vec![F::zero(); self.state_len]) - } fn generate_step_constraints( &self, cs: ConstraintSystemRef, _i: usize, _z_i: Vec>, - _external_inputs: Vec>, + _external_inputs: Self::ExternalInputsVar, ) -> Result>, SynthesisError> { Vec::new_witness(cs.clone(), || Ok(vec![Zero::zero(); self.state_len])) } @@ -63,29 +51,21 @@ pub struct CubicFCircuit { #[cfg(test)] impl FCircuit for CubicFCircuit { type Params = (); + type ExternalInputs = (); + type ExternalInputsVar = (); + fn new(_params: Self::Params) -> Result { Ok(Self { _f: PhantomData }) } fn state_len(&self) -> usize { 1 } - fn external_inputs_len(&self) -> usize { - 0 - } - fn step_native( - &self, - _i: usize, - z_i: Vec, - _external_inputs: Vec, - ) -> Result, Error> { - Ok(vec![z_i[0] * z_i[0] * z_i[0] + z_i[0] + F::from(5_u32)]) - } fn generate_step_constraints( &self, cs: ConstraintSystemRef, _i: usize, z_i: Vec>, - _external_inputs: Vec>, + _external_inputs: Self::ExternalInputsVar, ) -> Result>, SynthesisError> { let five = FpVar::::new_constant(cs.clone(), F::from(5u32))?; let z_i = z_i[0].clone(); @@ -94,18 +74,25 @@ impl FCircuit for CubicFCircuit { } } +/// Native implementation of `CubicFCircuit` +#[cfg(test)] +pub fn cubic_step_native(z_i: Vec) -> Vec { + let z = z_i[0]; + vec![z * z * z + z + F::from(5)] +} + /// CustomFCircuit is a circuit that has the number of constraints specified in the /// `n_constraints` parameter. Note that the generated circuit will have very sparse matrices. -#[cfg(test)] #[derive(Clone, Copy, Debug)] pub struct CustomFCircuit { _f: PhantomData, pub n_constraints: usize, } -#[cfg(test)] impl FCircuit for CustomFCircuit { type Params = usize; + type ExternalInputs = (); + type ExternalInputsVar = (); fn new(params: Self::Params) -> Result { Ok(Self { @@ -116,37 +103,32 @@ impl FCircuit for CustomFCircuit { fn state_len(&self) -> usize { 1 } - fn external_inputs_len(&self) -> usize { - 0 - } - fn step_native( - &self, - _i: usize, - z_i: Vec, - _external_inputs: Vec, - ) -> Result, Error> { - let mut z_i1 = F::one(); - for _ in 0..self.n_constraints - 1 { - z_i1 *= z_i[0]; - } - Ok(vec![z_i1]) - } fn generate_step_constraints( &self, - cs: ConstraintSystemRef, + _cs: ConstraintSystemRef, _i: usize, z_i: Vec>, - _external_inputs: Vec>, + _external_inputs: Self::ExternalInputsVar, ) -> Result>, SynthesisError> { - let mut z_i1 = FpVar::::new_witness(cs.clone(), || Ok(F::one()))?; + let mut z_i1 = z_i[0].clone(); for _ in 0..self.n_constraints - 1 { - z_i1 *= z_i[0].clone(); + z_i1 = z_i1.square()?; } Ok(vec![z_i1]) } } +/// Native implementation of `CustomFCircuit` +#[cfg(test)] +pub fn custom_step_native(z_i: Vec, n_constraints: usize) -> Vec { + let mut z_i1 = z_i[0]; + for _ in 0..n_constraints - 1 { + z_i1 = z_i1.square(); + } + vec![z_i1] +} + /// WrapperCircuit is a circuit that wraps any circuit that implements the FCircuit trait.
This /// is used to test the `FCircuit.generate_step_constraints` method. This is a similar wrapping /// than the one done in the `AugmentedFCircuit`, but without adding all the extra constraints @@ -158,19 +140,17 @@ pub struct WrapperCircuit> { pub z_i1: Option>, } -impl ark_relations::r1cs::ConstraintSynthesizer for WrapperCircuit -where - F: PrimeField, - FC: FCircuit, -{ +impl> ConstraintSynthesizer for WrapperCircuit { fn generate_constraints(self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { let z_i = Vec::>::new_witness(cs.clone(), || Ok(self.z_i.unwrap_or(vec![F::zero()])))?; let z_i1 = Vec::>::new_input(cs.clone(), || Ok(self.z_i1.unwrap_or(vec![F::zero()])))?; + let external_inputs = + FC::ExternalInputsVar::new_input(cs.clone(), || Ok(FC::ExternalInputs::default()))?; let computed_z_i1 = self.FC - .generate_step_constraints(cs.clone(), 0, z_i.clone(), vec![])?; + .generate_step_constraints(cs.clone(), 0, z_i.clone(), external_inputs)?; use ark_r1cs_std::eq::EqGadget; computed_z_i1.enforce_equal(&z_i1)?; diff --git a/folding-schemes/src/lib.rs b/folding-schemes/src/lib.rs index 890d6543..fe4cd2f0 100644 --- a/folding-schemes/src/lib.rs +++ b/folding-schemes/src/lib.rs @@ -2,14 +2,27 @@ #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] -use ark_ec::{pairing::Pairing, CurveGroup}; -use ark_ff::PrimeField; +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::{ + pairing::Pairing, + short_weierstrass::{Projective, SWCurveConfig}, + CurveGroup, +}; +use ark_ff::{Fp, FpConfig, PrimeField}; +use ark_r1cs_std::{ + fields::{fp::FpVar, FieldVar}, + groups::{curves::short_weierstrass::ProjectiveVar, CurveVar}, +}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use ark_std::rand::CryptoRng; -use ark_std::{fmt::Debug, rand::RngCore}; +use ark_std::{ + fmt::Debug, + rand::{CryptoRng, RngCore}, +}; use thiserror::Error; +use crate::folding::traits::{Inputize, InputizeNonNative}; use crate::frontend::FCircuit; +use crate::transcript::AbsorbNonNative; pub mod arith; pub mod commitment; @@ -89,6 +102,16 @@ pub enum Error { #[error("Commitment verification failed")] CommitmentVerificationFail, + // Polynomial IOP errors, from https://github.com/EspressoSystems/hyperplonk/blob/main/subroutines/src/poly_iop/errors.rs + #[error("Invalid Polynomial IOP Prover: {0}")] + InvalidPolyIOPProver(String), + #[error("Invalid Polynomial IOP Verifier: {0}")] + InvalidPolyIOPVerifier(String), + #[error("Invalid Polynomial IOP Proof: {0}")] + InvalidPolyIOPProof(String), + #[error("Invalid Polynomial IOP Parameters: {0}")] + InvalidPolyIOPParameters(String), + // Other #[error("{0}")] Other(String), @@ -121,11 +144,11 @@ pub enum Error { /// coordinates) are in the C1::ScalarField. /// /// In other words, C1.Fq == C2.Fr, and C1.Fr == C2.Fq. -pub trait FoldingScheme: Clone + Debug -where - C1: CurveGroup, - C2::BaseField: PrimeField, +pub trait FoldingScheme< + C1: Curve, + C2: Curve, FC: FCircuit, +>: Clone + Debug { type PreprocessorParam: Debug + Clone; type ProverParam: Debug + Clone + CanonicalSerialize; @@ -174,7 +197,7 @@ where fn prove_step( &mut self, rng: impl RngCore, - external_inputs: Vec, + external_inputs: FC::ExternalInputs, other_instances: Option, ) -> Result<(), Error>; @@ -199,11 +222,11 @@ where /// Trait with auxiliary methods for multi-folding schemes (ie. HyperNova, ProtoGalaxy, etc), /// allowing to create new instances for the multifold. 
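A short usage sketch of the updated `prove_step` above: external inputs are now typed per `FCircuit`, so a circuit with `ExternalInputs = ()` simply passes the unit value at each step, exactly as the ProtoGalaxy test earlier in this diff does. Here `nova`, `rng` and `num_steps` are assumed to have been set up beforehand.

for _ in 0..num_steps {
    // `()` is this circuit's ExternalInputs type; circuits with real external
    // inputs pass their own typed value here instead of the old Vec<F>
    nova.prove_step(&mut rng, (), None)?;
}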
-pub trait MultiFolding: Clone + Debug -where - C1: CurveGroup, - C2::BaseField: PrimeField, +pub trait MultiFolding< + C1: Curve, + C2: Curve, FC: FCircuit, +>: Clone + Debug { type RunningInstance: Debug; type IncomingInstance: Debug; @@ -214,7 +237,7 @@ where &self, rng: impl RngCore, state: Vec, - external_inputs: Vec, + external_inputs: FC::ExternalInputs, ) -> Result; /// Creates a new IncomingInstance for the given state, to be folded in the multi-folding step. @@ -222,18 +245,16 @@ where &self, rng: impl RngCore, state: Vec, - external_inputs: Vec, + external_inputs: FC::ExternalInputs, ) -> Result; } pub trait Decider< - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, FC: FCircuit, FS: FoldingScheme, -> where - C1: CurveGroup, - C2::BaseField: PrimeField, +> { type PreprocessorParam: Debug; type ProverParam: Clone; @@ -245,7 +266,6 @@ pub trait Decider< fn preprocess( rng: impl RngCore + CryptoRng, prep_param: Self::PreprocessorParam, - fs: FS, ) -> Result<(Self::ProverParam, Self::VerifierParam), Error>; fn prove( @@ -268,10 +288,11 @@ pub trait Decider< } /// DeciderOnchain extends the Decider into preparing the calldata -pub trait DeciderOnchain -where - C1: CurveGroup, - C2::BaseField: PrimeField, +pub trait DeciderOnchain< + E: Pairing, + C1: Curve, + C2: Curve, +> { type Proof; type CommittedInstance: Clone + Debug; @@ -285,3 +306,32 @@ where proof: Self::Proof, ) -> Result, Error>; } + +/// `Field` trait is a wrapper around `PrimeField` that also includes the +/// necessary bounds for the field to be used conveniently in folding schemes. +pub trait Field: + PrimeField + Absorb + AbsorbNonNative + Inputize +{ + /// The in-circuit variable type for this field. + type Var: FieldVar; +} + +impl, const N: usize> Field for Fp { + type Var = FpVar; +} + +/// `Curve` trait is a wrapper around `CurveGroup` that also includes the +/// necessary bounds for the curve to be used conveniently in folding schemes. +pub trait Curve: + CurveGroup + + AbsorbNonNative + + Inputize + + InputizeNonNative +{ + /// The in-circuit variable type for this curve. + type Var: CurveVar; +} + +impl> Curve for Projective
<P>
{ + type Var = ProjectiveVar>; +} diff --git a/folding-schemes/src/transcript/mod.rs b/folding-schemes/src/transcript/mod.rs index 2c728d46..de3a0572 100644 --- a/folding-schemes/src/transcript/mod.rs +++ b/folding-schemes/src/transcript/mod.rs @@ -1,9 +1,7 @@ use ark_crypto_primitives::sponge::{constraints::CryptographicSpongeVar, CryptographicSponge}; use ark_ec::CurveGroup; use ark_ff::PrimeField; -use ark_r1cs_std::{ - boolean::Boolean, fields::fp::FpVar, groups::CurveVar, ToConstraintFieldGadget, -}; +use ark_r1cs_std::{boolean::Boolean, fields::fp::FpVar, groups::CurveVar}; use ark_relations::r1cs::SynthesisError; pub mod poseidon; @@ -11,14 +9,14 @@ pub mod poseidon; /// An interface for objects that can be absorbed by a `Transcript`. /// /// Matches `Absorb` in `ark-crypto-primitives`. -pub trait AbsorbNonNative { +pub trait AbsorbNonNative { /// Converts the object into field elements that can be absorbed by a `Transcript`. /// Append the list to `dest` - fn to_native_sponge_field_elements(&self, dest: &mut Vec); + fn to_native_sponge_field_elements(&self, dest: &mut Vec); /// Converts the object into field elements that can be absorbed by a `Transcript`. /// Return the list as `Vec` - fn to_native_sponge_field_elements_as_vec(&self) -> Vec { + fn to_native_sponge_field_elements_as_vec(&self) -> Vec { let mut result = Vec::new(); self.to_native_sponge_field_elements(&mut result); result @@ -34,6 +32,30 @@ pub trait AbsorbNonNativeGadget { fn to_native_sponge_field_elements(&self) -> Result>, SynthesisError>; } +impl AbsorbNonNative for [T] { + fn to_native_sponge_field_elements(&self, dest: &mut Vec) { + for t in self.iter() { + t.to_native_sponge_field_elements(dest); + } + } +} + +impl> AbsorbNonNativeGadget for &T { + fn to_native_sponge_field_elements(&self) -> Result>, SynthesisError> { + T::to_native_sponge_field_elements(self) + } +} + +impl> AbsorbNonNativeGadget for [T] { + fn to_native_sponge_field_elements(&self) -> Result>, SynthesisError> { + let mut result = Vec::new(); + for t in self.iter() { + result.extend(t.to_native_sponge_field_elements()?); + } + Ok(result) + } +} + pub trait Transcript: CryptographicSponge { /// `absorb_point` is for absorbing points whose `BaseField` is the field of /// the sponge, i.e., the type `C` of these points should satisfy @@ -57,7 +79,7 @@ pub trait Transcript: CryptographicSponge { /// Note that although a `CommittedInstance` for `AugmentedFCircuit` on /// the primary curve also contains non-native elements, we still regard /// it as native, because the sponge is on the same curve. - fn absorb_nonnative>(&mut self, v: &V); + fn absorb_nonnative(&mut self, v: &V); fn get_challenge(&mut self) -> F; /// get_challenge_nbits returns a field element of size nbits @@ -74,7 +96,7 @@ pub trait TranscriptVar: /// /// If the sponge field `F` is `C::ScalarField`, call `absorb_nonnative` /// instead. 
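A hedged usage sketch of the relaxed `absorb_nonnative` bound above (the concrete curve and sponge field are illustrative; `Transcript` and `poseidon_canonical_config` from this crate are assumed in scope): any `AbsorbNonNative` value, e.g. a point whose coordinates live in a different field than the sponge, can now be absorbed directly.

use ark_bn254::{Fr, G1Projective};
use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge};
use ark_ec::PrimeGroup;

#[test]
fn absorb_nonnative_sketch() {
    let config = poseidon_canonical_config::<Fr>();
    let mut transcript = PoseidonSponge::<Fr>::new(&config);
    // G1 coordinates live in Fq, i.e. non-native to the Fr sponge; internally
    // this absorbs p.to_native_sponge_field_elements_as_vec::<Fr>()
    let p = G1Projective::generator();
    transcript.absorb_nonnative(&p);
    let _challenge: Fr = transcript.get_challenge();
}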
- fn absorb_point, GC: CurveVar + ToConstraintFieldGadget>( + fn absorb_point, GC: CurveVar>( &mut self, v: &GC, ) -> Result<(), SynthesisError>; diff --git a/folding-schemes/src/transcript/poseidon.rs b/folding-schemes/src/transcript/poseidon.rs index 38131094..90709757 100644 --- a/folding-schemes/src/transcript/poseidon.rs +++ b/folding-schemes/src/transcript/poseidon.rs @@ -5,9 +5,7 @@ use ark_crypto_primitives::sponge::{ }; use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{BigInteger, PrimeField}; -use ark_r1cs_std::{ - boolean::Boolean, fields::fp::FpVar, groups::CurveVar, ToConstraintFieldGadget, -}; +use ark_r1cs_std::{boolean::Boolean, fields::fp::FpVar, groups::CurveVar}; use ark_relations::r1cs::SynthesisError; use super::{AbsorbNonNative, AbsorbNonNativeGadget, Transcript, TranscriptVar}; @@ -15,15 +13,12 @@ use super::{AbsorbNonNative, AbsorbNonNativeGadget, Transcript, TranscriptVar}; impl Transcript for PoseidonSponge { // Compatible with the in-circuit `TranscriptVar::absorb_point` fn absorb_point>(&mut self, p: &C) { - let (x, y) = match p.into_affine().xy() { - Some((&x, &y)) => (x, y), - None => (C::BaseField::zero(), C::BaseField::zero()), - }; + let (x, y) = p.into_affine().xy().unwrap_or_default(); self.absorb(&x); self.absorb(&y); } - fn absorb_nonnative>(&mut self, v: &V) { - self.absorb(&v.to_native_sponge_field_elements_as_vec()); + fn absorb_nonnative(&mut self, v: &V) { + self.absorb(&v.to_native_sponge_field_elements_as_vec::()); } fn get_challenge(&mut self) -> F { let c = self.squeeze_field_elements(1); @@ -43,10 +38,7 @@ impl Transcript for PoseidonSponge { } impl TranscriptVar> for PoseidonSpongeVar { - fn absorb_point< - C: CurveGroup, - GC: CurveVar + ToConstraintFieldGadget, - >( + fn absorb_point, GC: CurveVar>( &mut self, v: &GC, ) -> Result<(), SynthesisError> { @@ -77,7 +69,7 @@ impl TranscriptVar> for PoseidonSpongeVar /// `GC.scalar_mul_le` method. 
fn get_challenge_nbits(&mut self, nbits: usize) -> Result>, SynthesisError> { let bits = self.squeeze_bits(nbits)?; - self.absorb(&Boolean::le_bits_to_fp_var(&bits)?)?; + self.absorb(&Boolean::le_bits_to_fp(&bits)?)?; Ok(bits) } fn get_challenges(&mut self, n: usize) -> Result>, SynthesisError> { @@ -119,11 +111,8 @@ pub fn poseidon_canonical_config() -> PoseidonConfig { #[cfg(test)] pub mod tests { - use crate::folding::circuits::nonnative::affine::NonNativeAffineVar; - - use super::*; use ark_bn254::{constraints::GVar, g1::Config, Fq, Fr, G1Projective as G1}; - use ark_ec::Group; + use ark_ec::PrimeGroup; use ark_ff::UniformRand; use ark_r1cs_std::{ alloc::AllocVar, groups::curves::short_weierstrass::ProjectiveVar, R1CSVar, @@ -131,9 +120,13 @@ pub mod tests { use ark_relations::r1cs::ConstraintSystem; use ark_std::test_rng; + use super::*; + use crate::folding::circuits::nonnative::affine::NonNativeAffineVar; + use crate::Error; + // Test with value taken from https://github.com/iden3/circomlibjs/blob/43cc582b100fc3459cf78d903a6f538e5d7f38ee/test/poseidon.js#L32 #[test] - fn check_against_circom_poseidon() { + fn check_against_circom_poseidon() -> Result<(), Error> { use ark_bn254::Fr; use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge}; use std::str::FromStr; @@ -142,8 +135,12 @@ pub mod tests { let mut poseidon_sponge: PoseidonSponge<_> = CryptographicSponge::new(&config); let v: Vec = vec!["1", "2", "3", "4"] .into_iter() - .map(|x| Fr::from_str(x).unwrap()) - .collect(); + .map(|x| { + Fr::from_str(x).map_err(|_| { + Error::ConversionError("str".to_string(), "Fr".to_string(), x.to_string()) + }) + }) + .collect::, Error>>()?; poseidon_sponge.absorb(&v); poseidon_sponge.squeeze_field_elements::(1); assert!( @@ -151,12 +148,19 @@ pub mod tests { == Fr::from_str( "18821383157269793795438455681495246036402687001665670618754263018637548127333" ) - .unwrap() + .map_err(|_| { + Error::ConversionError( + "str".to_string(), + "Fr".to_string(), + "hardcoded string".to_string(), + ) + })? 
); + Ok(()) } #[test] - fn test_transcript_and_transcriptvar_absorb_native_point() { + fn test_transcript_and_transcriptvar_absorb_native_point() -> Result<(), Error> { // use 'native' transcript let config = poseidon_canonical_config::(); let mut tr = PoseidonSponge::::new(&config); @@ -172,17 +176,17 @@ pub mod tests { let p_var = ProjectiveVar::>::new_witness( ConstraintSystem::::new_ref(), || Ok(p), - ) - .unwrap(); - tr_var.absorb_point(&p_var).unwrap(); - let c_var = tr_var.get_challenge().unwrap(); + )?; + tr_var.absorb_point(&p_var)?; + let c_var = tr_var.get_challenge()?; // assert that native & gadget transcripts return the same challenge - assert_eq!(c, c_var.value().unwrap()); + assert_eq!(c, c_var.value()?); + Ok(()) } #[test] - fn test_transcript_and_transcriptvar_absorb_nonnative_point() { + fn test_transcript_and_transcriptvar_absorb_nonnative_point() -> Result<(), Error> { // use 'native' transcript let config = poseidon_canonical_config::(); let mut tr = PoseidonSponge::::new(&config); @@ -196,17 +200,17 @@ pub mod tests { let cs = ConstraintSystem::::new_ref(); let mut tr_var = PoseidonSpongeVar::::new(cs.clone(), &config); let p_var = - NonNativeAffineVar::::new_witness(ConstraintSystem::::new_ref(), || Ok(p)) - .unwrap(); - tr_var.absorb_nonnative(&p_var).unwrap(); - let c_var = tr_var.get_challenge().unwrap(); + NonNativeAffineVar::::new_witness(ConstraintSystem::::new_ref(), || Ok(p))?; + tr_var.absorb_nonnative(&p_var)?; + let c_var = tr_var.get_challenge()?; // assert that native & gadget transcripts return the same challenge - assert_eq!(c, c_var.value().unwrap()); + assert_eq!(c, c_var.value()?); + Ok(()) } #[test] - fn test_transcript_and_transcriptvar_get_challenge() { + fn test_transcript_and_transcriptvar_get_challenge() -> Result<(), Error> { // use 'native' transcript let config = poseidon_canonical_config::(); let mut tr = PoseidonSponge::::new(&config); @@ -216,16 +220,17 @@ pub mod tests { // use 'gadget' transcript let cs = ConstraintSystem::::new_ref(); let mut tr_var = PoseidonSpongeVar::::new(cs.clone(), &config); - let v = FpVar::::new_witness(cs.clone(), || Ok(Fr::from(42_u32))).unwrap(); - tr_var.absorb(&v).unwrap(); - let c_var = tr_var.get_challenge().unwrap(); + let v = FpVar::::new_witness(cs.clone(), || Ok(Fr::from(42_u32)))?; + tr_var.absorb(&v)?; + let c_var = tr_var.get_challenge()?; // assert that native & gadget transcripts return the same challenge - assert_eq!(c, c_var.value().unwrap()); + assert_eq!(c, c_var.value()?); + Ok(()) } #[test] - fn test_transcript_and_transcriptvar_nbits() { + fn test_transcript_and_transcriptvar_nbits() -> Result<(), Error> { let nbits = crate::constants::NOVA_N_BITS_RO; // use 'native' transcript @@ -239,36 +244,31 @@ pub mod tests { // use 'gadget' transcript let cs = ConstraintSystem::::new_ref(); let mut tr_var = PoseidonSpongeVar::::new(cs.clone(), &config); - let v = FpVar::::new_witness(cs.clone(), || Ok(Fq::from(42_u32))).unwrap(); - tr_var.absorb(&v).unwrap(); + let v = FpVar::::new_witness(cs.clone(), || Ok(Fq::from(42_u32)))?; + tr_var.absorb(&v)?; // get challenge from circuit transcript - let c_var = tr_var.get_challenge_nbits(nbits).unwrap(); + let c_var = tr_var.get_challenge_nbits(nbits)?; let P = G1::generator(); - let PVar = GVar::new_witness(cs.clone(), || Ok(P)).unwrap(); + let PVar = GVar::new_witness(cs.clone(), || Ok(P))?; // multiply point P by the challenge in different formats, to ensure that we get the same // result natively and in-circuit // native c*P - let c_Fr = 
Fr::from_bigint(BigInteger::from_bits_le(&c_bits)).unwrap(); + let c_Fr = Fr::from_bigint(BigInteger::from_bits_le(&c_bits)).ok_or(Error::OutOfBounds)?; let cP_native = P * c_Fr; // native c*P using mul_bits_be (notice the .rev to convert the LE to BE) let cP_native_bits = P.mul_bits_be(c_bits.into_iter().rev()); // in-circuit c*P using scalar_mul_le - let cPVar = PVar.scalar_mul_le(c_var.iter()).unwrap(); + let cPVar = PVar.scalar_mul_le(c_var.iter())?; // check that they are equal - assert_eq!( - cP_native.into_affine(), - cPVar.value().unwrap().into_affine() - ); - assert_eq!( - cP_native_bits.into_affine(), - cPVar.value().unwrap().into_affine() - ); + assert_eq!(cP_native.into_affine(), cPVar.value()?.into_affine()); + assert_eq!(cP_native_bits.into_affine(), cPVar.value()?.into_affine()); + Ok(()) } } diff --git a/folding-schemes/src/utils/espresso/sum_check/mod.rs b/folding-schemes/src/utils/espresso/sum_check/mod.rs index efb8b501..2d472250 100644 --- a/folding-schemes/src/utils/espresso/sum_check/mod.rs +++ b/folding-schemes/src/utils/espresso/sum_check/mod.rs @@ -12,6 +12,7 @@ use crate::{ transcript::Transcript, utils::virtual_polynomial::{VPAuxInfo, VirtualPolynomial}, + Error, }; use ark_crypto_primitives::sponge::Absorb; use ark_ff::PrimeField; @@ -22,7 +23,6 @@ use std::{fmt::Debug, marker::PhantomData, sync::Arc}; use crate::utils::sum_check::structs::IOPProverMessage; use crate::utils::sum_check::structs::IOPVerifierState; -use espresso_subroutines::poly_iop::prelude::PolyIOPErrors; use structs::{IOPProof, IOPProverState}; mod prover; @@ -47,7 +47,7 @@ pub trait SumCheck { fn prove( poly: &Self::VirtualPolynomial, transcript: &mut impl Transcript, - ) -> Result; + ) -> Result; /// Verify the claimed sum using the proof fn verify( @@ -55,20 +55,17 @@ pub trait SumCheck { proof: &Self::SumCheckProof, aux_info: &Self::VPAuxInfo, transcript: &mut impl Transcript, - ) -> Result; + ) -> Result; } /// Trait for sum check protocol prover side APIs. -pub trait SumCheckProver -where - Self: Sized, -{ +pub trait SumCheckProver: Sized { type VirtualPolynomial; type ProverMessage; /// Initialize the prover state to argue for the sum of the input polynomial /// over {0,1}^`num_vars`. - fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result; + fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result; /// Receive message from verifier, generate prover message, and proceed to /// next round. @@ -77,7 +74,7 @@ where fn prove_round_and_update_state( &mut self, challenge: &Option, - ) -> Result; + ) -> Result; } /// Trait for sum check protocol verifier side APIs. @@ -100,7 +97,7 @@ pub trait SumCheckVerifier { &mut self, prover_msg: &Self::ProverMessage, transcript: &mut impl Transcript, - ) -> Result; + ) -> Result; /// This function verifies the deferred checks in the interactive version of /// the protocol; and generate the subclaim. 
Returns an error if the @@ -113,7 +110,7 @@ pub trait SumCheckVerifier { fn check_and_generate_subclaim( &self, asserted_sum: &F, - ) -> Result; + ) -> Result; } /// A SumCheckSubClaim is a claim generated by the verifier at the end of @@ -153,7 +150,7 @@ impl> SumCheck for IOPSumCheck fn prove( poly: &VirtualPolynomial, transcript: &mut impl Transcript, - ) -> Result, PolyIOPErrors> { + ) -> Result, Error> { transcript.absorb(&F::from(poly.aux_info.num_variables as u64)); transcript.absorb(&F::from(poly.aux_info.max_degree as u64)); let mut prover_state: IOPProverState = IOPProverState::prover_init(poly)?; @@ -181,7 +178,7 @@ impl> SumCheck for IOPSumCheck proof: &IOPProof, aux_info: &VPAuxInfo, transcript: &mut impl Transcript, - ) -> Result, PolyIOPErrors> { + ) -> Result, Error> { transcript.absorb(&F::from(aux_info.num_variables as u64)); transcript.absorb(&F::from(aux_info.max_degree as u64)); let mut verifier_state = IOPVerifierState::verifier_init(aux_info); @@ -209,40 +206,42 @@ pub mod tests { use ark_pallas::Fr; use ark_poly::DenseMultilinearExtension; use ark_poly::MultilinearExtension; - use ark_std::test_rng; + use ark_std::{test_rng, Zero}; use crate::transcript::poseidon::poseidon_canonical_config; use crate::utils::sum_check::SumCheck; use crate::utils::virtual_polynomial::VirtualPolynomial; + use crate::Error; use super::IOPSumCheck; #[test] - pub fn sumcheck_poseidon() { + pub fn sumcheck_poseidon() -> Result<(), Error> { let n_vars = 5; let mut rng = test_rng(); let poly_mle = DenseMultilinearExtension::rand(n_vars, &mut rng); let virtual_poly = VirtualPolynomial::new_from_mle(&Arc::new(poly_mle), Fr::ONE); - sumcheck_poseidon_opt(virtual_poly); + let _ = sumcheck_poseidon_opt(virtual_poly)?; // test with zero poly let poly_mle = DenseMultilinearExtension::from_evaluations_vec( n_vars, - vec![Fr::ZERO; 2u32.pow(n_vars as u32) as usize], + vec![Fr::zero(); 2u32.pow(n_vars as u32) as usize], ); let virtual_poly = VirtualPolynomial::new_from_mle(&Arc::new(poly_mle), Fr::ONE); - sumcheck_poseidon_opt(virtual_poly); + let _ = sumcheck_poseidon_opt(virtual_poly)?; + Ok(()) } - fn sumcheck_poseidon_opt(virtual_poly: VirtualPolynomial) { + fn sumcheck_poseidon_opt(virtual_poly: VirtualPolynomial) -> Result<(), Error> { let poseidon_config = poseidon_canonical_config::(); // sum-check prove let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); let sum_check = - IOPSumCheck::>::prove(&virtual_poly, &mut transcript_p).unwrap(); + IOPSumCheck::>::prove(&virtual_poly, &mut transcript_p)?; // sum-check verify let claimed_sum = IOPSumCheck::>::extract_sum(&sum_check); @@ -255,5 +254,6 @@ pub mod tests { ); assert!(res_verify.is_ok()); + Ok(()) } } diff --git a/folding-schemes/src/utils/espresso/sum_check/prover.rs b/folding-schemes/src/utils/espresso/sum_check/prover.rs index 24440d24..d9824b37 100644 --- a/folding-schemes/src/utils/espresso/sum_check/prover.rs +++ b/folding-schemes/src/utils/espresso/sum_check/prover.rs @@ -10,9 +10,12 @@ //! Prover subroutines for a SumCheck protocol. 
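Since the former `PolyIOPErrors` are gone, sum-check failures now surface through the crate-wide `Error` enum; a hedged sketch of a caller matching on them (`virtual_poly` and `transcript_p` are assumed to be set up as in the tests above):

let proof = match IOPSumCheck::<Fr, PoseidonSponge<Fr>>::prove(&virtual_poly, &mut transcript_p) {
    Ok(proof) => proof,
    // formerly PolyIOPErrors::InvalidProver; now a variant of the crate-wide Error
    Err(Error::InvalidPolyIOPProver(msg)) => panic!("sum-check prover rejected the input: {msg}"),
    Err(e) => return Err(e),
};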
use super::SumCheckProver; -use crate::utils::{ - lagrange_poly::compute_lagrange_interpolated_poly, multilinear_polynomial::fix_variables, - virtual_polynomial::VirtualPolynomial, +use crate::{ + utils::{ + lagrange_poly::compute_lagrange_interpolated_poly, multilinear_polynomial::fix_variables, + virtual_polynomial::VirtualPolynomial, + }, + Error, }; use ark_ff::{batch_inversion, PrimeField}; use ark_poly::DenseMultilinearExtension; @@ -21,7 +24,6 @@ use rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator}; use std::sync::Arc; use super::structs::{IOPProverMessage, IOPProverState}; -use espresso_subroutines::poly_iop::prelude::PolyIOPErrors; // #[cfg(feature = "parallel")] use rayon::iter::{IntoParallelRefMutIterator, ParallelIterator}; @@ -32,10 +34,10 @@ impl SumCheckProver for IOPProverState { /// Initialize the prover state to argue for the sum of the input polynomial /// over {0,1}^`num_vars`. - fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result { + fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result { let start = start_timer!(|| "sum check prover init"); if polynomial.aux_info.num_variables == 0 { - return Err(PolyIOPErrors::InvalidParameters( + return Err(Error::InvalidPolyIOPParameters( "Attempt to prove a constant.".to_string(), )); } @@ -62,13 +64,13 @@ impl SumCheckProver for IOPProverState { fn prove_round_and_update_state( &mut self, challenge: &Option, - ) -> Result { + ) -> Result { // let start = // start_timer!(|| format!("sum check prove {}-th round and update state", // self.round)); if self.round >= self.poly.aux_info.num_variables { - return Err(PolyIOPErrors::InvalidProver( + return Err(Error::InvalidPolyIOPProver( "Prover is not active".to_string(), )); } @@ -95,7 +97,7 @@ impl SumCheckProver for IOPProverState { if let Some(chal) = challenge { if self.round == 0 { - return Err(PolyIOPErrors::InvalidProver( + return Err(Error::InvalidPolyIOPProver( "first round should be prover first.".to_string(), )); } @@ -111,7 +113,7 @@ impl SumCheckProver for IOPProverState { // .iter_mut() // .for_each(|mle| *mle = fix_variables(mle, &[r])); } else if self.round > 0 { - return Err(PolyIOPErrors::InvalidProver( + return Err(Error::InvalidPolyIOPProver( "verifier message is empty".to_string(), )); } diff --git a/folding-schemes/src/utils/espresso/sum_check/verifier.rs b/folding-schemes/src/utils/espresso/sum_check/verifier.rs index 074eec18..7fb6e981 100644 --- a/folding-schemes/src/utils/espresso/sum_check/verifier.rs +++ b/folding-schemes/src/utils/espresso/sum_check/verifier.rs @@ -13,13 +13,12 @@ use super::{ structs::{IOPProverMessage, IOPVerifierState}, SumCheckSubClaim, SumCheckVerifier, }; -use crate::{transcript::Transcript, utils::virtual_polynomial::VPAuxInfo}; +use crate::{transcript::Transcript, utils::virtual_polynomial::VPAuxInfo, Error}; use ark_crypto_primitives::sponge::Absorb; use ark_ff::PrimeField; use ark_poly::Polynomial; use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; use ark_std::{end_timer, start_timer}; -use espresso_subroutines::poly_iop::prelude::PolyIOPErrors; #[cfg(feature = "parallel")] use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; @@ -48,12 +47,12 @@ impl SumCheckVerifier for IOPVerifierState { &mut self, prover_msg: & as SumCheckVerifier>::ProverMessage, transcript: &mut impl Transcript, - ) -> Result< as SumCheckVerifier>::Challenge, PolyIOPErrors> { + ) -> Result< as SumCheckVerifier>::Challenge, Error> { let start = start_timer!(|| format!("sum check 
verify {}-th round and update state", self.round)); if self.finished { - return Err(PolyIOPErrors::InvalidVerifier( + return Err(Error::InvalidPolyIOPVerifier( "Incorrect verifier state: Verifier is already finished.".to_string(), )); } @@ -84,16 +83,16 @@ impl SumCheckVerifier for IOPVerifierState { fn check_and_generate_subclaim( &self, asserted_sum: &F, - ) -> Result { + ) -> Result { let start = start_timer!(|| "sum check check and generate subclaim"); if !self.finished { - return Err(PolyIOPErrors::InvalidVerifier( + return Err(Error::InvalidPolyIOPVerifier( "Incorrect verifier state: Verifier has not finished.".to_string(), )); } if self.polynomials_received.len() != self.num_vars { - return Err(PolyIOPErrors::InvalidVerifier( + return Err(Error::InvalidPolyIOPVerifier( "insufficient rounds".to_string(), )); } @@ -109,9 +108,9 @@ impl SumCheckVerifier for IOPVerifierState { .map(|(coeffs, challenge)| { // Removed check on number of evaluations here since verifier receives polynomial in coeffs form let prover_poly = DensePolynomial::from_coefficients_slice(&coeffs); - Ok(prover_poly.evaluate(&challenge)) + prover_poly.evaluate(&challenge) }) - .collect::, PolyIOPErrors>>()?; + .collect::>(); #[cfg(not(feature = "parallel"))] let mut expected_vec = self @@ -122,9 +121,9 @@ impl SumCheckVerifier for IOPVerifierState { .map(|(coeffs, challenge)| { // Removed check on number of evaluations here since verifier receives polynomial in coeffs form let prover_poly = DensePolynomial::from_coefficients_slice(&coeffs); - Ok(prover_poly.evaluate(&challenge)) + prover_poly.evaluate(&challenge) }) - .collect::, PolyIOPErrors>>()?; + .collect::>(); // insert the asserted_sum to the first position of the expected vector expected_vec.insert(0, *asserted_sum); @@ -147,7 +146,7 @@ impl SumCheckVerifier for IOPVerifierState { // the deferred check during the interactive phase: // 1. check if the received 'P(0) + P(1) = expected`. if eval != expected { - return Err(PolyIOPErrors::InvalidProof( + return Err(Error::InvalidPolyIOPProof( "Prover message is not consistent with the claim.".to_string(), )); } @@ -172,7 +171,7 @@ impl SumCheckVerifier for IOPVerifierState { /// negligible compared to field operations. /// TODO: The quadratic term can be removed by precomputing the lagrange /// coefficients. 
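A quick worked example of the contract documented above, assuming (as the sum-check verifier does) that the slice holds the evaluations at 0, 1, ..., len-1 and that the call evaluates the interpolated polynomial at an arbitrary point; here p(x) = x^2 + 1.

use ark_pallas::Fr;

#[test]
fn interpolate_uni_poly_sketch() {
    // p(0) = 1, p(1) = 2, p(2) = 5 for p(x) = x^2 + 1
    let evals = vec![Fr::from(1u64), Fr::from(2u64), Fr::from(5u64)];
    // the unique degree-2 polynomial through these points evaluates to 10 at x = 3
    assert_eq!(interpolate_uni_poly(&evals, Fr::from(3u64)), Fr::from(10u64));
}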
-pub fn interpolate_uni_poly(p_i: &[F], eval_at: F) -> Result { +pub fn interpolate_uni_poly(p_i: &[F], eval_at: F) -> F { let start = start_timer!(|| "sum check interpolate uni poly opt"); let len = p_i.len(); @@ -269,7 +268,7 @@ pub fn interpolate_uni_poly(p_i: &[F], eval_at: F) -> Result VirtualPolynomial { let evals: Vec = self .flattened_ml_extensions .iter() - .map(|x| { - x.evaluate(point).unwrap() // safe unwrap here since we have - // already checked that num_var - // matches - }) + .map(|x| x.fix_variables(point)[0]) .collect(); let res = self @@ -414,6 +410,7 @@ pub fn bit_decompose(input: u64, num_var: usize) -> Vec { mod tests { use super::*; use crate::utils::multilinear_polynomial::tests::random_mle_list; + use crate::Error; use ark_ff::UniformRand; use ark_pallas::Fr; use ark_std::{ @@ -499,14 +496,15 @@ mod tests { } #[test] - fn test_eq_xr() { + fn test_eq_xr() -> Result<(), Error> { let mut rng = test_rng(); for nv in 4..10 { let r: Vec = (0..nv).map(|_| Fr::rand(&mut rng)).collect(); - let eq_x_r = build_eq_x_r(r.as_ref()).unwrap(); + let eq_x_r = build_eq_x_r(r.as_ref())?; let eq_x_r2 = build_eq_x_r_for_test(r.as_ref()); assert_eq!(eq_x_r, eq_x_r2); } + Ok(()) } /// Naive method to build eq(x, r). diff --git a/folding-schemes/src/utils/eth.rs b/folding-schemes/src/utils/eth.rs new file mode 100644 index 00000000..765d493f --- /dev/null +++ b/folding-schemes/src/utils/eth.rs @@ -0,0 +1,58 @@ +//! This module provides a trait and implementations for converting Rust types +//! to EVM calldata. +use ark_ec::{ + pairing::Pairing, + short_weierstrass::{Affine, Projective, SWCurveConfig}, + AffineRepr, CurveGroup, +}; +use ark_ff::{BigInteger, Fp, Fp2, Fp2Config, FpConfig, PrimeField}; +use ark_groth16::Proof; + +pub trait ToEth { + fn to_eth(&self) -> Vec; +} + +impl ToEth for [T] { + fn to_eth(&self) -> Vec { + self.iter().flat_map(ToEth::to_eth).collect() + } +} + +impl ToEth for u8 { + fn to_eth(&self) -> Vec { + vec![*self] + } +} + +impl, const N: usize> ToEth for Fp { + fn to_eth(&self) -> Vec { + self.into_bigint().to_bytes_be() + } +} + +impl> ToEth for Fp2
<P>
{ + fn to_eth(&self) -> Vec { + [self.c1.to_eth(), self.c0.to_eth()].concat() + } +} + +impl> ToEth for Affine
<P>
{ + fn to_eth(&self) -> Vec { + // the encoding of the additive identity is [0, 0] on the EVM + let (x, y) = self.xy().unwrap_or_default(); + + [x.to_eth(), y.to_eth()].concat() + } +} + +impl> ToEth for Projective
<P>
{ + fn to_eth(&self) -> Vec { + self.into_affine().to_eth() + } +} + +impl> ToEth for Proof { + fn to_eth(&self) -> Vec { + [self.a.to_eth(), self.b.to_eth(), self.c.to_eth()].concat() + } +} diff --git a/folding-schemes/src/utils/gadgets.rs b/folding-schemes/src/utils/gadgets.rs index e18a9e9d..7f0f7797 100644 --- a/folding-schemes/src/utils/gadgets.rs +++ b/folding-schemes/src/utils/gadgets.rs @@ -71,11 +71,8 @@ pub struct SparseMatrixVar { pub coeffs: Vec>, } -impl AllocVar, CF> for SparseMatrixVar -where - F: PrimeField, - CF: PrimeField, - FV: AllocVar, +impl> AllocVar, CF> + for SparseMatrixVar { fn new_variable>>( cs: impl Into>, diff --git a/folding-schemes/src/utils/lagrange_poly.rs b/folding-schemes/src/utils/lagrange_poly.rs index 22a38e1b..c2237108 100644 --- a/folding-schemes/src/utils/lagrange_poly.rs +++ b/folding-schemes/src/utils/lagrange_poly.rs @@ -53,7 +53,6 @@ mod tests { use ark_pallas::Fr; use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; use ark_std::UniformRand; - use espresso_subroutines::poly_iop::prelude::PolyIOPErrors; #[test] fn test_compute_lagrange_interpolated_poly() { @@ -76,7 +75,7 @@ mod tests { } #[test] - fn test_interpolation() -> Result<(), PolyIOPErrors> { + fn test_interpolation() { let mut prng = ark_std::test_rng(); // test a polynomial with 20 known points, i.e., with degree 19 @@ -86,10 +85,10 @@ mod tests { .collect::>(); let query = Fr::rand(&mut prng); - assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?); + assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)); assert_eq!( compute_lagrange_interpolated_poly(&evals).evaluate(&query), - interpolate_uni_poly(&evals, query)? + interpolate_uni_poly(&evals, query) ); // test a polynomial with 33 known points, i.e., with degree 32 @@ -99,10 +98,10 @@ mod tests { .collect::>(); let query = Fr::rand(&mut prng); - assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?); + assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)); assert_eq!( compute_lagrange_interpolated_poly(&evals).evaluate(&query), - interpolate_uni_poly(&evals, query)? + interpolate_uni_poly(&evals, query) ); // test a polynomial with 64 known points, i.e., with degree 63 @@ -112,12 +111,10 @@ mod tests { .collect::>(); let query = Fr::rand(&mut prng); - assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?); + assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)); assert_eq!( compute_lagrange_interpolated_poly(&evals).evaluate(&query), - interpolate_uni_poly(&evals, query)? 
+ interpolate_uni_poly(&evals, query) ); - - Ok(()) } } diff --git a/folding-schemes/src/utils/mle.rs b/folding-schemes/src/utils/mle.rs index 68c4aea8..c9c8b323 100644 --- a/folding-schemes/src/utils/mle.rs +++ b/folding-schemes/src/utils/mle.rs @@ -145,7 +145,7 @@ mod tests { for (i, A_row) in A_padded_dense.iter().enumerate() { for (j, _) in A_row.iter().enumerate() { let s_i_j = bhc.at_i(i * A_row.len() + j); - assert_eq!(A_mle.evaluate(&s_i_j).unwrap(), A_padded_dense[i][j]); + assert_eq!(A_mle.fix_variables(&s_i_j)[0], A_padded_dense[i][j]); } } } @@ -160,12 +160,12 @@ mod tests { let bhc = BooleanHypercube::new(z_mle.num_vars); for (i, z_i) in z.iter().enumerate() { let s_i = bhc.at_i(i); - assert_eq!(z_mle.evaluate(&s_i).unwrap(), z_i.clone()); + assert_eq!(z_mle.fix_variables(&s_i)[0], z_i.clone()); } // for the rest of elements of the boolean hypercube, expect it to evaluate to zero for i in (z.len())..(1 << z_mle.num_vars) { let s_i = bhc.at_i(i); - assert_eq!(z_mle.evaluate(&s_i).unwrap(), Fr::zero()); + assert_eq!(z_mle.fix_variables(&s_i)[0], Fr::zero()); } } diff --git a/folding-schemes/src/utils/mod.rs b/folding-schemes/src/utils/mod.rs index c63938fc..566ca0d3 100644 --- a/folding-schemes/src/utils/mod.rs +++ b/folding-schemes/src/utils/mod.rs @@ -2,16 +2,16 @@ use std::path::Path; use std::path::PathBuf; use ark_crypto_primitives::sponge::poseidon::PoseidonConfig; -use ark_ec::{AffineRepr, CurveGroup}; +use ark_ec::AffineRepr; use ark_ff::PrimeField; use ark_serialize::CanonicalSerialize; -use ark_std::Zero; use sha3::{Digest, Sha3_256}; use crate::arith::ArithSerializer; use crate::commitment::CommitmentScheme; -use crate::Error; +use crate::{Curve, Error}; +pub mod eth; pub mod gadgets; pub mod hypercube; pub mod lagrange_poly; @@ -36,11 +36,9 @@ pub fn powers_of(x: F, n: usize) -> Vec { /// returns the coordinates of a commitment point. 
This is compatible with the arkworks /// GC.to_constraint_field()[..2] -pub fn get_cm_coordinates(cm: &C) -> Vec { - let zero = (&C::BaseField::zero(), &C::BaseField::zero()); - let cm = cm.into_affine(); - let (cm_x, cm_y) = cm.xy().unwrap_or(zero); - vec![*cm_x, *cm_y] +pub fn get_cm_coordinates(cm: &C) -> Vec { + let (cm_x, cm_y) = cm.into_affine().xy().unwrap_or_default(); + vec![cm_x, cm_y] } /// returns the hash of the given public parameters of the Folding Scheme @@ -52,8 +50,8 @@ pub fn pp_hash( poseidon_config: &PoseidonConfig, ) -> Result where - C1: CurveGroup, - C2: CurveGroup, + C1: Curve, + C2: Curve, CS1: CommitmentScheme, CS2: CommitmentScheme, { diff --git a/folding-schemes/src/utils/vec.rs b/folding-schemes/src/utils/vec.rs index 5cf692ee..03978fdc 100644 --- a/folding-schemes/src/utils/vec.rs +++ b/folding-schemes/src/utils/vec.rs @@ -8,7 +8,7 @@ use ark_std::cfg_iter; use ark_std::rand::Rng; use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}; -use crate::Error; +use crate::{folding::traits::Dummy, Error}; #[derive(Clone, Debug, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] pub struct SparseMatrix { @@ -19,14 +19,22 @@ pub struct SparseMatrix { pub coeffs: R1CSMatrix, } -impl SparseMatrix { - pub fn empty() -> Self { +impl Dummy<(usize, usize)> for SparseMatrix { + fn dummy((n_rows, n_cols): (usize, usize)) -> Self { Self { - n_rows: 0, - n_cols: 0, - coeffs: vec![], + n_rows, + n_cols, + // unnecessary to allocate each row as the matrix is sparse + coeffs: vec![vec![]; n_rows], } } +} + +impl SparseMatrix { + pub fn empty() -> Self { + Self::dummy((0, 0)) + } + pub fn rand(rng: &mut R, n_rows: usize, n_cols: usize) -> Self { const ZERO_VAL_PROBABILITY: f64 = 0.8f64; @@ -200,7 +208,7 @@ pub mod tests { // test mat_vec_mul & mat_vec_mul_sparse #[test] - fn test_mat_vec_mul() { + fn test_mat_vec_mul() -> Result<(), Error> { let A = to_F_matrix::(vec![ vec![0, 1, 0, 0, 0, 0], vec![0, 0, 0, 1, 0, 0], @@ -209,12 +217,9 @@ pub mod tests { ]) .to_dense(); let z = to_F_vec(vec![1, 3, 35, 9, 27, 30]); + assert_eq!(mat_vec_mul_dense(&A, &z)?, to_F_vec(vec![3, 9, 30, 35])); assert_eq!( - mat_vec_mul_dense(&A, &z).unwrap(), - to_F_vec(vec![3, 9, 30, 35]) - ); - assert_eq!( - mat_vec_mul(&dense_matrix_to_sparse(A), &z).unwrap(), + mat_vec_mul(&dense_matrix_to_sparse(A), &z)?, to_F_vec(vec![3, 9, 30, 35]) ); @@ -222,29 +227,26 @@ pub mod tests { let v = to_F_vec(vec![19, 55, 50, 3]); assert_eq!( - mat_vec_mul_dense(&A.to_dense(), &v).unwrap(), + mat_vec_mul_dense(&A.to_dense(), &v)?, to_F_vec(vec![418, 1158, 979]) ); - assert_eq!(mat_vec_mul(&A, &v).unwrap(), to_F_vec(vec![418, 1158, 979])); + assert_eq!(mat_vec_mul(&A, &v)?, to_F_vec(vec![418, 1158, 979])); + Ok(()) } #[test] - fn test_hadamard_product() { + fn test_hadamard_product() -> Result<(), Error> { let a = to_F_vec::(vec![1, 2, 3, 4, 5, 6]); let b = to_F_vec(vec![7, 8, 9, 10, 11, 12]); - assert_eq!( - hadamard(&a, &b).unwrap(), - to_F_vec(vec![7, 16, 27, 40, 55, 72]) - ); + assert_eq!(hadamard(&a, &b)?, to_F_vec(vec![7, 16, 27, 40, 55, 72])); + Ok(()) } #[test] - fn test_vec_add() { + fn test_vec_add() -> Result<(), Error> { let a: Vec = to_F_vec::(vec![1, 2, 3, 4, 5, 6]); let b: Vec = to_F_vec(vec![7, 8, 9, 10, 11, 12]); - assert_eq!( - vec_add(&a, &b).unwrap(), - to_F_vec(vec![8, 10, 12, 14, 16, 18]) - ); + assert_eq!(vec_add(&a, &b)?, to_F_vec(vec![8, 10, 12, 14, 16, 18])); + Ok(()) } } diff --git a/frontends/Cargo.toml b/frontends/Cargo.toml deleted file mode 100644 index 
dec4be2f..00000000
--- a/frontends/Cargo.toml
+++ /dev/null
@@ -1,33 +0,0 @@
-[package]
-name = "frontends"
-version = "0.1.0"
-edition = "2021"
-
-[dependencies]
-ark-ff = { version = "^0.4.0", default-features = false, features = ["parallel", "asm"] }
-ark-std = { version = "^0.4.0", default-features = false, features = ["parallel"] }
-ark-relations = { version = "^0.4.0", default-features = false }
-# ark-r1cs-std is patched at the workspace level
-ark-r1cs-std = { version = "0.4.0", default-features = false, features = ["parallel"] }
-ark-serialize = { version = "^0.4.0", default-features = false }
-ark-circom = { git = "https://github.com/arnaucube/circom-compat", default-features = false }
-num-bigint = "0.4"
-ark-noname = { git = "https://github.com/dmpierre/ark-noname", branch = "feat/sonobe-integration" }
-noname = { git = "https://github.com/dmpierre/noname" }
-serde_json = "1.0.85" # to (de)serialize JSON
-acvm = { git = "https://github.com/noir-lang/noir", rev="2b4853e", default-features = false }
-noir_arkworks_backend = { package="arkworks_backend", git = "https://github.com/dmpierre/arkworks_backend", branch = "feat/sonobe-integration" }
-folding-schemes = { path = "../folding-schemes/"}
-
-[dev-dependencies]
-ark-bn254 = {version="0.4.0", features=["r1cs"]}
-
-# This allows the crate to be built when targeting WASM.
-# See more at: https://docs.rs/getrandom/#webassembly-support
-[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies]
-getrandom = { version = "0.2", features = ["js"] }
-
-[features]
-default = ["ark-circom/default", "parallel"]
-parallel = []
-wasm = ["ark-circom/wasm"]
diff --git a/frontends/src/circom/test_folder/compile.sh b/frontends/src/circom/test_folder/compile.sh
deleted file mode 100755
index 736c06c1..00000000
--- a/frontends/src/circom/test_folder/compile.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-circom ./frontends/src/circom/test_folder/cubic_circuit.circom --r1cs --sym --wasm --prime bn128 --output ./frontends/src/circom/test_folder/
-circom ./frontends/src/circom/test_folder/with_external_inputs.circom --r1cs --sym --wasm --prime bn128 --output ./frontends/src/circom/test_folder/
-circom ./frontends/src/circom/test_folder/no_external_inputs.circom --r1cs --sym --wasm --prime bn128 --output ./frontends/src/circom/test_folder/
diff --git a/frontends/src/noir/mod.rs b/frontends/src/noir/mod.rs
deleted file mode 100644
index a423b608..00000000
--- a/frontends/src/noir/mod.rs
+++ /dev/null
@@ -1,306 +0,0 @@
-use std::collections::HashMap;
-
-use acvm::{
-    acir::{
-        acir_field::GenericFieldElement,
-        circuit::{Circuit, Program},
-        native_types::{Witness as AcvmWitness, WitnessMap},
-    },
-    blackbox_solver::StubbedBlackBoxSolver,
-    pwg::ACVM,
-};
-use ark_ff::PrimeField;
-use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar};
-use ark_relations::r1cs::ConstraintSynthesizer;
-use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
-use folding_schemes::{frontend::FCircuit, utils::PathOrBin, Error};
-use noir_arkworks_backend::{
-    read_program_from_binary, read_program_from_file, sonobe_bridge::AcirCircuitSonobe,
-    FilesystemError,
-};
-
-#[derive(Clone, Debug)]
-pub struct NoirFCircuit<F: PrimeField> {
-    pub circuit: Circuit<GenericFieldElement<F>>,
-    pub state_len: usize,
-    pub external_inputs_len: usize,
-}
-
-impl<F: PrimeField> FCircuit<F> for NoirFCircuit<F> {
-    type Params = (PathOrBin, usize, usize);
-
-    fn new(params: Self::Params) -> Result<Self, Error> {
-        let (source, state_len, external_inputs_len) = params;
-        let program = match source {
-            PathOrBin::Path(path) => read_program_from_file(path),
-            PathOrBin::Bin(bytes) => read_program_from_binary(&bytes),
-        }
-        .map_err(|ee| Error::Other(format!("{:?}", ee)))?;
-        let circuit: Circuit<GenericFieldElement<F>> = program.functions[0].clone();
-        let ivc_input_length = circuit.public_parameters.0.len();
-        let ivc_return_length = circuit.return_values.0.len();
-
-        if ivc_input_length != ivc_return_length {
-            return Err(Error::NotSameLength(
-                "IVC input: ".to_string(),
-                ivc_input_length,
-                "IVC output: ".to_string(),
-                ivc_return_length,
-            ));
-        }
-
-        Ok(NoirFCircuit {
-            circuit,
-            state_len,
-            external_inputs_len,
-        })
-    }
-
-    fn state_len(&self) -> usize {
-        self.state_len
-    }
-
-    fn external_inputs_len(&self) -> usize {
-        self.external_inputs_len
-    }
-
-    fn step_native(
-        &self,
-        _i: usize,
-        z_i: Vec<F>,
-        external_inputs: Vec<F>, // inputs that are not part of the state
-    ) -> Result<Vec<F>, Error> {
-        let mut acvm = ACVM::new(
-            &StubbedBlackBoxSolver,
-            &self.circuit.opcodes,
-            WitnessMap::new(),
-            &[],
-            &[],
-        );
-
-        self.circuit
-            .public_parameters
-            .0
-            .iter()
-            .map(|witness| {
-                let idx: usize = witness.as_usize();
-                let value = z_i[idx].to_string();
-                let witness = AcvmWitness(witness.witness_index());
-                let f = GenericFieldElement::<F>::try_from_str(&value)
-                    .ok_or(SynthesisError::Unsatisfiable)?;
-                acvm.overwrite_witness(witness, f);
-                Ok(())
-            })
-            .collect::<Result<Vec<()>, SynthesisError>>()?;
-
-        // write witness values for external_inputs
-        self.circuit
-            .private_parameters
-            .iter()
-            .map(|witness| {
-                let idx = witness.as_usize() - z_i.len();
-                let value = external_inputs[idx].to_string();
-                let f = GenericFieldElement::<F>::try_from_str(&value)
-                    .ok_or(SynthesisError::Unsatisfiable)?;
-                acvm.overwrite_witness(AcvmWitness(witness.witness_index()), f);
-                Ok(())
-            })
-            .collect::<Result<Vec<()>, SynthesisError>>()?;
-        let _ = acvm.solve();
-
-        let witness_map = acvm.finalize();
-
-        // get the z_{i+1} output state
-        let assigned_z_i1 = self
-            .circuit
-            .return_values
-            .0
-            .iter()
-            .map(|witness| {
-                let noir_field_element = witness_map
-                    .get(witness)
-                    .ok_or(SynthesisError::AssignmentMissing)?;
-                Ok(noir_field_element.into_repr())
-            })
-            .collect::<Result<Vec<F>, SynthesisError>>()?;
-
-        Ok(assigned_z_i1)
-    }
-
-    fn generate_step_constraints(
-        &self,
-        cs: ConstraintSystemRef<F>,
-        _i: usize,
-        z_i: Vec<FpVar<F>>,
-        external_inputs: Vec<FpVar<F>>, // inputs that are not part of the state
-    ) -> Result<Vec<FpVar<F>>, SynthesisError> {
-        let mut acvm = ACVM::new(
-            &StubbedBlackBoxSolver,
-            &self.circuit.opcodes,
-            WitnessMap::new(),
-            &[],
-            &[],
-        );
-
-        let mut already_assigned_witness_values = HashMap::new();
-
-        self.circuit
-            .public_parameters
-            .0
-            .iter()
-            .map(|witness| {
-                let idx: usize = witness.as_usize();
-                let witness = AcvmWitness(witness.witness_index());
-                already_assigned_witness_values.insert(witness, &z_i[idx]);
-                let val = z_i[idx].value()?;
-                let value = if val == F::zero() {
-                    "0".to_string()
-                } else {
-                    val.to_string()
-                };
-
-                let f = GenericFieldElement::<F>::try_from_str(&value)
-                    .ok_or(SynthesisError::Unsatisfiable)?;
-                acvm.overwrite_witness(witness, f);
-                Ok(())
-            })
-            .collect::<Result<Vec<()>, SynthesisError>>()?;
-
-        // write witness values for external_inputs
-        self.circuit
-            .private_parameters
-            .iter()
-            .map(|witness| {
-                let idx = witness.as_usize() - z_i.len();
-                let witness = AcvmWitness(witness.witness_index());
-                already_assigned_witness_values.insert(witness, &external_inputs[idx]);
-
-                let val = external_inputs[idx].value()?;
-                let value = if val == F::zero() {
-                    "0".to_string()
-                } else {
-                    val.to_string()
-                };
-
-                let f = GenericFieldElement::<F>::try_from_str(&value)
-                    .ok_or(SynthesisError::Unsatisfiable)?;
-                acvm.overwrite_witness(witness, f);
-                Ok(())
-            })
-            .collect::<Result<Vec<()>, SynthesisError>>()?;
-
-        // computes the witness
-        let _ = acvm.solve();
-        let witness_map = acvm.finalize();
-
-        // get the z_{i+1} output state
-        let assigned_z_i1 = self
-            .circuit
-            .return_values
-            .0
-            .iter()
-            .map(|witness| {
-                let noir_field_element = witness_map
-                    .get(witness)
-                    .ok_or(SynthesisError::AssignmentMissing)?;
-                FpVar::<F>::new_witness(cs.clone(), || Ok(noir_field_element.into_repr()))
-            })
-            .collect::<Result<Vec<FpVar<F>>, SynthesisError>>()?;
-
-        // initialize circuit and set already assigned values
-        let mut acir_circuit = AcirCircuitSonobe::from((&self.circuit, witness_map));
-        acir_circuit.already_assigned_witnesses = already_assigned_witness_values;
-
-        acir_circuit.generate_constraints(cs.clone())?;
-
-        Ok(assigned_z_i1)
-    }
-}
-
-pub fn load_noir_circuit<F: PrimeField>(
-    path: String,
-) -> Result<Circuit<GenericFieldElement<F>>, FilesystemError> {
-    let program: Program<GenericFieldElement<F>> = read_program_from_file(path)?;
-    let circuit: Circuit<GenericFieldElement<F>> = program.functions[0].clone();
-    Ok(circuit)
-}
-
-#[cfg(test)]
-mod tests {
-    use crate::noir::load_noir_circuit;
-    use ark_bn254::Fr;
-    use ark_r1cs_std::R1CSVar;
-    use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar};
-    use ark_relations::r1cs::ConstraintSystem;
-    use folding_schemes::frontend::FCircuit;
-    use std::env;
-
-    use crate::noir::NoirFCircuit;
-
-    #[test]
-    fn test_step_native() {
-        let cur_path = env::current_dir().unwrap();
-        let circuit_path = format!(
-            "{}/src/noir/test_folder/test_circuit/target/test_circuit.json",
-            cur_path.to_str().unwrap()
-        );
-        let circuit = load_noir_circuit(circuit_path).unwrap();
-        let noirfcircuit = NoirFCircuit {
-            circuit,
-            state_len: 2,
-            external_inputs_len: 2,
-        };
-        let inputs = vec![Fr::from(2), Fr::from(5)];
-        let res = noirfcircuit.step_native(0, inputs.clone(), inputs);
-        assert!(res.is_ok());
-        assert_eq!(res.unwrap(), vec![Fr::from(4), Fr::from(25)]);
-    }
-
-    #[test]
-    fn test_step_constraints() {
-        let cs = ConstraintSystem::<Fr>::new_ref();
-        let cur_path = env::current_dir().unwrap();
-        let circuit_path = format!(
-            "{}/src/noir/test_folder/test_circuit/target/test_circuit.json",
-            cur_path.to_str().unwrap()
-        );
-        let circuit = load_noir_circuit(circuit_path).unwrap();
-        let noirfcircuit = NoirFCircuit {
-            circuit,
-            state_len: 2,
-            external_inputs_len: 2,
-        };
-        let inputs = vec![Fr::from(2), Fr::from(5)];
-        let z_i = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(inputs.clone())).unwrap();
-        let external_inputs = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(inputs)).unwrap();
-        let output = noirfcircuit
-            .generate_step_constraints(cs.clone(), 0, z_i, external_inputs)
-            .unwrap();
-        assert_eq!(output[0].value().unwrap(), Fr::from(4));
-        assert_eq!(output[1].value().unwrap(), Fr::from(25));
-    }
-
-    #[test]
-    fn test_step_constraints_no_external_inputs() {
-        let cs = ConstraintSystem::<Fr>::new_ref();
-        let cur_path = env::current_dir().unwrap();
-        let circuit_path = format!(
-            "{}/src/noir/test_folder/test_no_external_inputs/target/test_no_external_inputs.json",
-            cur_path.to_str().unwrap()
-        );
-        let circuit = load_noir_circuit(circuit_path).unwrap();
-        let noirfcircuit = NoirFCircuit {
-            circuit,
-            state_len: 2,
-            external_inputs_len: 0,
-        };
-        let inputs = vec![Fr::from(2), Fr::from(5)];
-        let z_i = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(inputs.clone())).unwrap();
-        let external_inputs = vec![];
-        let output = noirfcircuit
-            .generate_step_constraints(cs.clone(), 0, z_i, external_inputs)
-            .unwrap();
assert_eq!(output[0].value().unwrap(), Fr::from(4)); - assert_eq!(output[1].value().unwrap(), Fr::from(25)); - } -} diff --git a/solidity-verifiers/Cargo.toml b/solidity-verifiers/Cargo.toml index e165016b..db26d064 100644 --- a/solidity-verifiers/Cargo.toml +++ b/solidity-verifiers/Cargo.toml @@ -4,43 +4,37 @@ version = "0.1.0" edition = "2021" [dependencies] -ark-ec = "0.4" -ark-ff = "0.4" -ark-poly = "0.4" -ark-std = "0.4" -ark-groth16 = "0.4" +ark-groth16 = "^0.5.0" +ark-bn254 = { version = "^0.5.0", default-features = false, features = ["r1cs"] } +ark-poly-commit = "^0.5.0" +ark-serialize = "^0.5.0" askama = { version = "0.12.0", features = ["config"], default-features = false } -ark-bn254 = "0.4.0" -ark-poly-commit = "0.4.0" -itertools = "0.12.1" -ark-serialize = "0.4.1" revm = {version="3.5.0", default-features=false, features=["std"]} rust-crypto = "0.2" num-bigint = "0.4.3" folding-schemes = { path = "../folding-schemes/"} # without 'light-test' enabled [dev-dependencies] -ark-crypto-primitives = "0.4.0" -ark-r1cs-std = "0.4.0" -ark-relations = "0.4.0" -tracing = { version = "0.1", default-features = false, features = [ "attributes" ] } -tracing-subscriber = { version = "0.2" } -ark-bn254 = {version="0.4.0", features=["r1cs"]} -ark-grumpkin = {version="0.4.0", features=["r1cs"]} -rand = "0.8.5" +ark-ec = { version = "^0.5.0", default-features = false, features = ["parallel"] } +ark-ff = { version = "^0.5.0", default-features = false, features = ["parallel", "asm"] } +ark-std = { version = "^0.5.0", default-features = false, features = ["parallel"] } +ark-crypto-primitives = { version = "^0.5.0", default-features = false, features = ["sponge", "parallel"] } +ark-snark = { version = "^0.5.0", default-features = false } +ark-relations = { version = "^0.5.0", default-features = false } +ark-r1cs-std = { version = "^0.5.0", default-features = false, features = ["parallel"] } +ark-grumpkin = { version = "^0.5.0", default-features = false, features = ["r1cs"] } folding-schemes = { path = "../folding-schemes/", features=["light-test"]} -frontends = { path = "../frontends/"} +experimental-frontends = { path = "../experimental-frontends/"} noname = { git = "https://github.com/dmpierre/noname" } [features] default = ["parallel"] parallel = [ - "ark-std/parallel", - "ark-ff/parallel", - "ark-poly/parallel", - ] - + "ark-groth16/parallel", + "ark-poly-commit/parallel", + "folding-schemes/parallel", +] [[example]] name = "full_flow" diff --git a/solidity-verifiers/src/verifiers/g16.rs b/solidity-verifiers/src/verifiers/g16.rs index eda66fa3..116f4650 100644 --- a/solidity-verifiers/src/verifiers/g16.rs +++ b/solidity-verifiers/src/verifiers/g16.rs @@ -79,14 +79,13 @@ mod tests { ProtocolVerifierKey, }; use ark_bn254::{Bn254, Fr}; - use ark_crypto_primitives::snark::SNARK; use ark_ec::AffineRepr; use ark_ff::{BigInt, BigInteger, PrimeField}; use ark_groth16::Groth16; + use ark_snark::SNARK; use ark_std::rand::{RngCore, SeedableRng}; use ark_std::test_rng; use askama::Template; - use itertools::chain; use super::Groth16Verifier; use crate::verifiers::tests::{setup, DEFAULT_SETUP_LEN}; @@ -121,19 +120,19 @@ mod tests { let (a_x, a_y) = proof.a.xy().unwrap(); let (b_x, b_y) = proof.b.xy().unwrap(); let (c_x, c_y) = proof.c.xy().unwrap(); - let mut calldata: Vec = chain![ - FUNCTION_SELECTOR_GROTH16_VERIFY_PROOF, - a_x.into_bigint().to_bytes_be(), - a_y.into_bigint().to_bytes_be(), - b_x.c1.into_bigint().to_bytes_be(), - b_x.c0.into_bigint().to_bytes_be(), - b_y.c1.into_bigint().to_bytes_be(), - 
b_y.c0.into_bigint().to_bytes_be(), - c_x.into_bigint().to_bytes_be(), - c_y.into_bigint().to_bytes_be(), - BigInt::from(Fr::from(circuit.z)).to_bytes_be(), + let mut calldata: Vec = [ + &FUNCTION_SELECTOR_GROTH16_VERIFY_PROOF[..], + &a_x.into_bigint().to_bytes_be(), + &a_y.into_bigint().to_bytes_be(), + &b_x.c1.into_bigint().to_bytes_be(), + &b_x.c0.into_bigint().to_bytes_be(), + &b_y.c1.into_bigint().to_bytes_be(), + &b_y.c0.into_bigint().to_bytes_be(), + &c_x.into_bigint().to_bytes_be(), + &c_y.into_bigint().to_bytes_be(), + &BigInt::from(Fr::from(circuit.z)).to_bytes_be(), ] - .collect(); + .concat(); let (_, output) = evm.call(verifier_address, calldata.clone()); assert_eq!(*output.last().unwrap(), 1); diff --git a/solidity-verifiers/src/verifiers/kzg.rs b/solidity-verifiers/src/verifiers/kzg.rs index 9dc156d5..532c9ac3 100644 --- a/solidity-verifiers/src/verifiers/kzg.rs +++ b/solidity-verifiers/src/verifiers/kzg.rs @@ -86,7 +86,6 @@ mod tests { use ark_std::Zero; use ark_std::{test_rng, UniformRand}; use askama::Template; - use itertools::chain; use folding_schemes::{ commitment::{kzg::KZG, CommitmentScheme}, @@ -161,16 +160,16 @@ mod tests { let x = transcript_v.get_challenge(); let x = x.into_bigint().to_bytes_be(); - let mut calldata: Vec = chain![ - FUNCTION_SELECTOR_KZG10_CHECK, - x_comm.into_bigint().to_bytes_be(), - y_comm.into_bigint().to_bytes_be(), - x_proof.into_bigint().to_bytes_be(), - y_proof.into_bigint().to_bytes_be(), - x.clone(), - y, + let mut calldata: Vec = [ + &FUNCTION_SELECTOR_KZG10_CHECK[..], + &x_comm.into_bigint().to_bytes_be(), + &y_comm.into_bigint().to_bytes_be(), + &x_proof.into_bigint().to_bytes_be(), + &y_proof.into_bigint().to_bytes_be(), + &x, + &y, ] - .collect(); + .concat(); let (_, output) = evm.call(verifier_address, calldata.clone()); assert_eq!(*output.last().unwrap(), 1); diff --git a/solidity-verifiers/src/verifiers/mod.rs b/solidity-verifiers/src/verifiers/mod.rs index 8a3bf0d6..a61ee5da 100644 --- a/solidity-verifiers/src/verifiers/mod.rs +++ b/solidity-verifiers/src/verifiers/mod.rs @@ -53,7 +53,6 @@ pub trait ProtocolVerifierKey: CanonicalDeserialize + CanonicalSerialize { #[cfg(test)] pub mod tests { use ark_bn254::{Bn254, Fr, G1Projective as G1}; - use ark_crypto_primitives::snark::CircuitSpecificSetupSNARK; use ark_ff::PrimeField; use ark_groth16::Groth16; use ark_poly_commit::kzg10::VerifierKey as KZGVerifierKey; @@ -61,6 +60,7 @@ pub mod tests { use ark_r1cs_std::eq::EqGadget; use ark_r1cs_std::fields::fp::FpVar; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; + use ark_snark::CircuitSpecificSetupSNARK; use ark_std::rand::{RngCore, SeedableRng}; use ark_std::test_rng; use std::marker::PhantomData; diff --git a/solidity-verifiers/src/verifiers/nova_cyclefold.rs b/solidity-verifiers/src/verifiers/nova_cyclefold.rs index 6df52654..28d180ba 100644 --- a/solidity-verifiers/src/verifiers/nova_cyclefold.rs +++ b/solidity-verifiers/src/verifiers/nova_cyclefold.rs @@ -139,14 +139,13 @@ impl NovaCycleFoldVerifierKey { #[cfg(test)] mod tests { - use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as G1}; + use ark_bn254::{Bn254, Fr, G1Projective as G1}; use ark_ff::PrimeField; use ark_groth16::Groth16; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as G2}; + use ark_grumpkin::Projective as G2; use ark_r1cs_std::alloc::AllocVar; use ark_r1cs_std::fields::fp::FpVar; use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; - use ark_std::Zero; use askama::Template; use 
std::marker::PhantomData; use std::time::Instant; @@ -174,18 +173,9 @@ mod tests { NovaCycleFoldVerifierKey, ProtocolVerifierKey, }; - type NOVA = Nova, Pedersen, false>; - type DECIDER = DeciderEth< - G1, - GVar, - G2, - GVar2, - FC, - KZG<'static, Bn254>, - Pedersen, - Groth16, - NOVA, - >; + type NOVA = Nova, Pedersen, false>; + type DECIDER = + DeciderEth, Pedersen, Groth16, NOVA>; type FS_PP = as FoldingScheme>::ProverParam; type FS_VP = as FoldingScheme>::VerifierParam; @@ -199,29 +189,20 @@ mod tests { } impl FCircuit for CubicFCircuit { type Params = (); + type ExternalInputs = (); + type ExternalInputsVar = (); fn new(_params: Self::Params) -> Result { Ok(Self { _f: PhantomData }) } fn state_len(&self) -> usize { 1 } - fn external_inputs_len(&self) -> usize { - 0 - } - fn step_native( - &self, - _i: usize, - z_i: Vec, - _external_inputs: Vec, - ) -> Result, Error> { - Ok(vec![z_i[0] * z_i[0] * z_i[0] + z_i[0] + F::from(5_u32)]) - } fn generate_step_constraints( &self, cs: ConstraintSystemRef, _i: usize, z_i: Vec>, - _external_inputs: Vec>, + _external_inputs: Self::ExternalInputsVar, ) -> Result>, SynthesisError> { let five = FpVar::::new_constant(cs.clone(), F::from(5u32))?; let z_i = z_i[0].clone(); @@ -241,6 +222,8 @@ mod tests { } impl FCircuit for MultiInputsFCircuit { type Params = (); + type ExternalInputs = (); + type ExternalInputsVar = (); fn new(_params: Self::Params) -> Result { Ok(Self { _f: PhantomData }) @@ -248,34 +231,13 @@ mod tests { fn state_len(&self) -> usize { 5 } - fn external_inputs_len(&self) -> usize { - 0 - } - - /// computes the next state values in place, assigning z_{i+1} into z_i, and computing the new - /// z_{i+1} - fn step_native( - &self, - _i: usize, - z_i: Vec, - _external_inputs: Vec, - ) -> Result, Error> { - let a = z_i[0] + F::from(4_u32); - let b = z_i[1] + F::from(40_u32); - let c = z_i[2] * F::from(4_u32); - let d = z_i[3] * F::from(40_u32); - let e = z_i[4] + F::from(100_u32); - - Ok(vec![a, b, c, d, e]) - } - /// generates the constraints for the step of F for the given z_i fn generate_step_constraints( &self, cs: ConstraintSystemRef, _i: usize, z_i: Vec>, - _external_inputs: Vec>, + _external_inputs: Self::ExternalInputsVar, ) -> Result>, SynthesisError> { let four = FpVar::::new_constant(cs.clone(), F::from(4u32))?; let forty = FpVar::::new_constant(cs.clone(), F::from(40u32))?; @@ -332,7 +294,7 @@ mod tests { #[allow(clippy::type_complexity)] fn init_params>( ) -> ((FS_PP, FS_VP), (DECIDER_PP, DECIDER_VP)) { - let mut rng = rand::rngs::OsRng; + let mut rng = ark_std::rand::rngs::OsRng; let poseidon_config = poseidon_canonical_config::(); let f_circuit = FC::new(()).unwrap(); @@ -342,14 +304,9 @@ mod tests { f_circuit.clone(), ); let nova_params = NOVA::preprocess(&mut rng, &prep_param).unwrap(); - let nova = NOVA::init( - &nova_params, - f_circuit.clone(), - vec![Fr::zero(); f_circuit.state_len()].clone(), - ) - .unwrap(); let decider_params = - DECIDER::preprocess(&mut rng, nova_params.clone(), nova.clone()).unwrap(); + DECIDER::::preprocess(&mut rng, (nova_params.clone(), f_circuit.state_len())) + .unwrap(); (nova_params, decider_params) } @@ -375,11 +332,12 @@ mod tests { let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((decider_vp.clone(), f_circuit.state_len())); - let mut rng = rand::rngs::OsRng; + let mut rng = ark_std::rand::rngs::OsRng; let mut nova = NOVA::::init(&fs_params, f_circuit, z_0).unwrap(); for _ in 0..n_steps { - nova.prove_step(&mut rng, vec![], None).unwrap(); + nova.prove_step(&mut rng, 
FC::ExternalInputs::default(), None) + .unwrap(); } let start = Instant::now();
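The `nova_cyclefold.rs` hunks above capture the `FCircuit` migration: external inputs move from `external_inputs_len()` plus a `Vec<FpVar<F>>` argument to the `ExternalInputs`/`ExternalInputsVar` associated types, `step_native` disappears from the test circuits, and `prove_step` now takes a value of `FC::ExternalInputs` (here `()`, via `default()`). A minimal sketch of a post-migration step circuit, assembled from the `CubicFCircuit` hunk; the final cubic expression and the exact trait surface of the new `folding_schemes::frontend::FCircuit` are assumptions based on the pre-existing test and are not shown verbatim in the hunk:

```rust
use ark_ff::PrimeField;
use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar};
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use folding_schemes::{frontend::FCircuit, Error};
use std::marker::PhantomData;

#[derive(Clone, Copy, Debug)]
pub struct CubicFCircuit<F: PrimeField> {
    _f: PhantomData<F>,
}

impl<F: PrimeField> FCircuit<F> for CubicFCircuit<F> {
    type Params = ();
    // External inputs are now associated types; `()` means "no external inputs",
    // and the IVC caller passes `FC::ExternalInputs::default()` to `prove_step`.
    type ExternalInputs = ();
    type ExternalInputsVar = ();

    fn new(_params: Self::Params) -> Result<Self, Error> {
        Ok(Self { _f: PhantomData })
    }

    fn state_len(&self) -> usize {
        1
    }

    fn generate_step_constraints(
        &self,
        cs: ConstraintSystemRef<F>,
        _i: usize,
        z_i: Vec<FpVar<F>>,
        _external_inputs: Self::ExternalInputsVar,
    ) -> Result<Vec<FpVar<F>>, SynthesisError> {
        // z_{i+1} = z_i^3 + z_i + 5, as in the pre-existing cubic test circuit.
        let five = FpVar::<F>::new_constant(cs.clone(), F::from(5u32))?;
        let z_i = z_i[0].clone();
        Ok(vec![&z_i * &z_i * &z_i + &z_i + &five])
    }
}
```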