Address Clippy warnings
This commit is contained in:
Parent
eb969d5dcf
Commit
2bfc333896
@@ -12,7 +12,7 @@ use merlin::Transcript;
 pub fn main() {
   // the list of number of variables (and constraints) in an R1CS instance
-  let inst_sizes = vec![12, 16, 20];
+  let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];

   println!("Profiler:: NIZK");
   for &s in inst_sizes.iter() {
@@ -12,7 +12,7 @@ use merlin::Transcript;
 pub fn main() {
   // the list of number of variables (and constraints) in an R1CS instance
-  let inst_sizes = vec![12, 16, 20];
+  let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];

   println!("Profiler:: SNARK");
   for &s in inst_sizes.iter() {
@@ -71,39 +71,13 @@ impl Commitments for Scalar {
 impl Commitments for Vec<Scalar> {
   fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
     assert!(gens_n.n == self.len());
-    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + blind * &gens_n.h
+    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + blind * gens_n.h
   }
 }

-impl Commitments for [Scalar] {
-  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
-    assert_eq!(gens_n.n, self.len());
-    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + blind * &gens_n.h
-  }
-}
-
-impl Commitments for Vec<bool> {
-  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
-    assert!(gens_n.n == self.len());
-    let mut comm = blind * &gens_n.h;
-    for i in 0..self.len() {
-      if self[i] {
-        comm = comm + gens_n.G[i];
-      }
-    }
-    comm
-  }
-}
-
 impl Commitments for [bool] {
   fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
     assert!(gens_n.n == self.len());
-    let mut comm = blind * &gens_n.h;
-    for i in 0..self.len() {
-      if self[i] {
-        comm = comm + gens_n.G[i];
-      }
-    }
-    comm
+    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + blind * gens_n.h
   }
 }
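Most of the signature changes in this commit are the same Clippy `ptr_arg` fix: take `&[T]` instead of `&Vec<T>`, and implement traits for the slice type so that `Vec<T>` callers get them through deref coercion. A minimal, self-contained sketch of that pattern — the trait, function, and type names below are made up for illustration and are not part of this codebase:

trait ToyCommit {
  fn commit(&self, blind: u64) -> u64;
}

// Implementing for the slice type covers Vec<u64>, arrays, and sub-slices alike.
impl ToyCommit for [u64] {
  fn commit(&self, blind: u64) -> u64 {
    // stand-in for <v, G> + blind * h over a real group
    self.iter().sum::<u64>() + blind
  }
}

fn sum_commit(v: &[u64], blind: u64) -> u64 {
  // `&[u64]` accepts &Vec<u64>, &[u64; N], and slices without extra clones
  v.commit(blind)
}

fn main() {
  let v = vec![1u64, 2, 3];
  assert_eq!(sum_commit(&v, 7), 13); // &Vec<u64> coerces to &[u64]
  assert_eq!(v.commit(7), 13);       // method resolves via Deref<Target = [u64]>
  assert_eq!(v[..2].commit(7), 10);  // works on sub-slices too
}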
@@ -1,3 +1,4 @@
+#![allow(clippy::too_many_arguments)]
 use super::commitments::{Commitments, MultiCommitGens};
 use super::errors::ProofVerifyError;
 use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
@@ -17,7 +18,7 @@ use rayon::prelude::*;
 pub struct DensePolynomial {
   num_vars: usize, //the number of variables in the multilinear polynomial
   len: usize,
-  Z: Vec<Scalar>, // a vector that holds the evaluations of the polynomial in all the 2^num_vars Boolean inputs
+  Z: Vec<Scalar>, // evaluations of the polynomial in all the 2^num_vars Boolean inputs
 }

 pub struct PolyCommitmentGens {
@@ -73,7 +74,7 @@ impl EqPolynomial {
     EqPolynomial { r }
   }

-  pub fn evaluate(&self, rx: &Vec<Scalar>) -> Scalar {
+  pub fn evaluate(&self, rx: &[Scalar]) -> Scalar {
     assert_eq!(self.r.len(), rx.len());
     (0..rx.len())
       .map(|i| self.r[i] * rx[i] + (Scalar::one() - self.r[i]) * (Scalar::one() - rx[i]))
@@ -87,12 +88,10 @@ impl EqPolynomial {
     let mut size = 1;
     for j in 0..ell {
       // in each iteration, we double the size of chis
-      size = size * 2;
+      size *= 2;
       for i in (0..size).rev().step_by(2) {
         // copy each element from the prior iteration twice
         let scalar = evals[i / 2];
-        // evals[i - 1] = scalar * (Scalar::one() - tau[j]);
-        // evals[i] = scalar * tau[j];
         evals[i] = scalar * self.r[j];
         evals[i - 1] = scalar - evals[i];
       }
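The doubling loop above (EqPolynomial::evals) builds the table of eq(r, x) over all 2^ell Boolean points x in a single pass: after iteration j, the table holds the partial products over the first j+1 variables. A small sketch of the same idea with plain f64 in place of the crate's Scalar field type; the name eq_evals is made up:

fn eq_evals(r: &[f64]) -> Vec<f64> {
  let ell = r.len();
  let mut evals = vec![1.0; 1usize << ell];
  let mut size = 1;
  for rj in r {
    size *= 2;
    for i in (0..size).rev().step_by(2) {
      // each entry of the previous (half-size) table splits into two:
      // one factor rj (this variable set to 1) and one factor 1 - rj (set to 0)
      let scalar = evals[i / 2];
      evals[i] = scalar * rj;
      evals[i - 1] = scalar - evals[i];
    }
  }
  evals
}

fn main() {
  let evals = eq_evals(&[0.25, 0.5]);
  // entry for x = (0, 0) is (1 - 0.25) * (1 - 0.5) = 0.375, and all entries sum to 1
  assert!((evals[0] - 0.375).abs() < 1e-12);
  assert!((evals.iter().sum::<f64>() - 1.0).abs() < 1e-12);
}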
@@ -124,7 +123,7 @@ impl IdentityPolynomial {
     IdentityPolynomial { size_point }
   }

-  pub fn evaluate(&self, r: &Vec<Scalar>) -> Scalar {
+  pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
     let len = r.len();
     assert_eq!(len, self.size_point);
     (0..len)
@@ -178,7 +177,7 @@ impl DensePolynomial {
   }

   #[cfg(not(feature = "rayon_par"))]
-  fn commit_inner(&self, blinds: &Vec<Scalar>, gens: &MultiCommitGens) -> PolyCommitment {
+  fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment {
     let L_size = blinds.len();
     let R_size = self.Z.len() / L_size;
     assert_eq!(L_size * R_size, self.Z.len());
@@ -207,42 +206,43 @@ impl DensePolynomial {
     let R_size = right_num_vars.pow2();
     assert_eq!(L_size * R_size, n);

-    let blinds = match hiding {
-      true => PolyCommitmentBlinds {
+    let blinds = if hiding {
+      PolyCommitmentBlinds {
         blinds: random_tape.unwrap().random_vector(b"poly_blinds", L_size),
-      },
-      false => PolyCommitmentBlinds {
+      }
+    } else {
+      PolyCommitmentBlinds {
         blinds: vec![Scalar::zero(); L_size],
-      },
+      }
     };

     (self.commit_inner(&blinds.blinds, &gens.gens.gens_n), blinds)
   }

-  pub fn bound(&self, L: &Vec<Scalar>) -> Vec<Scalar> {
+  pub fn bound(&self, L: &[Scalar]) -> Vec<Scalar> {
     let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(self.get_num_vars());
     let L_size = left_num_vars.pow2();
     let R_size = right_num_vars.pow2();
     (0..R_size)
-      .map(|i| (0..L_size).map(|j| &L[j] * &self.Z[j * R_size + i]).sum())
+      .map(|i| (0..L_size).map(|j| L[j] * self.Z[j * R_size + i]).sum())
       .collect::<Vec<Scalar>>()
   }

   pub fn bound_poly_var_top(&mut self, r: &Scalar) {
     let n = self.len() / 2;
     for i in 0..n {
-      self.Z[i] = &self.Z[i] + r * (&self.Z[i + n] - &self.Z[i]);
+      self.Z[i] = self.Z[i] + r * (self.Z[i + n] - self.Z[i]);
     }
-    self.num_vars = self.num_vars - 1;
+    self.num_vars -= 1;
     self.len = n;
   }

   pub fn bound_poly_var_bot(&mut self, r: &Scalar) {
     let n = self.len() / 2;
     for i in 0..n {
-      self.Z[i] = &self.Z[2 * i] + r * (&self.Z[2 * i + 1] - &self.Z[2 * i]);
+      self.Z[i] = self.Z[2 * i] + r * (self.Z[2 * i + 1] - self.Z[2 * i]);
     }
-    self.num_vars = self.num_vars - 1;
+    self.num_vars -= 1;
     self.len = n;
   }

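bound_poly_var_top and bound_poly_var_bot above shrink the evaluation table by fixing one variable to r, via Z'[i] = Z[i] + r * (Z[i + n] - Z[i]) = (1 - r) * Z[i] + r * Z[i + n]. A tiny illustration with f64 values (bound_var_top is a made-up name, not the crate's API):

fn bound_var_top(z: &mut Vec<f64>, r: f64) {
  let n = z.len() / 2;
  for i in 0..n {
    // blend the x_top = 0 half with the x_top = 1 half
    z[i] = z[i] + r * (z[i + n] - z[i]);
  }
  z.truncate(n);
}

fn main() {
  // table over 2 variables; the top variable selects first vs second half
  let mut z = vec![3.0, 5.0, 7.0, 9.0];
  bound_var_top(&mut z, 0.5);
  assert_eq!(z, vec![5.0, 7.0]); // halfway between the two halves
}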
@@ -250,13 +250,13 @@ impl DensePolynomial {
     assert_eq!(self.len(), other.len());
     let mut res = Scalar::zero();
     for i in 0..self.len() {
-      res = &res + &self.Z[i] * &other[i];
+      res += self.Z[i] * other[i];
     }
     res
   }

   // returns Z(r) in O(n) time
-  pub fn evaluate(&self, r: &Vec<Scalar>) -> Scalar {
+  pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
     // r must have a value for each variable
     assert_eq!(r.len(), self.get_num_vars());
     let chis = EqPolynomial::new(r.to_vec()).evals();
@@ -274,8 +274,8 @@ impl DensePolynomial {
     let other_vec = other.vec();
     assert_eq!(other_vec.len(), self.len);
     self.Z.extend(other_vec);
-    self.num_vars = self.num_vars + 1;
-    self.len = 2 * self.len;
+    self.num_vars += 1;
+    self.len *= 2;
     assert_eq!(self.Z.len(), self.len);
   }

@@ -283,12 +283,8 @@ impl DensePolynomial {
   where
     I: IntoIterator<Item = &'a DensePolynomial>,
   {
-    //assert!(polys.len() > 0);
-    //let num_vars = polys[0].num_vars();
     let mut Z: Vec<Scalar> = Vec::new();
     for poly in polys.into_iter() {
-      //assert_eq!(poly.get_num_vars(), num_vars); // ensure each polynomial has the same number of variables
-      //assert_eq!(poly.len, poly.vec().len()); // ensure no variable is already bound
       Z.extend(poly.vec());
     }

@@ -298,7 +294,7 @@ impl DensePolynomial {
     DensePolynomial::new(Z)
   }

-  pub fn from_usize(Z: &Vec<usize>) -> Self {
+  pub fn from_usize(Z: &[usize]) -> Self {
     DensePolynomial::new(
       (0..Z.len())
         .map(|i| Scalar::from(Z[i] as u64))
@@ -339,7 +335,7 @@ impl PolyEvalProof {
   pub fn prove(
     poly: &DensePolynomial,
     blinds_opt: Option<&PolyCommitmentBlinds>,
-    r: &Vec<Scalar>, // point at which the polynomial is evaluated
+    r: &[Scalar], // point at which the polynomial is evaluated
     Zr: &Scalar, // evaluation of \widetilde{Z}(r)
     blind_Zr_opt: Option<&Scalar>, // specifies a blind for Zr
     gens: &PolyCommitmentGens,
@@ -401,7 +397,7 @@ impl PolyEvalProof {
     &self,
     gens: &PolyCommitmentGens,
     transcript: &mut Transcript,
-    r: &Vec<Scalar>, // point at which the polynomial is evaluated
+    r: &[Scalar], // point at which the polynomial is evaluated
     C_Zr: &CompressedGroup, // commitment to \widetilde{Z}(r)
     comm: &PolyCommitment,
   ) -> Result<(), ProofVerifyError> {
@@ -425,8 +421,8 @@ impl PolyEvalProof {
     &self,
     gens: &PolyCommitmentGens,
     transcript: &mut Transcript,
-    r: &Vec<Scalar>, // point at which the polynomial is evaluated
-    Zr: &Scalar, // evaluation \widetilde{Z}(r)
+    r: &[Scalar], // point at which the polynomial is evaluated
+    Zr: &Scalar, // evaluation \widetilde{Z}(r)
     comm: &PolyCommitment,
   ) -> Result<(), ProofVerifyError> {
     // compute a commitment to Zr with a blind of zero
@@ -1,4 +1,4 @@
-use std::fmt;
+use core::fmt;

 pub struct ProofVerifyError;

@@ -1,13 +1,14 @@
 #![allow(non_snake_case)]
-
+#![allow(clippy::type_complexity)]
+#![allow(clippy::too_many_arguments)]
 use super::super::errors::ProofVerifyError;
 use super::super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
 use super::super::math::Math;
 use super::super::scalar::Scalar;
 use super::super::transcript::ProofTranscript;
+use core::iter;
 use merlin::Transcript;
 use serde::{Deserialize, Serialize};
-use std::iter;

 #[derive(Debug, Serialize, Deserialize)]
 pub struct BulletReductionProof {
@@ -29,12 +30,12 @@ impl BulletReductionProof {
   pub fn prove(
     transcript: &mut Transcript,
     Q: &GroupElement,
-    G_vec: &Vec<GroupElement>,
+    G_vec: &[GroupElement],
     H: &GroupElement,
-    a_vec: &Vec<Scalar>,
-    b_vec: &Vec<Scalar>,
+    a_vec: &[Scalar],
+    b_vec: &[Scalar],
     blind: &Scalar,
-    blinds_vec: &Vec<(Scalar, Scalar)>,
+    blinds_vec: &[(Scalar, Scalar)],
   ) -> (
     BulletReductionProof,
     GroupElement,
@@ -46,9 +47,9 @@ impl BulletReductionProof {
     // Create slices G, H, a, b backed by their respective
     // vectors. This lets us reslice as we compress the lengths
     // of the vectors in the main loop below.
-    let mut G = &mut G_vec.clone()[..];
-    let mut a = &mut a_vec.clone()[..];
-    let mut b = &mut b_vec.clone()[..];
+    let mut G = &mut G_vec.to_owned()[..];
+    let mut a = &mut a_vec.to_owned()[..];
+    let mut b = &mut b_vec.to_owned()[..];

     // All of the input vectors must have a length that is a power of two.
     let mut n = G.len();
@@ -72,7 +73,7 @@ impl BulletReductionProof {
     let mut blind_fin = *blind;

     while n != 1 {
-      n = n / 2;
+      n /= 2;
       let (a_L, a_R) = a.split_at_mut(n);
       let (b_L, b_R) = b.split_at_mut(n);
       let (G_L, G_R) = G.split_at_mut(n);
@@ -110,7 +111,7 @@ impl BulletReductionProof {
         G_L[i] = GroupElement::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]);
       }

-      blind_fin = blind_fin + blind_L * &u * &u + blind_R * &u_inv * &u_inv;
+      blind_fin = blind_fin + blind_L * u * u + blind_R * u_inv * u_inv;

       L_vec.push(L.compress());
       R_vec.push(R.compress());
@@ -124,10 +125,7 @@ impl BulletReductionProof {
       GroupElement::vartime_multiscalar_mul(&[a[0], a[0] * b[0], blind_fin], &[G[0], *Q, *H]);

     (
-      BulletReductionProof {
-        L_vec: L_vec,
-        R_vec: R_vec,
-      },
+      BulletReductionProof { L_vec, R_vec },
       Gamma_hat,
       a[0],
       b[0],
@@ -196,7 +194,7 @@ impl BulletReductionProof {
   pub fn verify(
     &self,
     n: usize,
-    a: &Vec<Scalar>,
+    a: &[Scalar],
     transcript: &mut Transcript,
     Gamma: &GroupElement,
     G: &[GroupElement],
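For context on the halving loop in BulletReductionProof::prove, here is a numeric sketch (plain f64 in place of field elements, no group operations, all names made up) of the invariant the folding preserves: with a' = u*a_L + u_inv*a_R and b' = u_inv*b_L + u*b_R, the vectors halve while <a', b'> = <a, b> + u^2 * <a_L, b_R> + u_inv^2 * <a_R, b_L>, the two cross terms being exactly what the L and R commitments account for.

fn inner(a: &[f64], b: &[f64]) -> f64 {
  a.iter().zip(b).map(|(x, y)| x * y).sum()
}

fn fold(a: &[f64], b: &[f64], u: f64) -> (Vec<f64>, Vec<f64>) {
  let n = a.len() / 2;
  let (a_l, a_r) = a.split_at(n);
  let (b_l, b_r) = b.split_at(n);
  let u_inv = 1.0 / u;
  let a_next = (0..n).map(|i| u * a_l[i] + u_inv * a_r[i]).collect::<Vec<f64>>();
  let b_next = (0..n).map(|i| u_inv * b_l[i] + u * b_r[i]).collect::<Vec<f64>>();
  (a_next, b_next)
}

fn main() {
  let a = [1.0, 2.0, 3.0, 4.0];
  let b = [5.0, 6.0, 7.0, 8.0];
  let u = 2.0;
  let (a2, b2) = fold(&a, &b, u);
  let cross_l = inner(&a[..2], &b[2..]); // <a_L, b_R>
  let cross_r = inner(&a[2..], &b[..2]); // <a_R, b_L>
  let lhs = inner(&a2, &b2);
  let rhs = inner(&a, &b) + u * u * cross_l + (1.0 / (u * u)) * cross_r;
  assert!((lhs - rhs).abs() < 1e-9);
}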
src/nizk/mod.rs
@@ -1,3 +1,4 @@
+#![allow(clippy::too_many_arguments)]
 use super::commitments::{Commitments, MultiCommitGens};
 use super::errors::ProofVerifyError;
 use super::group::CompressedGroup;
@@ -44,8 +45,8 @@ impl KnowledgeProof {
     let c = transcript.challenge_scalar(b"c");

-    let z1 = x * &c + &t1;
-    let z2 = r * &c + &t2;
+    let z1 = x * c + t1;
+    let z2 = r * c + t2;

     (KnowledgeProof { alpha, z1, z2 }, C)
   }
@@ -63,7 +64,7 @@ impl KnowledgeProof {
     let c = transcript.challenge_scalar(b"c");

     let lhs = self.z1.commit(&self.z2, gens_n).compress();
-    let rhs = (&c * C.decompress().expect("Could not decompress C")
+    let rhs = (c * C.decompress().expect("Could not decompress C")
       + self
         .alpha
         .decompress()
@@ -109,12 +110,12 @@ impl EqualityProof {
     let C2 = v2.commit(&s2, gens_n).compress();
     C2.append_to_transcript(b"C2", transcript);

-    let alpha = (&r * gens_n.h).compress();
+    let alpha = (r * gens_n.h).compress();
     alpha.append_to_transcript(b"alpha", transcript);

     let c = transcript.challenge_scalar(b"c");

-    let z = &c * (s1 - s2) + &r;
+    let z = c * (s1 - s2) + r;

     (EqualityProof { alpha, z }, C1, C2)
   }
@@ -133,11 +134,11 @@ impl EqualityProof {

     let c = transcript.challenge_scalar(b"c");
     let rhs = {
-      let C = &C1.decompress().unwrap() - &C2.decompress().unwrap();
-      (&c * C + &self.alpha.decompress().unwrap()).compress()
+      let C = C1.decompress().unwrap() - C2.decompress().unwrap();
+      (c * C + self.alpha.decompress().unwrap()).compress()
     };

-    let lhs = (&self.z * gens_n.h).compress();
+    let lhs = (self.z * gens_n.h).compress();

     if lhs == rhs {
       Ok(())
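The EqualityProof algebra above can be checked with toy numbers. The sketch below uses integers mod a small prime in additive notation — it is not a secure commitment scheme, just a check that z·h equals c·(C1 − C2) + alpha when z = c·(s1 − s2) + r and alpha = r·h; all constants are made up:

const P: i128 = 2_147_483_647; // a prime standing in for the group order

fn md(x: i128) -> i128 {
  x.rem_euclid(P)
}

fn commit(v: i128, s: i128, g: i128, h: i128) -> i128 {
  // toy Pedersen-style commitment C = v*g + s*h (mod P)
  md(v * g + s * h)
}

fn main() {
  let (g, h) = (7, 11);
  let (v, s1, s2, r, c) = (42, 1_000, 2_500, 9_999, 123_456);

  let c1 = commit(v, s1, g, h);
  let c2 = commit(v, s2, g, h);
  let alpha = md(r * h);
  let z = md(c * md(s1 - s2) + r);

  // verifier's equation: z*h == c*(C1 - C2) + alpha (all mod P)
  let lhs = md(z * h);
  let rhs = md(c * md(c1 - c2) + alpha);
  assert_eq!(lhs, rhs);
}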
@@ -212,11 +213,11 @@ impl ProductProof {

     let c = transcript.challenge_scalar(b"c");

-    let z1 = &b1 + &c * x;
-    let z2 = &b2 + &c * rX;
-    let z3 = &b3 + &c * y;
-    let z4 = &b4 + &c * rY;
-    let z5 = &b5 + &c * (rZ - rX * y);
+    let z1 = b1 + c * x;
+    let z2 = b2 + c * rX;
+    let z3 = b3 + c * y;
+    let z4 = b4 + c * rY;
+    let z5 = b5 + c * (rZ - rX * y);
     let z = [z1, z2, z3, z4, z5];

     (
@@ -243,11 +244,7 @@ impl ProductProof {
     let lhs = (P.decompress().unwrap() + c * X.decompress().unwrap()).compress();
     let rhs = z1.commit(&z2, gens_n).compress();

-    if lhs == rhs {
-      true
-    } else {
-      false
-    }
+    lhs == rhs
   }

   pub fn verify(
@@ -311,9 +308,9 @@ impl DotProductProof {
     b"dot product proof"
   }

-  pub fn compute_dotproduct(a: &Vec<Scalar>, b: &Vec<Scalar>) -> Scalar {
+  pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
     assert_eq!(a.len(), b.len());
-    (0..a.len()).map(|i| &a[i] * &b[i]).sum()
+    (0..a.len()).map(|i| a[i] * b[i]).sum()
   }

   pub fn prove(
@@ -321,46 +318,46 @@ impl DotProductProof {
     gens_n: &MultiCommitGens,
     transcript: &mut Transcript,
     random_tape: &mut RandomTape,
-    x: &Vec<Scalar>,
-    r_x: &Scalar,
-    a: &Vec<Scalar>,
+    x_vec: &[Scalar],
+    blind_x: &Scalar,
+    a_vec: &[Scalar],
     y: &Scalar,
-    r_y: &Scalar,
+    blind_y: &Scalar,
   ) -> (DotProductProof, CompressedGroup, CompressedGroup) {
     transcript.append_protocol_name(DotProductProof::protocol_name());

-    let n = x.len();
-    assert_eq!(x.len(), a.len());
-    assert_eq!(gens_n.n, a.len());
+    let n = x_vec.len();
+    assert_eq!(x_vec.len(), a_vec.len());
+    assert_eq!(gens_n.n, a_vec.len());
     assert_eq!(gens_1.n, 1);

     // produce randomness for the proofs
-    let d = random_tape.random_vector(b"d", n);
+    let d_vec = random_tape.random_vector(b"d_vec", n);
     let r_delta = random_tape.random_scalar(b"r_delta");
     let r_beta = random_tape.random_scalar(b"r_beta");

-    let Cx = x.commit(&r_x, gens_n).compress();
+    let Cx = x_vec.commit(&blind_x, gens_n).compress();
     Cx.append_to_transcript(b"Cx", transcript);

-    let Cy = y.commit(&r_y, gens_1).compress();
+    let Cy = y.commit(&blind_y, gens_1).compress();
     Cy.append_to_transcript(b"Cy", transcript);

-    let delta = d.commit(&r_delta, gens_n).compress();
+    let delta = d_vec.commit(&r_delta, gens_n).compress();
     delta.append_to_transcript(b"delta", transcript);

-    let dotproduct_a_d = DotProductProof::compute_dotproduct(&a, &d);
+    let dotproduct_a_d = DotProductProof::compute_dotproduct(&a_vec, &d_vec);

     let beta = dotproduct_a_d.commit(&r_beta, gens_1).compress();
     beta.append_to_transcript(b"beta", transcript);

     let c = transcript.challenge_scalar(b"c");

-    let z = (0..d.len())
-      .map(|i| c * x[i] + d[i])
+    let z = (0..d_vec.len())
+      .map(|i| c * x_vec[i] + d_vec[i])
       .collect::<Vec<Scalar>>();

-    let z_delta = c * r_x + r_delta;
-    let z_beta = c * r_y + r_beta;
+    let z_delta = c * blind_x + r_delta;
+    let z_beta = c * blind_y + r_beta;

     (
       DotProductProof {
@@ -380,7 +377,7 @@ impl DotProductProof {
     gens_1: &MultiCommitGens,
     gens_n: &MultiCommitGens,
     transcript: &mut Transcript,
-    a: &Vec<Scalar>,
+    a: &[Scalar],
     Cx: &CompressedGroup,
     Cy: &CompressedGroup,
   ) -> Result<(), ProofVerifyError> {
@@ -395,11 +392,11 @@ impl DotProductProof {

     let c = transcript.challenge_scalar(b"c");

-    let mut result = &c * Cx.decompress().unwrap() + self.delta.decompress().unwrap()
+    let mut result = c * Cx.decompress().unwrap() + self.delta.decompress().unwrap()
       == self.z.commit(&self.z_delta, gens_n);

     let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, &a);
-    result &= &c * Cy.decompress().unwrap() + self.beta.decompress().unwrap()
+    result &= c * Cy.decompress().unwrap() + self.beta.decompress().unwrap()
       == dotproduct_z_a.commit(&self.z_beta, gens_1);

     if result {
@@ -437,25 +434,25 @@ impl DotProductProofLog {
     b"dot product proof (log)"
   }

-  pub fn compute_dotproduct(a: &Vec<Scalar>, b: &Vec<Scalar>) -> Scalar {
+  pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
     assert_eq!(a.len(), b.len());
-    (0..a.len()).map(|i| &a[i] * &b[i]).sum()
+    (0..a.len()).map(|i| a[i] * b[i]).sum()
   }

   pub fn prove(
     gens: &DotProductProofGens,
     transcript: &mut Transcript,
     random_tape: &mut RandomTape,
-    x: &Vec<Scalar>,
-    r_x: &Scalar,
-    a: &Vec<Scalar>,
+    x_vec: &[Scalar],
+    blind_x: &Scalar,
+    a_vec: &[Scalar],
     y: &Scalar,
-    r_y: &Scalar,
+    blind_y: &Scalar,
   ) -> (DotProductProofLog, CompressedGroup, CompressedGroup) {
     transcript.append_protocol_name(DotProductProofLog::protocol_name());

-    let n = x.len();
-    assert_eq!(x.len(), a.len());
+    let n = x_vec.len();
+    assert_eq!(x_vec.len(), a_vec.len());
     assert_eq!(gens.n, n);

     // produce randomness for generating a proof
@@ -470,22 +467,22 @@ impl DotProductProofLog {
       .collect::<Vec<(Scalar, Scalar)>>()
     };

-    let Cx = x.commit(&r_x, &gens.gens_n).compress();
+    let Cx = x_vec.commit(&blind_x, &gens.gens_n).compress();
     Cx.append_to_transcript(b"Cx", transcript);

-    let Cy = y.commit(&r_y, &gens.gens_1).compress();
+    let Cy = y.commit(&blind_y, &gens.gens_1).compress();
     Cy.append_to_transcript(b"Cy", transcript);

-    let r_Gamma = r_x + r_y;
+    let blind_Gamma = blind_x + blind_y;
     let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) =
       BulletReductionProof::prove(
         transcript,
         &gens.gens_1.G[0],
         &gens.gens_n.G,
         &gens.gens_n.h,
-        x,
-        a,
-        &r_Gamma,
+        x_vec,
+        a_vec,
+        &blind_Gamma,
         &blinds_vec,
       );
     let y_hat = x_hat * a_hat;
@@ -526,7 +523,7 @@ impl DotProductProofLog {
     n: usize,
     gens: &DotProductProofGens,
     transcript: &mut Transcript,
-    a: &Vec<Scalar>,
+    a: &[Scalar],
     Cx: &CompressedGroup,
     Cy: &CompressedGroup,
   ) -> Result<(), ProofVerifyError> {
@@ -557,7 +554,7 @@ impl DotProductProofLog {
     let z2_s = &self.z2;

     let lhs = ((Gamma_hat * c_s + beta_s) * a_hat_s + delta_s).compress();
-    let rhs = ((g_hat + &gens.gens_1.G[0] * a_hat_s) * z1_s + gens.gens_1.h * z2_s).compress();
+    let rhs = ((g_hat + gens.gens_1.G[0] * a_hat_s) * z1_s + gens.gens_1.h * z2_s).compress();

     assert_eq!(lhs, rhs);

@@ -1,4 +1,4 @@
-#[allow(dead_code)]
+#![allow(dead_code)]
 use super::dense_mlpoly::DensePolynomial;
 use super::dense_mlpoly::EqPolynomial;
 use super::math::Math;
@@ -21,10+21,10 @@ impl ProductCircuit {
   ) -> (DensePolynomial, DensePolynomial) {
     let len = inp_left.len() + inp_right.len();
     let outp_left = (0..len / 4)
-      .map(|i| &inp_left[i] * &inp_right[i])
+      .map(|i| inp_left[i] * inp_right[i])
       .collect::<Vec<Scalar>>();
     let outp_right = (len / 4..len / 2)
-      .map(|i| &inp_left[i] * &inp_right[i])
+      .map(|i| inp_left[i] * inp_right[i])
       .collect::<Vec<Scalar>>();

     (
@@ -82,7 +82,7 @@ impl DotProductCircuit {

   pub fn evaluate(&self) -> Scalar {
     (0..self.left.len())
-      .map(|i| &self.left[i] * &self.right[i] * &self.weight[i])
+      .map(|i| self.left[i] * self.right[i] * self.weight[i])
       .sum()
   }

@@ -202,7 +202,7 @@ impl ProductCircuitEvalProof {

       // produce a random challenge
       let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
-      claim = &claims_prod[0] + &r_layer * (&claims_prod[1] - &claims_prod[0]);
+      claim = claims_prod[0] + r_layer * (claims_prod[1] - claims_prod[0]);

       let mut ext = vec![r_layer];
       ext.extend(rand_prod);
@@ -246,7 +246,7 @@ impl ProductCircuitEvalProof {
       // produce a random challenge
       let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
       claim = (Scalar::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1];
-      num_rounds = num_rounds + 1;
+      num_rounds += 1;
       let mut ext = vec![r_layer];
       ext.extend(rand_prod);
       rand = ext;
@@ -262,7 +262,7 @@ impl ProductCircuitEvalProofBatched {
     dotp_circuit_vec: &mut Vec<&mut DotProductCircuit>,
     transcript: &mut Transcript,
   ) -> (Self, Vec<Scalar>) {
-    assert!(prod_circuit_vec.len() > 0);
+    assert!(!prod_circuit_vec.is_empty());

     let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new());

@@ -302,13 +302,13 @@ impl ProductCircuitEvalProofBatched {
       let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
       let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
       let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
-      if layer_id == 0 && dotp_circuit_vec.len() > 0 {
+      if layer_id == 0 && !dotp_circuit_vec.is_empty() {
         // add additional claims
-        for i in 0..dotp_circuit_vec.len() {
-          claims_to_verify.push(dotp_circuit_vec[i].evaluate());
-          assert_eq!(len / 2, dotp_circuit_vec[i].left.len());
-          assert_eq!(len / 2, dotp_circuit_vec[i].right.len());
-          assert_eq!(len / 2, dotp_circuit_vec[i].weight.len());
+        for item in dotp_circuit_vec.iter() {
+          claims_to_verify.push(item.evaluate());
+          assert_eq!(len / 2, item.left.len());
+          assert_eq!(len / 2, item.right.len());
+          assert_eq!(len / 2, item.weight.len());
         }

         for dotp_circuit in dotp_circuit_vec.iter_mut() {
@@ -346,7 +346,7 @@ impl ProductCircuitEvalProofBatched {
         transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]);
       }

-      if layer_id == 0 && dotp_circuit_vec.len() > 0 {
+      if layer_id == 0 && !dotp_circuit_vec.is_empty() {
         let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp;
         for i in 0..dotp_circuit_vec.len() {
           transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]);
@@ -360,7 +360,7 @@ impl ProductCircuitEvalProofBatched {
       let r_layer = transcript.challenge_scalar(b"challenge_r_layer");

       claims_to_verify = (0..prod_circuit_vec.len())
-        .map(|i| &claims_prod_left[i] + &r_layer * (&claims_prod_right[i] - &claims_prod_left[i]))
+        .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
         .collect::<Vec<Scalar>>();

       let mut ext = vec![r_layer];
@@ -385,8 +385,8 @@ impl ProductCircuitEvalProofBatched {

   pub fn verify(
     &self,
-    claims_prod_vec: &Vec<Scalar>,
-    claims_dotp_vec: &Vec<Scalar>,
+    claims_prod_vec: &[Scalar],
+    claims_dotp_vec: &[Scalar],
     len: usize,
     transcript: &mut Transcript,
   ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
@@ -395,7 +395,7 @@ impl ProductCircuitEvalProofBatched {
     let mut num_rounds = 0;
     assert_eq!(self.proof.len(), num_layers);

-    let mut claims_to_verify = claims_prod_vec.clone();
+    let mut claims_to_verify = claims_prod_vec.to_owned();
     let mut claims_to_verify_dotp: Vec<Scalar> = Vec::new();
     for i in 0..num_layers {
       if i == num_layers - 1 {
@@ -442,11 +442,10 @@ impl ProductCircuitEvalProofBatched {
           transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]);
           transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]);

-          claim_expected = &claim_expected
-            + &coeff_vec[i + num_prod_instances]
-              * &claims_dotp_left[i]
-              * &claims_dotp_right[i]
-              * &claims_dotp_weight[i];
+          claim_expected += coeff_vec[i + num_prod_instances]
+            * claims_dotp_left[i]
+            * claims_dotp_right[i]
+            * claims_dotp_weight[i];
         }
       }

@@ -456,7 +455,7 @@ impl ProductCircuitEvalProofBatched {
       let r_layer = transcript.challenge_scalar(b"challenge_r_layer");

       claims_to_verify = (0..claims_prod_left.len())
-        .map(|i| &claims_prod_left[i] + &r_layer * (&claims_prod_right[i] - &claims_prod_left[i]))
+        .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
         .collect::<Vec<Scalar>>();

       // add claims to verify for dotp circuit
@@ -465,21 +464,21 @@ impl ProductCircuitEvalProofBatched {

         for i in 0..claims_dotp_vec.len() / 2 {
           // combine left claims
-          let claim_left = &claims_dotp_left[2 * i]
-            + &r_layer * (&claims_dotp_left[2 * i + 1] - &claims_dotp_left[2 * i]);
+          let claim_left = claims_dotp_left[2 * i]
+            + r_layer * (claims_dotp_left[2 * i + 1] - claims_dotp_left[2 * i]);

-          let claim_right = &claims_dotp_right[2 * i]
-            + &r_layer * (&claims_dotp_right[2 * i + 1] - &claims_dotp_right[2 * i]);
+          let claim_right = claims_dotp_right[2 * i]
+            + r_layer * (claims_dotp_right[2 * i + 1] - claims_dotp_right[2 * i]);

-          let claim_weight = &claims_dotp_weight[2 * i]
-            + &r_layer * (&claims_dotp_weight[2 * i + 1] - &claims_dotp_weight[2 * i]);
+          let claim_weight = claims_dotp_weight[2 * i]
+            + r_layer * (claims_dotp_weight[2 * i + 1] - claims_dotp_weight[2 * i]);
           claims_to_verify_dotp.push(claim_left);
           claims_to_verify_dotp.push(claim_right);
           claims_to_verify_dotp.push(claim_weight);
         }
       }

-      num_rounds = num_rounds + 1;
+      num_rounds += 1;
       let mut ext = vec![r_layer];
       ext.extend(rand_prod);
       rand = ext;
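The per-layer folding above, claim = claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]), just evaluates the degree-1 polynomial through the two claims at the challenge point. A two-line f64 sketch (fold_claims is a made-up name):

fn fold_claims(c0: f64, c1: f64, r: f64) -> f64 {
  // line(0) = c0, line(1) = c1; evaluating at a random r folds two claims into one
  c0 + r * (c1 - c0)
}

fn main() {
  let (c0, c1) = (4.0, 10.0);
  assert_eq!(fold_claims(c0, c1, 0.0), c0);
  assert_eq!(fold_claims(c0, c1, 1.0), c1);
  assert_eq!(fold_claims(c0, c1, 0.5), 7.0); // midpoint, as expected for a line
}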
@@ -162,7 +162,7 @@ impl R1CSInstance {
     assert_eq!(num_vars.log2().pow2(), num_vars);

     // num_inputs + 1 <= num_vars
-    assert!(num_inputs + 1 <= num_vars);
+    assert!(num_inputs < num_vars);

     // z is organized as [vars,1,io]
     let size_z = num_vars + num_inputs + 1;
@@ -218,12 +218,12 @@ impl R1CSInstance {
     (inst, Z[0..num_vars].to_vec(), Z[num_vars + 1..].to_vec())
   }

-  pub fn is_sat(&self, vars: &Vec<Scalar>, input: &Vec<Scalar>) -> bool {
+  pub fn is_sat(&self, vars: &[Scalar], input: &[Scalar]) -> bool {
     assert_eq!(vars.len(), self.num_vars);
     assert_eq!(input.len(), self.num_inputs);

     let z = {
-      let mut z = vars.clone();
+      let mut z = vars.to_vec();
       z.extend(&vec![Scalar::one()]);
       z.extend(input);
       z
@@ -246,18 +246,15 @@ impl R1CSInstance {
     let res: usize = (0..self.num_cons)
       .map(|i| if Az[i] * Bz[i] == Cz[i] { 0 } else { 1 })
       .sum();
-    if res > 0 {
-      false
-    } else {
-      true
-    }
+
+    res == 0
   }

   pub fn multiply_vec(
     &self,
     num_rows: usize,
     num_cols: usize,
-    z: &Vec<Scalar>,
+    z: &[Scalar],
   ) -> (DensePolynomial, DensePolynomial, DensePolynomial) {
     assert_eq!(num_rows, self.num_cons);
     assert_eq!(z.len(), num_cols);
@@ -273,7 +270,7 @@ impl R1CSInstance {
     &self,
     num_rows: usize,
     num_cols: usize,
-    evals: &Vec<Scalar>,
+    evals: &[Scalar],
   ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
     assert_eq!(num_rows, self.num_cons);
     assert!(num_cols > self.num_vars);
@@ -287,8 +284,8 @@ impl R1CSInstance {

   pub fn evaluate_with_tables(
     &self,
-    evals_rx: &Vec<Scalar>,
-    evals_ry: &Vec<Scalar>,
+    evals_rx: &[Scalar],
+    evals_ry: &[Scalar],
   ) -> R1CSInstanceEvals {
     R1CSInstanceEvals {
       eval_A_r: self.A.evaluate_with_tables(evals_rx, evals_ry),
@@ -300,8 +297,7 @@ impl R1CSInstance {
   pub fn commit(&self, gens: &R1CSCommitmentGens) -> (R1CSCommitment, R1CSDecommitment) {
     assert_eq!(self.A.get_num_nz_entries(), self.B.get_num_nz_entries());
     assert_eq!(self.A.get_num_nz_entries(), self.C.get_num_nz_entries());
-    let (comm, dense) =
-      SparseMatPolynomial::multi_commit(&vec![&self.A, &self.B, &self.C], &gens.gens);
+    let (comm, dense) = SparseMatPolynomial::multi_commit(&[&self.A, &self.B, &self.C], &gens.gens);
     let r1cs_comm = R1CSCommitment {
       num_cons: self.num_cons,
       num_vars: self.num_vars,
@@ -323,8 +319,8 @@ pub struct R1CSEvalProof {
 impl R1CSEvalProof {
   pub fn prove(
     decomm: &R1CSDecommitment,
-    rx: &Vec<Scalar>, // point at which the polynomial is evaluated
-    ry: &Vec<Scalar>,
+    rx: &[Scalar], // point at which the polynomial is evaluated
+    ry: &[Scalar],
     evals: &R1CSInstanceEvals,
     gens: &R1CSCommitmentGens,
     transcript: &mut Transcript,
@@ -335,7 +331,7 @@ impl R1CSEvalProof {
       &decomm.dense,
       rx,
       ry,
-      &vec![evals.eval_A_r, evals.eval_B_r, evals.eval_C_r],
+      &[evals.eval_A_r, evals.eval_B_r, evals.eval_C_r],
       &gens.gens,
       transcript,
       random_tape,
@@ -348,8 +344,8 @@ impl R1CSEvalProof {
   pub fn verify(
     &self,
     comm: &R1CSCommitment,
-    rx: &Vec<Scalar>, // point at which the R1CS matrix polynomials are evaluated
-    ry: &Vec<Scalar>,
+    rx: &[Scalar], // point at which the R1CS matrix polynomials are evaluated
+    ry: &[Scalar],
     eval: &R1CSInstanceEvals,
     gens: &R1CSCommitmentGens,
     transcript: &mut Transcript,
@@ -360,7 +356,7 @@ impl R1CSEvalProof {
       &comm.comm,
       rx,
       ry,
-      &vec![eval.eval_A_r, eval.eval_B_r, eval.eval_C_r],
+      &[eval.eval_A_r, eval.eval_B_r, eval.eval_C_r],
       &gens.gens,
       transcript
     )
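The is_sat change above reduces to checking that (A·z) ∘ (B·z) = C·z entry-wise, with z laid out as [vars, 1, io]. A dense, self-contained sketch of that check with i64 arithmetic — the matrices and names below are made up, and the real code uses sparse matrices over a field:

fn mat_vec(m: &[Vec<i64>], z: &[i64]) -> Vec<i64> {
  m.iter()
    .map(|row| row.iter().zip(z).map(|(a, b)| a * b).sum::<i64>())
    .collect()
}

fn is_sat(a: &[Vec<i64>], b: &[Vec<i64>], c: &[Vec<i64>], z: &[i64]) -> bool {
  let (az, bz, cz) = (mat_vec(a, z), mat_vec(b, z), mat_vec(c, z));
  // satisfied iff every constraint i has (Az)_i * (Bz)_i == (Cz)_i
  (0..az.len()).all(|i| az[i] * bz[i] == cz[i])
}

fn main() {
  // one constraint over z = [x, 1, y] enforcing x * x == y (vars = [x], io = [y])
  let a = vec![vec![1, 0, 0]];
  let b = vec![vec![1, 0, 0]];
  let c = vec![vec![0, 0, 1]];
  assert!(is_sat(&a, &b, &c, &[3, 1, 9]));
  assert!(!is_sat(&a, &b, &c, &[3, 1, 8]));
}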
@@ -1,3 +1,4 @@
+#![allow(clippy::too_many_arguments)]
 use super::commitments::{Commitments, MultiCommitGens};
 use super::dense_mlpoly::{
   DensePolynomial, EqPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof,
@@ -13,9 +14,9 @@ use super::sparse_mlpoly::{SparsePolyEntry, SparsePolynomial};
 use super::sumcheck::ZKSumcheckInstanceProof;
 use super::timer::Timer;
 use super::transcript::{AppendToTranscript, ProofTranscript};
+use core::iter;
 use merlin::Transcript;
 use serde::{Deserialize, Serialize};
-use std::iter;

 #[cfg(test)]
 use super::sparse_mlpoly::{SparseMatEntry, SparseMatPolynomial};
@@ -144,7 +145,7 @@ impl R1CSProof {
   pub fn prove(
     inst: &R1CSInstance,
     vars: Vec<Scalar>,
-    input: &Vec<Scalar>,
+    input: &[Scalar],
     gens: &R1CSGens,
     transcript: &mut Transcript,
     random_tape: &mut RandomTape,
@@ -153,7 +154,7 @@ impl R1CSProof {
     transcript.append_protocol_name(R1CSProof::protocol_name());

     // we currently require the number of |inputs| + 1 to be at most number of vars
-    assert!(input.len() + 1 <= vars.len());
+    assert!(input.len() < vars.len());

     let timer_commit = Timer::new("polycommit");
     let (poly_vars, comm_vars, blinds_vars) = {
@@ -186,7 +187,7 @@ impl R1CSProof {
     let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log2(), z.len().log2());
     let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
     // compute the initial evaluation table for R(\tau, x)
-    let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau.clone()).evals());
+    let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals());
     let (mut poly_Az, mut poly_Bz, mut poly_Cz) =
       inst.multiply_vec(inst.get_num_cons(), z.len(), &z);

@@ -247,7 +248,7 @@ impl R1CSProof {

     // prove the final step of sum-check #1
     let taus_bound_rx = tau_claim;
-    let blind_expected_claim_postsc1 = taus_bound_rx * (&prod_Az_Bz_blind - &Cz_blind);
+    let blind_expected_claim_postsc1 = taus_bound_rx * (prod_Az_Bz_blind - Cz_blind);
     let claim_post_phase1 = (Az_claim * Bz_claim - Cz_claim) * taus_bound_rx;
     let (proof_eq_sc_phase1, _C1, _C2) = EqualityProof::prove(
       &gens.gens_sc.gens_1,
@@ -264,8 +265,8 @@ impl R1CSProof {
     let r_A = transcript.challenge_scalar(b"challenege_Az");
     let r_B = transcript.challenge_scalar(b"challenege_Bz");
     let r_C = transcript.challenge_scalar(b"challenege_Cz");
-    let claim_phase2 = &r_A * Az_claim + &r_B * Bz_claim + &r_C * Cz_claim;
-    let blind_claim_phase2 = &r_A * Az_blind + &r_B * Bz_blind + &r_C * Cz_blind;
+    let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim;
+    let blind_claim_phase2 = r_A * Az_blind + r_B * Bz_blind + r_C * Cz_blind;

     let evals_ABC = {
       // compute the initial evaluation table for R(\tau, x)
@@ -276,7 +277,7 @@ impl R1CSProof {
       assert_eq!(evals_A.len(), evals_B.len());
       assert_eq!(evals_A.len(), evals_C.len());
       (0..evals_A.len())
-        .map(|i| &r_A * &evals_A[i] + &r_B * &evals_B[i] + &r_C * &evals_C[i])
+        .map(|i| r_A * evals_A[i] + r_B * evals_B[i] + r_C * evals_C[i])
         .collect::<Vec<Scalar>>()
     };

@@ -309,9 +310,9 @@ impl R1CSProof {
     timer_polyeval.stop();

     // prove the final step of sum-check #2
-    let blind_eval_Z_at_ry = (Scalar::one() - &ry[0]) * blind_eval;
-    let blind_expected_claim_postsc2 = &claims_phase2[1] * &blind_eval_Z_at_ry;
-    let claim_post_phase2 = &claims_phase2[0] * &claims_phase2[1];
+    let blind_eval_Z_at_ry = (Scalar::one() - ry[0]) * blind_eval;
+    let blind_expected_claim_postsc2 = claims_phase2[1] * blind_eval_Z_at_ry;
+    let claim_post_phase2 = claims_phase2[0] * claims_phase2[1];
     let (proof_eq_sc_phase2, _C1, _C2) = EqualityProof::prove(
       &gens.gens_pc.gens.gens_1,
       transcript,
@@ -350,7 +351,7 @@ impl R1CSProof {
     &self,
     num_vars: usize,
     num_cons: usize,
-    input: &Vec<Scalar>,
+    input: &[Scalar],
     evals: &R1CSInstanceEvals,
     transcript: &mut Transcript,
     gens: &R1CSGens,
@@ -407,9 +408,9 @@ impl R1CSProof {
     comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript);

     let taus_bound_rx: Scalar = (0..rx.len())
-      .map(|i| &rx[i] * &tau[i] + (&Scalar::one() - &rx[i]) * (&Scalar::one() - &tau[i]))
+      .map(|i| rx[i] * tau[i] + (Scalar::one() - rx[i]) * (Scalar::one() - tau[i]))
       .product();
-    let expected_claim_post_phase1 = (&taus_bound_rx
+    let expected_claim_post_phase1 = (taus_bound_rx
       * (comm_prod_Az_Bz_claims.decompress().unwrap() - comm_Cz_claim.decompress().unwrap()))
     .compress();

@@ -481,7 +482,7 @@ impl R1CSProof {

     // compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval
     let comm_eval_Z_at_ry = GroupElement::vartime_multiscalar_mul(
-      iter::once(Scalar::one() - &ry[0]).chain(iter::once(ry[0])),
+      iter::once(Scalar::one() - ry[0]).chain(iter::once(ry[0])),
       iter::once(&self.comm_vars_at_ry.decompress().unwrap()).chain(iter::once(
         &poly_input_eval.commit(&Scalar::zero(), &gens.gens_pc.gens.gens_1),
       )),
@@ -490,7 +491,7 @@ impl R1CSProof {
     // perform the final check in the second sum-check protocol
     let (eval_A_r, eval_B_r, eval_C_r) = evals.get_evaluations();
     let expected_claim_post_phase2 =
-      (&(&r_A * &eval_A_r + &r_B * &eval_B_r + &r_C * &eval_C_r) * comm_eval_Z_at_ry).compress();
+      ((r_A * eval_A_r + r_B * eval_B_r + r_C * eval_C_r) * comm_eval_Z_at_ry).compress();
     // verify proof that expected_claim_post_phase1 == claim_post_phase1
     assert!(self
       .proof_eq_sc_phase2
@@ -27,8 +27,7 @@ impl ScalarFromPrimitives for bool {

 pub trait ScalarBytesFromScalar {
   fn decompress_scalar(s: &Scalar) -> ScalarBytes;
-  fn decompress_vec(v: &Vec<Scalar>) -> Vec<ScalarBytes>;
-  fn decompress_seq(s: &[Scalar]) -> Vec<ScalarBytes>;
+  fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
 }

 impl ScalarBytesFromScalar for Scalar {
@@ -36,13 +35,7 @@ impl ScalarBytesFromScalar {
     ScalarBytes::from_bytes_mod_order(s.to_bytes())
   }

-  fn decompress_vec(v: &Vec<Scalar>) -> Vec<ScalarBytes> {
-    (0..v.len())
-      .map(|i| Scalar::decompress_scalar(&v[i]))
-      .collect::<Vec<ScalarBytes>>()
-  }
-
-  fn decompress_seq(s: &[Scalar]) -> Vec<ScalarBytes> {
+  fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
     (0..s.len())
       .map(|i| Scalar::decompress_scalar(&s[i]))
       .collect::<Vec<ScalarBytes>>()
@@ -3,7 +3,7 @@
 //! The entire file is an adaptation from bls12-381 crate. We modify various constants (MODULUS, R, R2, etc.) to appropriate values for Curve25519 and update tests
 //! We borrow the `invert` method from curve25519-dalek crate
 //! See NOTICE.md for more details
-
+#![allow(clippy::all)]
 use core::borrow::Borrow;
 use core::convert::TryFrom;
 use core::fmt;
@@ -246,10 +246,10 @@ impl ConditionallySelectable for Scalar {
 /// q = 2^252 + 27742317777372353535851937790883648493
 /// 0x1000000000000000 0000000000000000 14def9dea2f79cd6 5812631a5cf5d3ed
 const MODULUS: Scalar = Scalar([
-  0x5812631a5cf5d3ed,
-  0x14def9dea2f79cd6,
-  0x0000000000000000,
-  0x1000000000000000,
+  0x5812_631a_5cf5_d3ed,
+  0x14de_f9de_a2f7_9cd6,
+  0x0000_0000_0000_0000,
+  0x1000_0000_0000_0000,
 ]);

 impl<'a> Neg for &'a Scalar {
@@ -301,30 +301,30 @@ impl_binops_additive!(Scalar, Scalar);
 impl_binops_multiplicative!(Scalar, Scalar);

 /// INV = -(q^{-1} mod 2^64) mod 2^64
-const INV: u64 = 0xd2b51da312547e1b;
+const INV: u64 = 0xd2b5_1da3_1254_7e1b;

 /// R = 2^256 mod q
 const R: Scalar = Scalar([
-  0xd6ec31748d98951d,
-  0xc6ef5bf4737dcf70,
-  0xfffffffffffffffe,
-  0x0fffffffffffffff,
+  0xd6ec_3174_8d98_951d,
+  0xc6ef_5bf4_737d_cf70,
+  0xffff_ffff_ffff_fffe,
+  0x0fff_ffff_ffff_ffff,
 ]);

 /// R^2 = 2^512 mod q
 const R2: Scalar = Scalar([
-  0xa40611e3449c0f01,
-  0xd00e1ba768859347,
-  0xceec73d217f5be65,
-  0x0399411b7c309a3d,
+  0xa406_11e3_449c_0f01,
+  0xd00e_1ba7_6885_9347,
+  0xceec_73d2_17f5_be65,
+  0x0399_411b_7c30_9a3d,
 ]);

 /// R^3 = 2^768 mod q
 const R3: Scalar = Scalar([
-  0x2a9e49687b83a2db,
-  0x278324e6aef7f3ec,
-  0x8065dc6c04ec5b65,
-  0xe530b773599cec7,
+  0x2a9e_4968_7b83_a2db,
+  0x2783_24e6_aef7_f3ec,
+  0x8065_dc6c_04ec_5b65,
+  0x0e53_0b77_3599_cec7,
 ]);

 impl Default for Scalar {
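The constant rewrites above are Clippy's `unreadable_literal` lint: grouping hex digits with underscores (and padding to a uniform width) does not change the value. A quick self-contained check:

fn main() {
  assert_eq!(0xd2b5_1da3_1254_7e1b_u64, 0xd2b51da312547e1b);
  assert_eq!(0x0e53_0b77_3599_cec7_u64, 0xe530b773599cec7); // leading zero added for uniform width
}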
@@ -1,3 +1,6 @@
+#![allow(clippy::type_complexity)]
+#![allow(clippy::too_many_arguments)]
+#![allow(clippy::needless_range_loop)]
 use super::dense_mlpoly::DensePolynomial;
 use super::dense_mlpoly::{
   EqPolynomial, IdentityPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof,
@@ -9,6 +12,7 @@ use super::random::RandomTape;
 use super::scalar::Scalar;
 use super::timer::Timer;
 use super::transcript::{AppendToTranscript, ProofTranscript};
+use core::cmp::Ordering;
 use merlin::Transcript;
 use serde::{Deserialize, Serialize};

@@ -75,7 +79,7 @@ impl DerefsEvalProof {

   fn prove_single(
     joint_poly: &DensePolynomial,
-    r: &Vec<Scalar>,
+    r: &[Scalar],
     evals: Vec<Scalar>,
     gens: &PolyCommitmentGens,
     transcript: &mut Transcript,
@@ -118,16 +122,16 @@ impl DerefsEvalProof {
   // evalues both polynomials at r and produces a joint proof of opening
   pub fn prove(
     derefs: &Derefs,
-    eval_row_ops_val_vec: &Vec<Scalar>,
-    eval_col_ops_val_vec: &Vec<Scalar>,
-    r: &Vec<Scalar>,
+    eval_row_ops_val_vec: &[Scalar],
+    eval_col_ops_val_vec: &[Scalar],
+    r: &[Scalar],
     gens: &PolyCommitmentGens,
     transcript: &mut Transcript,
     random_tape: &mut RandomTape,
   ) -> Self {
     transcript.append_protocol_name(DerefsEvalProof::protocol_name());

-    let mut evals = eval_row_ops_val_vec.clone();
+    let mut evals = eval_row_ops_val_vec.to_owned();
     evals.extend(eval_col_ops_val_vec);
     evals.resize(evals.len().next_power_of_two(), Scalar::zero());

@@ -140,7 +144,7 @@ impl DerefsEvalProof {
   fn verify_single(
     proof: &PolyEvalProof,
     comm: &PolyCommitment,
-    r: &Vec<Scalar>,
+    r: &[Scalar],
     evals: Vec<Scalar>,
     gens: &PolyCommitmentGens,
     transcript: &mut Transcript,
@@ -171,15 +175,15 @@ impl DerefsEvalProof {
   // verify evaluations of both polynomials at r
   pub fn verify(
     &self,
-    r: &Vec<Scalar>,
-    eval_row_ops_val_vec: &Vec<Scalar>,
-    eval_col_ops_val_vec: &Vec<Scalar>,
+    r: &[Scalar],
+    eval_row_ops_val_vec: &[Scalar],
+    eval_col_ops_val_vec: &[Scalar],
     gens: &PolyCommitmentGens,
     comm: &DerefsCommitment,
     transcript: &mut Transcript,
   ) -> Result<(), ProofVerifyError> {
     transcript.append_protocol_name(DerefsEvalProof::protocol_name());
-    let mut evals = eval_row_ops_val_vec.clone();
+    let mut evals = eval_row_ops_val_vec.to_owned();
     evals.extend(eval_col_ops_val_vec);
     evals.resize(evals.len().next_power_of_two(), Scalar::zero());

@@ -214,15 +218,14 @@ struct AddrTimestamps {

 impl AddrTimestamps {
   pub fn new(num_cells: usize, num_ops: usize, ops_addr: Vec<Vec<usize>>) -> Self {
-    for i in 0..ops_addr.len() {
-      assert_eq!(ops_addr[i].len(), num_ops);
+    for item in ops_addr.iter() {
+      assert_eq!(item.len(), num_ops);
     }

     let mut audit_ts = vec![0usize; num_cells];
     let mut ops_addr_vec: Vec<DensePolynomial> = Vec::new();
     let mut read_ts_vec: Vec<DensePolynomial> = Vec::new();
-    for i in 0..ops_addr.len() {
-      let ops_addr_inst = &ops_addr[i];
+    for ops_addr_inst in ops_addr.iter() {
       let mut read_ts = vec![0usize; num_ops];

       // since read timestamps are trustworthy, we can simply increment the r-ts to obtain a w-ts
@@ -249,7 +252,7 @@ impl AddrTimestamps {
     }
   }

-  fn deref_mem(addr: &Vec<usize>, mem_val: &Vec<Scalar>) -> DensePolynomial {
+  fn deref_mem(addr: &[usize], mem_val: &[Scalar]) -> DensePolynomial {
     DensePolynomial::new(
       (0..addr.len())
         .map(|i| {
@@ -260,7 +263,7 @@ impl AddrTimestamps {
     )
   }

-  pub fn deref(&self, mem_val: &Vec<Scalar>) -> Vec<DensePolynomial> {
+  pub fn deref(&self, mem_val: &[Scalar]) -> Vec<DensePolynomial> {
     (0..self.ops_addr.len())
       .map(|i| AddrTimestamps::deref_mem(&self.ops_addr_usize[i], mem_val))
       .collect::<Vec<DensePolynomial>>()
@@ -306,7 +309,7 @@ impl SparseMatPolyCommitmentGens {
   ) -> SparseMatPolyCommitmentGens {
     let num_vars_ops = size.size_ops + (batch_size * 5).next_power_of_two().log2();
     let num_vars_mem = size.size_mem + 1;
-    let num_vars_derefs = size.size_derefs + (batch_size * 1).next_power_of_two().log2();
+    let num_vars_derefs = size.size_derefs + batch_size.next_power_of_two().log2();

     let gens_ops = PolyCommitmentGens::new(num_vars_ops, label);
     let gens_mem = PolyCommitmentGens::new(num_vars_mem, label);
@@ -356,9 +359,9 @@ impl SparseMatPolynomial {
   }

   fn multi_sparse_to_dense_rep(
-    sparse_polys: &Vec<&SparseMatPolynomial>,
+    sparse_polys: &[&SparseMatPolynomial],
   ) -> MultiSparseMatPolynomialAsDense {
-    assert!(sparse_polys.len() > 0);
+    assert!(!sparse_polys.is_empty());
     for i in 1..sparse_polys.len() {
       assert_eq!(sparse_polys[i].num_vars_x, sparse_polys[0].num_vars_x);
       assert_eq!(sparse_polys[i].num_vars_y, sparse_polys[0].num_vars_y);
@@ -414,7 +417,7 @@ impl SparseMatPolynomial {
   }

   pub fn size(&self) -> SparseMatPolynomialSize {
-    let dense = SparseMatPolynomial::multi_sparse_to_dense_rep(&vec![&self]);
+    let dense = SparseMatPolynomial::multi_sparse_to_dense_rep(&[&self]);

     assert_eq!(dense.col.audit_ts.len(), dense.row.audit_ts.len());

@@ -425,11 +428,7 @@ impl SparseMatPolynomial {
     }
   }

-  pub fn evaluate_with_tables(
-    &self,
-    eval_table_rx: &Vec<Scalar>,
-    eval_table_ry: &Vec<Scalar>,
-  ) -> Scalar {
+  pub fn evaluate_with_tables(&self, eval_table_rx: &[Scalar], eval_table_ry: &[Scalar]) -> Scalar {
     assert_eq!(self.num_vars_x.pow2(), eval_table_rx.len());
     assert_eq!(self.num_vars_y.pow2(), eval_table_ry.len());

@@ -438,14 +437,14 @@ impl SparseMatPolynomial {
         let row = self.M[i].row;
         let col = self.M[i].col;
         let val = &self.M[i].val;
-        &eval_table_rx[row] * &eval_table_ry[col] * val
+        eval_table_rx[row] * eval_table_ry[col] * val
       })
       .sum()
   }

-  pub fn evaluate(&self, rx: &Vec<Scalar>, ry: &Vec<Scalar>) -> Scalar {
-    let eval_table_rx = EqPolynomial::new(rx.clone()).evals();
-    let eval_table_ry = EqPolynomial::new(ry.clone()).evals();
+  pub fn evaluate(&self, rx: &[Scalar], ry: &[Scalar]) -> Scalar {
+    let eval_table_rx = EqPolynomial::new(rx.to_vec()).evals();
+    let eval_table_ry = EqPolynomial::new(ry.to_vec()).evals();
     assert_eq!(self.num_vars_x.pow2(), eval_table_rx.len());
     assert_eq!(self.num_vars_y.pow2(), eval_table_ry.len());

@@ -454,12 +453,12 @@ impl SparseMatPolynomial {
         let row = self.M[i].row;
         let col = self.M[i].col;
         let val = &self.M[i].val;
-        &eval_table_rx[row] * &eval_table_ry[col] * val
+        eval_table_rx[row] * eval_table_ry[col] * val
       })
      .sum()
   }

-  pub fn multiply_vec(&self, num_rows: usize, num_cols: usize, z: &Vec<Scalar>) -> Vec<Scalar> {
+  pub fn multiply_vec(&self, num_rows: usize, num_cols: usize, z: &[Scalar]) -> Vec<Scalar> {
     assert_eq!(z.len(), num_cols);

     (0..self.M.len())
@@ -477,7 +476,7 @@ impl SparseMatPolynomial {

   pub fn compute_eval_table_sparse(
     &self,
-    rx: &Vec<Scalar>,
+    rx: &[Scalar],
     num_rows: usize,
     num_cols: usize,
   ) -> Vec<Scalar> {
@@ -493,7 +492,7 @@ impl SparseMatPolynomial {
   }

   pub fn multi_commit(
-    sparse_polys: &Vec<&SparseMatPolynomial>,
+    sparse_polys: &[&SparseMatPolynomial],
     gens: &SparseMatPolyCommitmentGens,
   ) -> (SparseMatPolyCommitment, MultiSparseMatPolynomialAsDense) {
     let batch_size = sparse_polys.len();
@@ -516,7 +515,7 @@ impl SparseMatPolynomial {
 }

 impl MultiSparseMatPolynomialAsDense {
-  pub fn deref(&self, row_mem_val: &Vec<Scalar>, col_mem_val: &Vec<Scalar>) -> Derefs {
+  pub fn deref(&self, row_mem_val: &[Scalar], col_mem_val: &[Scalar]) -> Derefs {
     let row_ops_val = self.row.deref(row_mem_val);
     let col_ops_val = self.col.deref(col_mem_val);

@@ -548,10 +547,10 @@ struct Layers {

 impl Layers {
   fn build_hash_layer(
-    eval_table: &Vec<Scalar>,
-    addrs_vec: &Vec<DensePolynomial>,
-    derefs_vec: &Vec<DensePolynomial>,
-    read_ts_vec: &Vec<DensePolynomial>,
+    eval_table: &[Scalar],
+    addrs_vec: &[DensePolynomial],
+    derefs_vec: &[DensePolynomial],
+    read_ts_vec: &[DensePolynomial],
     audit_ts: &DensePolynomial,
     r_mem_check: &(Scalar, Scalar),
   ) -> (
@@ -565,7 +564,7 @@ impl Layers {
     //hash(addr, val, ts) = ts * r_hash_sqr + val * r_hash + addr
     let r_hash_sqr = r_hash * r_hash;
     let hash_func = |addr: &Scalar, val: &Scalar, ts: &Scalar| -> Scalar {
-      ts * &r_hash_sqr + val * r_hash + addr
+      ts * r_hash_sqr + val * r_hash + addr
     };

     // hash init and audit that does not depend on #instances
@@ -574,7 +573,7 @@ impl Layers {
       (0..num_mem_cells)
         .map(|i| {
           // at init time, addr is given by i, init value is given by eval_table, and ts = 0
-          &hash_func(&Scalar::from(i as u64), &eval_table[i], &Scalar::zero()) - r_multiset_check
+          hash_func(&Scalar::from(i as u64), &eval_table[i], &Scalar::zero()) - r_multiset_check
         })
         .collect::<Vec<Scalar>>(),
     );
@@ -582,7 +581,7 @@ impl Layers {
       (0..num_mem_cells)
         .map(|i| {
           // at audit time, addr is given by i, value is given by eval_table, and ts is given by audit_ts
-          &hash_func(&Scalar::from(i as u64), &eval_table[i], &audit_ts[i]) - r_multiset_check
+          hash_func(&Scalar::from(i as u64), &eval_table[i], &audit_ts[i]) - r_multiset_check
         })
         .collect::<Vec<Scalar>>(),
     );
@@ -599,7 +598,7 @@ impl Layers {
       (0..num_ops)
         .map(|i| {
           // at read time, addr is given by addrs, value is given by derefs, and ts is given by read_ts
-          &hash_func(&addrs[i], &derefs[i], &read_ts[i]) - r_multiset_check
+          hash_func(&addrs[i], &derefs[i], &read_ts[i]) - r_multiset_check
         })
         .collect::<Vec<Scalar>>(),
     );
@@ -609,7 +608,7 @@ impl Layers {
       (0..num_ops)
         .map(|i| {
           // at write time, addr is given by addrs, value is given by derefs, and ts is given by write_ts = read_ts + 1
-          &hash_func(&addrs[i], &derefs[i], &(&read_ts[i] + &Scalar::one())) - r_multiset_check
+          hash_func(&addrs[i], &derefs[i], &(read_ts[i] + Scalar::one())) - r_multiset_check
         })
         .collect::<Vec<Scalar>>(),
     );
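hash_func above packs (addr, val, ts) into one field element as ts·r² + val·r + addr. A u64 sketch of the same packing, with made-up numbers; in the protocol r is a random field element, so this serves as a randomized fingerprint of the tuple rather than a fixed-base encoding:

fn hash(addr: u64, val: u64, ts: u64, r: u64) -> u64 {
  // the tuple written in "base r": ts*r^2 + val*r + addr
  ts * r * r + val * r + addr
}

fn main() {
  let r = 1_000;
  assert_eq!(hash(7, 42, 3, r), 3_042_007);
  // Horner form of the same expression
  assert_eq!(hash(7, 42, 3, r), (3 * r + 42) * r + 7);
}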
@ -625,9 +624,9 @@ impl Layers {
|
|||
}
|
||||
|
||||
pub fn new(
|
||||
eval_table: &Vec<Scalar>,
|
||||
eval_table: &[Scalar],
|
||||
addr_timestamps: &AddrTimestamps,
|
||||
poly_ops_val: &Vec<DensePolynomial>,
|
||||
poly_ops_val: &[DensePolynomial],
|
||||
r_mem_check: &(Scalar, Scalar),
|
||||
) -> Self {
|
||||
let (poly_init_hashed, poly_read_hashed_vec, poly_write_hashed_vec, poly_audit_hashed) =
|
||||
|
@ -691,8 +690,8 @@ impl PolyEvalNetwork {
|
|||
pub fn new(
|
||||
dense: &MultiSparseMatPolynomialAsDense,
|
||||
derefs: &Derefs,
|
||||
mem_rx: &Vec<Scalar>,
|
||||
mem_ry: &Vec<Scalar>,
|
||||
mem_rx: &[Scalar],
|
||||
mem_ry: &[Scalar],
|
||||
r_mem_check: &(Scalar, Scalar),
|
||||
) -> Self {
|
||||
let row_layers = Layers::new(mem_rx, &dense.row, &derefs.row_ops_val, r_mem_check);
|
||||
|
@ -762,10 +761,10 @@ impl HashLayerProof {
|
|||
// decommit derefs at rand_ops
|
||||
let eval_row_ops_val = (0..derefs.row_ops_val.len())
|
||||
.map(|i| derefs.row_ops_val[i].evaluate(&rand_ops))
|
||||
.collect();
|
||||
.collect::<Vec<Scalar>>();
|
||||
let eval_col_ops_val = (0..derefs.col_ops_val.len())
|
||||
.map(|i| derefs.col_ops_val[i].evaluate(&rand_ops))
|
||||
.collect();
|
||||
.collect::<Vec<Scalar>>();
|
||||
let proof_derefs = DerefsEvalProof::prove(
|
||||
derefs,
|
||||
&eval_row_ops_val,
|
||||
|
@ -861,17 +860,17 @@ impl HashLayerProof {
|
|||
fn verify_helper(
|
||||
rand: &(&Vec<Scalar>, &Vec<Scalar>),
|
||||
claims: &(Scalar, Vec<Scalar>, Vec<Scalar>, Scalar),
|
||||
eval_ops_val: &Vec<Scalar>,
|
||||
eval_ops_addr: &Vec<Scalar>,
|
||||
eval_read_ts: &Vec<Scalar>,
|
||||
eval_ops_val: &[Scalar],
|
||||
eval_ops_addr: &[Scalar],
|
||||
eval_read_ts: &[Scalar],
|
||||
eval_audit_ts: &Scalar,
|
||||
r: &Vec<Scalar>,
|
||||
r: &[Scalar],
|
||||
r_hash: &Scalar,
|
||||
r_multiset_check: &Scalar,
|
||||
) -> Result<(), ProofVerifyError> {
|
||||
let r_hash_sqr = r_hash * r_hash;
|
||||
let hash_func = |addr: &Scalar, val: &Scalar, ts: &Scalar| -> Scalar {
|
||||
ts * &r_hash_sqr + val * r_hash + addr
|
||||
ts * r_hash_sqr + val * r_hash + addr
|
||||
};
|
||||
|
||||
let (rand_mem, _rand_ops) = rand;
|
||||
|
@ -879,7 +878,7 @@ impl HashLayerProof {

// init
let eval_init_addr = IdentityPolynomial::new(rand_mem.len()).evaluate(rand_mem);
let eval_init_val = EqPolynomial::new(r.clone()).evaluate(rand_mem);
let eval_init_val = EqPolynomial::new(r.to_vec()).evaluate(rand_mem);
let hash_init_at_rand_mem =
hash_func(&eval_init_addr, &eval_init_val, &Scalar::zero()) - r_multiset_check; // verify the claim_last of init chunk
assert_eq!(&hash_init_at_rand_mem, claim_init);

@ -894,7 +893,7 @@ impl HashLayerProof {

// write: shares addr, val component; only decommit write_ts
for i in 0..eval_ops_addr.len() {
let eval_write_ts = eval_read_ts[i] + &Scalar::one();
let eval_write_ts = eval_read_ts[i] + Scalar::one();
let hash_write_at_rand_ops =
hash_func(&eval_ops_addr[i], &eval_ops_val[i], &eval_write_ts) - r_multiset_check; // verify the claim_last of init chunk
assert_eq!(&hash_write_at_rand_ops, &claim_write[i]);

@ -915,12 +914,12 @@ impl HashLayerProof {
rand: (&Vec<Scalar>, &Vec<Scalar>),
claims_row: &(Scalar, Vec<Scalar>, Vec<Scalar>, Scalar),
claims_col: &(Scalar, Vec<Scalar>, Vec<Scalar>, Scalar),
claims_dotp: &Vec<Scalar>,
claims_dotp: &[Scalar],
comm: &SparseMatPolyCommitment,
gens: &SparseMatPolyCommitmentGens,
comm_derefs: &DerefsCommitment,
rx: &Vec<Scalar>,
ry: &Vec<Scalar>,
rx: &[Scalar],
ry: &[Scalar],
r_hash: &Scalar,
r_multiset_check: &Scalar,
transcript: &mut Transcript,

@ -1073,7 +1072,7 @@ impl ProductLayerProof {
col_prod_layer: &mut ProductLayer,
dense: &MultiSparseMatPolynomialAsDense,
derefs: &Derefs,
eval: &Vec<Scalar>,
eval: &[Scalar],
transcript: &mut Transcript,
) -> (Self, Vec<Scalar>, Vec<Scalar>) {
transcript.append_protocol_name(ProductLayerProof::protocol_name());

@ -1143,7 +1142,7 @@ impl ProductLayerProof {

eval_dotp_left.append_to_transcript(b"claim_eval_dotp_left", transcript);
eval_dotp_right.append_to_transcript(b"claim_eval_dotp_right", transcript);
assert_eq!(&eval_dotp_left + eval_dotp_right, eval[i]);
assert_eq!(eval_dotp_left + eval_dotp_right, eval[i]);
eval_dotp_left_vec.push(eval_dotp_left);
eval_dotp_right_vec.push(eval_dotp_right);

@ -1252,7 +1251,7 @@ impl ProductLayerProof {
&self,
num_ops: usize,
num_cells: usize,
eval: &Vec<Scalar>,
eval: &[Scalar],
transcript: &mut Transcript,
) -> Result<
(

@ -1305,7 +1304,7 @@ impl ProductLayerProof {
assert_eq!(eval_dotp_left.len(), num_instances);
let mut claims_dotp_circuit: Vec<Scalar> = Vec::new();
for i in 0..num_instances {
assert_eq!(&eval_dotp_left[i] + &eval_dotp_right[i], eval[i]);
assert_eq!(eval_dotp_left[i] + eval_dotp_right[i], eval[i]);
eval_dotp_left[i].append_to_transcript(b"claim_eval_dotp_left", transcript);
eval_dotp_right[i].append_to_transcript(b"claim_eval_dotp_right", transcript);

@ -1328,7 +1327,7 @@ impl ProductLayerProof {
);
// verify the correctness of claim_row_eval_init and claim_row_eval_audit
let (claims_mem, _claims_mem_dotp, rand_mem) = self.proof_mem.verify(
&vec![
&[
*row_eval_init,
*row_eval_audit,
*col_eval_init,
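The &vec![...] to &[...] change is Clippy's useless_vec lint: verify only needs a slice here, so the temporary Vec allocation can be replaced by a borrowed array. A small stand-alone illustration (u64 in place of Scalar; first() is a made-up helper):

fn first(claims: &[u64]) -> u64 {
  claims[0]
}

fn main() {
  let (a, b) = (10u64, 20u64);
  let x = first(&vec![a, b]); // allocates a Vec just to borrow it
  let y = first(&[a, b]); // borrowed array literal, no heap allocation
  assert_eq!(x, y);
}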
@ -1359,7 +1358,7 @@ impl PolyEvalNetworkProof {
network: &mut PolyEvalNetwork,
dense: &MultiSparseMatPolynomialAsDense,
derefs: &Derefs,
evals: &Vec<Scalar>,
evals: &[Scalar],
gens: &SparseMatPolyCommitmentGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,

@ -1395,10 +1394,10 @@ impl PolyEvalNetworkProof {
&self,
comm: &SparseMatPolyCommitment,
comm_derefs: &DerefsCommitment,
evals: &Vec<Scalar>,
evals: &[Scalar],
gens: &SparseMatPolyCommitmentGens,
rx: &Vec<Scalar>,
ry: &Vec<Scalar>,
rx: &[Scalar],
ry: &[Scalar],
r_mem_check: &(Scalar, Scalar),
nz: usize,
transcript: &mut Transcript,

@ -1471,27 +1470,29 @@ impl SparseMatPolyEvalProof {
b"Sparse polynomial evaluation proof"
}

fn equalize(rx: &Vec<Scalar>, ry: &Vec<Scalar>) -> (Vec<Scalar>, Vec<Scalar>) {
if rx.len() < ry.len() {
let diff = ry.len() - rx.len();
let mut rx_ext = vec![Scalar::zero(); diff];
rx_ext.extend(rx);
(rx_ext, ry.clone())
} else if rx.len() > ry.len() {
let diff = rx.len() - ry.len();
let mut ry_ext = vec![Scalar::zero(); diff];
ry_ext.extend(ry);
(rx.clone(), ry_ext)
} else {
(rx.clone(), ry.clone())
fn equalize(rx: &[Scalar], ry: &[Scalar]) -> (Vec<Scalar>, Vec<Scalar>) {
match rx.len().cmp(&ry.len()) {
Ordering::Less => {
let diff = ry.len() - rx.len();
let mut rx_ext = vec![Scalar::zero(); diff];
rx_ext.extend(rx);
(rx_ext, ry.to_vec())
}
Ordering::Greater => {
let diff = rx.len() - ry.len();
let mut ry_ext = vec![Scalar::zero(); diff];
ry_ext.extend(ry);
(rx.to_vec(), ry_ext)
}
Ordering::Equal => (rx.to_vec(), ry.to_vec()),
}
}

pub fn prove(
dense: &MultiSparseMatPolynomialAsDense,
rx: &Vec<Scalar>, // point at which the polynomial is evaluated
ry: &Vec<Scalar>,
evals: &Vec<Scalar>, // a vector evaluation of \widetilde{M}(r = (rx,ry)) for each M
rx: &[Scalar], // point at which the polynomial is evaluated
ry: &[Scalar],
evals: &[Scalar], // a vector evaluation of \widetilde{M}(r = (rx,ry)) for each M
gens: &SparseMatPolyCommitmentGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
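equalize pads the shorter of rx and ry with leading zeros so both points have the same number of variables. The rewrite replaces the if / else if / else chain on the lengths with a match on Ordering, which is what Clippy's comparison_chain lint asks for, and swaps .clone() on &Vec for .to_vec() on the slices. A self-contained sketch of the same control flow, with u64 entries standing in for Scalar:

use std::cmp::Ordering;

fn equalize(rx: &[u64], ry: &[u64]) -> (Vec<u64>, Vec<u64>) {
  match rx.len().cmp(&ry.len()) {
    Ordering::Less => {
      let mut rx_ext = vec![0; ry.len() - rx.len()];
      rx_ext.extend(rx); // pad rx with leading zeros
      (rx_ext, ry.to_vec())
    }
    Ordering::Greater => {
      let mut ry_ext = vec![0; rx.len() - ry.len()];
      ry_ext.extend(ry); // pad ry with leading zeros
      (rx.to_vec(), ry_ext)
    }
    Ordering::Equal => (rx.to_vec(), ry.to_vec()),
  }
}

fn main() {
  let (rx, ry) = equalize(&[1, 2], &[3, 4, 5]);
  assert_eq!(rx, vec![0, 1, 2]);
  assert_eq!(ry, vec![3, 4, 5]);
}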
@ -1559,9 +1560,9 @@ impl SparseMatPolyEvalProof {
pub fn verify(
&self,
comm: &SparseMatPolyCommitment,
rx: &Vec<Scalar>, // point at which the polynomial is evaluated
ry: &Vec<Scalar>,
evals: &Vec<Scalar>, // evaluation of \widetilde{M}(r = (rx,ry))
rx: &[Scalar], // point at which the polynomial is evaluated
ry: &[Scalar],
evals: &[Scalar], // evaluation of \widetilde{M}(r = (rx,ry))
gens: &SparseMatPolyCommitmentGens,
transcript: &mut Transcript,
) -> Result<(), ProofVerifyError> {

@ -1621,7 +1622,7 @@ impl SparsePolynomial {
SparsePolynomial { num_vars, Z }
}

fn compute_chi(a: &Vec<bool>, r: &Vec<Scalar>) -> Scalar {
fn compute_chi(a: &[bool], r: &[Scalar]) -> Scalar {
assert_eq!(a.len(), r.len());
let mut chi_i = Scalar::one();
for j in 0..r.len() {

@ -1635,7 +1636,7 @@ impl SparsePolynomial {
}

// Takes O(n log n). TODO: do this in O(n) where n is the number of entries in Z
pub fn evaluate(&self, r: &Vec<Scalar>) -> Scalar {
pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
assert_eq!(self.num_vars, r.len());

(0..self.Z.len())
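compute_chi evaluates one Lagrange basis term chi_a(r) = prod_j (a_j * r_j + (1 - a_j) * (1 - r_j)) for a Boolean index a, and evaluate then sums entry * chi over the sparse entries. A toy version over a small prime modulus (u128 stands in for Scalar; the modulus and test point are illustrative):

const P: u128 = 101; // toy modulus standing in for the Scalar field

fn compute_chi(a: &[bool], r: &[u128]) -> u128 {
  assert_eq!(a.len(), r.len());
  let mut chi_i = 1u128;
  for (&a_j, &r_j) in a.iter().zip(r.iter()) {
    // a_j = true contributes r_j, a_j = false contributes (1 - r_j).
    let term = if a_j { r_j } else { (1 + P - r_j) % P };
    chi_i = chi_i * term % P;
  }
  chi_i
}

fn main() {
  // chi_{(1,0)}(r) = r_0 * (1 - r_1); with r = (3, 5): 3 * (-4) = -12 = 89 mod 101.
  assert_eq!(compute_chi(&[true, false], &[3, 5]), 89);
}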
@ -50,7 +50,7 @@ impl SNARK {
inst: &R1CSInstance,
decomm: &R1CSDecommitment,
vars: Vec<Scalar>,
input: &Vec<Scalar>,
input: &[Scalar],
gens: &SNARKGens,
transcript: &mut Transcript,
) -> Self {

@ -111,7 +111,7 @@ impl SNARK {
pub fn verify(
&self,
comm: &R1CSCommitment,
input: &Vec<Scalar>,
input: &[Scalar],
transcript: &mut Transcript,
gens: &SNARKGens,
) -> Result<(), ProofVerifyError> {

@ -178,7 +178,7 @@ impl NIZK {
pub fn prove(
inst: &R1CSInstance,
vars: Vec<Scalar>,
input: &Vec<Scalar>,
input: &[Scalar],
gens: &NIZKGens,
transcript: &mut Transcript,
) -> Self {

@ -211,7 +211,7 @@ impl NIZK {
pub fn verify(
&self,
inst: &R1CSInstance,
input: &Vec<Scalar>,
input: &[Scalar],
transcript: &mut Transcript,
gens: &NIZKGens,
) -> Result<(), ProofVerifyError> {
190 src/sumcheck.rs

@ -1,3 +1,5 @@
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]
use super::commitments::{Commitments, MultiCommitGens};
use super::dense_mlpoly::DensePolynomial;
use super::errors::ProofVerifyError;
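The two inner attributes silence these lints for all of src/sumcheck.rs rather than per item, since the sum-check provers take many arguments and return nested tuple types. A minimal standalone illustration of a module-wide allow; the wide function below is hypothetical and exists only to exceed Clippy's default limit of seven arguments:

// Module-wide allows: every item in this file is exempt from these two lints.
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]

// Eight parameters exceeds Clippy's default threshold of seven, so without
// the allow above, `too_many_arguments` would fire on this signature.
fn wide(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8, g: u8, h: u8) -> Vec<(Vec<u8>, Vec<u8>)> {
  vec![(vec![a, b, c, d], vec![e, f, g, h])]
}

fn main() {
  assert_eq!(wide(1, 2, 3, 4, 5, 6, 7, 8).len(), 1);
}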
@ -7,10 +9,10 @@ use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use super::unipoly::{CompressedUniPoly, UniPoly};
use core::iter;
use itertools::izip;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
use std::iter;

#[derive(Serialize, Deserialize, Debug)]
pub struct SumcheckInstanceProof {

@ -135,7 +137,7 @@ impl ZKSumcheckInstanceProof {
// the vector to use to decommit for sum-check test
let a_sc = {
let mut a = vec![Scalar::one(); degree_bound + 1];
a[0] = a[0] + Scalar::one();
a[0] += Scalar::one();
a
};

@ -143,7 +145,7 @@ impl ZKSumcheckInstanceProof {
let a_eval = {
let mut a = vec![Scalar::one(); degree_bound + 1];
for j in 1..a.len() {
a[j] = &a[j - 1] * &r_i;
a[j] = a[j - 1] * r_i;
}
a
};

@ -151,7 +153,7 @@ impl ZKSumcheckInstanceProof {
// take weighted sum of the two vectors using w
assert_eq!(a_sc.len(), a_eval.len());
(0..a_sc.len())
.map(|i| &w[0] * &a_sc[i] + &w[1] * &a_eval[i])
.map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
.collect::<Vec<Scalar>>()
};
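One reading of this hunk (an interpretation, not part of the diff): a_eval is the vector of powers 1, r_i, r_i^2, ..., which pairs with the committed round-polynomial coefficients to evaluate at r_i, while a_sc is all ones with a[0] doubled so the same pairing gives s(0) + s(1); the Clippy fixes are just the += / *= forms and dropped borrows. A toy sketch of building the two vectors over a small modulus (u128 and all values stand in for Scalar):

const P: u128 = 101; // toy modulus standing in for the Scalar field

fn main() {
  let r_i: u128 = 5; // stand-in round challenge
  let degree_bound = 3;

  // Same shape as `a_eval`: a[j] = r_i^j.
  let mut a_eval = vec![1u128; degree_bound + 1];
  for j in 1..a_eval.len() {
    a_eval[j] = a_eval[j - 1] * r_i % P;
  }
  assert_eq!(a_eval, vec![1, 5, 25, 24]); // 125 mod 101 = 24

  // Same shape as `a_sc`: all ones with a[0] doubled via `+=`.
  let mut a_sc = vec![1u128; degree_bound + 1];
  a_sc[0] += 1;
  assert_eq!(a_sc, vec![2, 1, 1, 1]);
}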
@ -199,30 +201,28 @@ impl SumcheckInstanceProof {
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);

// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
let poly_C_bound_point = &poly_C[len + i] + &poly_C[len + i] - &poly_C[i];
eval_point_2 = &eval_point_2
+ comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);

// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = &poly_A_bound_point + &poly_A[len + i] - &poly_A[i];
let poly_B_bound_point = &poly_B_bound_point + &poly_B[len + i] - &poly_B[i];
let poly_C_bound_point = &poly_C_bound_point + &poly_C[len + i] - &poly_C[i];
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];

eval_point_3 = &eval_point_3
+ comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
}

let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3];
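Per the comments in this loop, each round evaluates the cubic round polynomial at the points 0, 2 and 3, and the value at 1 is recovered as e - eval_point_0; a table entry bound at point 2 is -A(low) + 2*A(high), and point 3 is reached incrementally by adding A(high) - A(low) once more. The same pattern repeats in the analogous loops below. A toy i64 check of the incremental update for a single (low, high) pair (i64 stands in for Scalar):

fn main() {
  // One (low, high) pair from a bookkeeping table; i64 stands in for Scalar.
  let (a_low, a_high): (i64, i64) = (3, 10);

  // Restricted to this pair, the table is the line A(t) = (1 - t) * low + t * high.
  let line = |t: i64| (1 - t) * a_low + t * a_high;

  // eval 2: bound_func is -A(low) + 2*A(high).
  let a_bound_2 = a_high + a_high - a_low;
  assert_eq!(a_bound_2, line(2));

  // eval 3: computed incrementally from the point-2 value.
  let a_bound_3 = a_bound_2 + a_high - a_low;
  assert_eq!(a_bound_3, line(3));
}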
@ -293,30 +293,28 @@ impl SumcheckInstanceProof {
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i], &poly_C_par[i]);
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C_par[i]);

// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
let poly_C_bound_point = &poly_C_par[len + i] + &poly_C_par[len + i] - &poly_C_par[i];
eval_point_2 = &eval_point_2
+ comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_par[len + i] + poly_C_par[len + i] - poly_C_par[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);

// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = &poly_A_bound_point + &poly_A[len + i] - &poly_A[i];
let poly_B_bound_point = &poly_B_bound_point + &poly_B[len + i] - &poly_B[i];
let poly_C_bound_point = &poly_C_bound_point + &poly_C_par[len + i] - &poly_C_par[i];
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C_par[len + i] - poly_C_par[i];

eval_point_3 = &eval_point_3
+ comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
}

evals.push((eval_point_0, eval_point_2, eval_point_3));

@ -333,27 +331,25 @@ impl SumcheckInstanceProof {
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
let poly_C_bound_point = &poly_C[len + i] + &poly_C[len + i] - &poly_C[i];
eval_point_2 = &eval_point_2
+ comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = &poly_A_bound_point + &poly_A[len + i] - &poly_A[i];
let poly_B_bound_point = &poly_B_bound_point + &poly_B[len + i] - &poly_B[i];
let poly_C_bound_point = &poly_C_bound_point + &poly_C[len + i] - &poly_C[i];
eval_point_3 = &eval_point_3
+ comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
}
evals.push((eval_point_0, eval_point_2, eval_point_3));
}
@ -462,12 +458,12 @@ impl ZKSumcheckInstanceProof {
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i]);
eval_point_0 += comb_func(&poly_A[i], &poly_B[i]);

// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
eval_point_2 = &eval_point_2 + comb_func(&poly_A_bound_point, &poly_B_bound_point);
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
eval_point_2 += comb_func(&poly_A_bound_point, &poly_B_bound_point);
}

let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2];

@ -509,7 +505,7 @@ impl ZKSumcheckInstanceProof {
let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);

// compute a weighted sum of the RHS
let target = &w[0] * &claim_per_round + &w[1] * &eval;
let target = w[0] * claim_per_round + w[1] * eval;
let comm_target = GroupElement::vartime_multiscalar_mul(
w.iter(),
iter::once(&comm_claim_per_round)

@ -528,7 +524,7 @@ impl ZKSumcheckInstanceProof {

let blind_eval = &blinds_evals[j];

&w[0] * blind_sc + &w[1] * blind_eval
w[0] * blind_sc + w[1] * blind_eval
};
assert_eq!(target.commit(&blind, &gens_1).compress(), comm_target);
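This step folds the running sum-check claim and the evaluation claim into one value, target = w[0] * claim_per_round + w[1] * eval, combines the corresponding blinds with the same weights, and checks the result against the multiscalar combination of the two claim commitments. A toy sketch of why the fold is consistent, using a stand-in additively homomorphic commitment over a small prime (nothing below is the crate's real Pedersen commitment; all constants are illustrative):

const P: u128 = 2_147_483_647; // toy prime standing in for the group order

// A toy additively homomorphic "commitment": g*v + h*b mod P (illustrative only).
fn commit(v: u128, b: u128) -> u128 {
  const G: u128 = 48_271;
  const H: u128 = 16_807;
  (G * v + H * b) % P
}

fn main() {
  let (claim_per_round, eval) = (17u128, 29u128);
  let (blind_sc, blind_eval) = (5u128, 11u128); // stand-in blinds
  let w = [3u128, 8u128]; // stand-in transcript challenges

  // Prover side: fold the claims and the blinds with the same weights.
  let target = (w[0] * claim_per_round + w[1] * eval) % P;
  let blind = (w[0] * blind_sc + w[1] * blind_eval) % P;

  // Verifier side: the same fold applied to the two commitments, as the
  // vartime_multiscalar_mul over `w` does in the real code.
  let comm_target =
    (w[0] * commit(claim_per_round, blind_sc) + w[1] * commit(eval, blind_eval)) % P;

  assert_eq!(commit(target, blind), comm_target);
}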
@ -536,7 +532,7 @@ impl ZKSumcheckInstanceProof {
// the vector to use to decommit for sum-check test
let a_sc = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
a[0] = a[0] + Scalar::one();
a[0] += Scalar::one();
a
};

@ -544,7 +540,7 @@ impl ZKSumcheckInstanceProof {
let a_eval = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
for j in 1..a.len() {
a[j] = &a[j - 1] * &r_j;
a[j] = a[j - 1] * r_j;
}
a
};

@ -552,7 +548,7 @@ impl ZKSumcheckInstanceProof {
// take weighted sum of the two vectors using w
assert_eq!(a_sc.len(), a_eval.len());
(0..a_sc.len())
.map(|i| &w[0] * &a_sc[i] + &w[1] * &a_eval[i])
.map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
.collect::<Vec<Scalar>>()
};

@ -626,33 +622,31 @@ impl ZKSumcheckInstanceProof {
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]);
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]);

// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
let poly_C_bound_point = &poly_C[len + i] + &poly_C[len + i] - &poly_C[i];
let poly_D_bound_point = &poly_D[len + i] + &poly_D[len + i] - &poly_D[i];
eval_point_2 = &eval_point_2
+ comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
&poly_D_bound_point,
);
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
&poly_D_bound_point,
);

// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = &poly_A_bound_point + &poly_A[len + i] - &poly_A[i];
let poly_B_bound_point = &poly_B_bound_point + &poly_B[len + i] - &poly_B[i];
let poly_C_bound_point = &poly_C_bound_point + &poly_C[len + i] - &poly_C[i];
let poly_D_bound_point = &poly_D_bound_point + &poly_D[len + i] - &poly_D[i];
eval_point_3 = &eval_point_3
+ comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
&poly_D_bound_point,
);
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
&poly_D_bound_point,
);
}

let evals = vec![

@ -701,7 +695,7 @@ impl ZKSumcheckInstanceProof {
let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);

// compute a weighted sum of the RHS
let target = &w[0] * &claim_per_round + &w[1] * &eval;
let target = w[0] * claim_per_round + w[1] * eval;
let comm_target = GroupElement::vartime_multiscalar_mul(
w.iter(),
iter::once(&comm_claim_per_round)

@ -720,7 +714,7 @@ impl ZKSumcheckInstanceProof {

let blind_eval = &blinds_evals[j];

&w[0] * blind_sc + &w[1] * blind_eval
w[0] * blind_sc + w[1] * blind_eval
};

assert_eq!(target.commit(&blind, &gens_1).compress(), comm_target);

@ -729,7 +723,7 @@ impl ZKSumcheckInstanceProof {
// the vector to use to decommit for sum-check test
let a_sc = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
a[0] = a[0] + Scalar::one();
a[0] += Scalar::one();
a
};

@ -737,7 +731,7 @@ impl ZKSumcheckInstanceProof {
let a_eval = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
for j in 1..a.len() {
a[j] = &a[j - 1] * &r_j;
a[j] = a[j - 1] * r_j;
}
a
};

@ -745,7 +739,7 @@ impl ZKSumcheckInstanceProof {
// take weighted sum of the two vectors using w
assert_eq!(a_sc.len(), a_eval.len());
(0..a_sc.len())
.map(|i| &w[0] * &a_sc[i] + &w[1] * &a_eval[i])
.map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
.collect::<Vec<Scalar>>()
};
15 src/timer.rs

@ -1,9 +1,11 @@
#[cfg(feature = "profile")]
use colored::Colorize;
#[cfg(feature = "profile")]
use std::sync::atomic::AtomicUsize;
use core::sync::atomic::AtomicUsize;
#[cfg(feature = "profile")]
use std::{sync::atomic::Ordering, time::Instant};
use core::sync::atomic::Ordering;
#[cfg(feature = "profile")]
use std::time::Instant;

#[cfg(feature = "profile")]
pub static CALL_DEPTH: AtomicUsize = AtomicUsize::new(0);

@ -20,10 +22,11 @@ impl Timer {
pub fn new(label: &str) -> Self {
let timer = Instant::now();
CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
let star = "* ";
println!(
"{:indent$}{}{}",
"",
"* ",
star,
label.yellow().bold(),
indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed)
);

@ -36,10 +39,11 @@ impl Timer {
#[inline(always)]
pub fn stop(&self) {
let duration = self.timer.elapsed();
let star = "* ";
println!(
"{:indent$}{}{} {:?}",
"",
"* ",
star,
self.label.blue().bold(),
duration,
indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed)

@ -50,10 +54,11 @@ impl Timer {
#[inline(always)]
pub fn print(msg: &str) {
CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
let star = "* ";
println!(
"{:indent$}{}{}",
"",
"* ",
star,
msg.to_string().green().bold(),
indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed)
);
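The timer changes hoist the "* " prefix into a star binding and keep the indentation driven by the CALL_DEPTH atomic, two spaces per level through the {:indent$} width argument. A minimal sketch of the same indent-by-depth printing without the colored and profile-feature machinery (the read below uses a plain load where the original uses fetch_add(0, ..)):

use core::sync::atomic::{AtomicUsize, Ordering};

static CALL_DEPTH: AtomicUsize = AtomicUsize::new(0);

fn print_at_depth(msg: &str) {
  CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
  let star = "* ";
  // `{:indent$}` pads the empty first argument to `indent` spaces,
  // so deeper calls are pushed further to the right.
  println!(
    "{:indent$}{}{}",
    "",
    star,
    msg,
    indent = 2 * CALL_DEPTH.load(Ordering::Relaxed)
  );
}

fn main() {
  print_at_depth("outer");
  print_at_depth("inner"); // printed two spaces deeper than "outer"
}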
@ -49,8 +49,8 @@ impl AppendToTranscript for Scalar {
impl AppendToTranscript for Vec<Scalar> {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_message(label, b"begin_append_vector");
for i in 0..self.len() {
transcript.append_scalar(label, &self[i]);
for item in self {
transcript.append_scalar(label, item);
}
transcript.append_message(label, b"end_append_vector");
}
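Iterating the vector directly instead of indexing is Clippy's needless_range_loop fix. A tiny self-contained version of the transform, appending to a String in place of a transcript (the names are illustrative):

fn append_all(out: &mut String, values: &[u64]) {
  // Clippy flags the indexed form `for i in 0..values.len() { ... values[i] ... }`;
  // iterating the items directly says the same thing without the index.
  for item in values {
    out.push_str(&item.to_string());
    out.push(' ');
  }
}

fn main() {
  let mut out = String::new();
  append_all(&mut out, &[1, 2, 3]);
  assert_eq!(out, "1 2 3 ");
}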
@ -20,7 +20,7 @@ pub struct CompressedUniPoly {
}

impl UniPoly {
pub fn from_evals(evals: &Vec<Scalar>) -> Self {
pub fn from_evals(evals: &[Scalar]) -> Self {
// we only support degree-2 or degree-3 univariate polynomials
assert!(evals.len() == 3 || evals.len() == 4);
let coeffs = if evals.len() == 3 {

@ -73,8 +73,8 @@ impl UniPoly {
let mut eval = self.coeffs[0];
let mut power = *r;
for i in 1..self.coeffs.len() {
eval = &eval + &power * &self.coeffs[i];
power = &power * r;
eval += power * self.coeffs[i];
power *= r;
}
eval
}
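evaluate keeps a running power of r so that coeffs[i] is scaled by r^i; the Clippy change is only the += and *= assignment forms. The same loop with i64 coefficients standing in for Scalar:

fn evaluate(coeffs: &[i64], r: i64) -> i64 {
  let mut eval = coeffs[0];
  let mut power = r;
  for i in 1..coeffs.len() {
    eval += power * coeffs[i]; // add coeffs[i] * r^i
    power *= r; // advance to r^(i+1)
  }
  eval
}

fn main() {
  // p(x) = 2 + 3x + x^2, so p(4) = 2 + 12 + 16 = 30.
  assert_eq!(evaluate(&[2, 3, 1], 4), 30);
}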
@ -99,7 +99,7 @@ impl CompressedUniPoly {
let mut linear_term =
hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
for i in 1..self.coeffs_except_linear_term.len() {
linear_term = linear_term - self.coeffs_except_linear_term[i];
linear_term -= self.coeffs_except_linear_term[i];
}

let mut coeffs: Vec<Scalar> = Vec::new();
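decompress reconstructs the omitted linear coefficient from the hint: reading the hint as p(0) + p(1), p(1) is the sum of all coefficients and p(0) is the constant term, so c1 = hint - 2*c0 - c2 - ... - cd, which is exactly the subtraction loop above in its -= form. A worked i64 check of that identity (the polynomial and the reading of the hint are illustrative):

fn main() {
  // p(x) = 4 + 7x + 2x^2 + 5x^3; the linear coefficient 7 is the one omitted.
  let (c0, c1, c2, c3): (i64, i64, i64, i64) = (4, 7, 2, 5);
  let p = |x: i64| c0 + c1 * x + c2 * x * x + c3 * x * x * x;

  // Hint supplied to decompress, read here as p(0) + p(1).
  let hint = p(0) + p(1);

  // Same arithmetic as the decompress loop, with the `-=` form.
  let coeffs_except_linear_term = [c0, c2, c3];
  let mut linear_term =
    hint - coeffs_except_linear_term[0] - coeffs_except_linear_term[0];
  for i in 1..coeffs_except_linear_term.len() {
    linear_term -= coeffs_except_linear_term[i];
  }
  assert_eq!(linear_term, c1);
}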