optimize the computation of digest of A/B/C matrices (#55)
* optimize the computation of digest of A/B/C matrices
* update version
* address clippy
* address clippy
Parent: 633a6cc16b
Commit: 5af241044f
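What the optimization does, in brief: before this change, NIZK::prove and NIZK::verify each re-serialized and Zlib-compressed the whole R1CSInstance just to append it to the Fiat-Shamir transcript; now that byte string is computed once when the Instance is created, cached in a new digest field, and prove/verify append only the cached bytes. The following standalone Rust sketch mirrors the pattern with placeholder types; in the real code the digest is the Zlib-compressed bincode encoding of the sparse A/B/C matrices, as shown in the hunks below.

// Standalone sketch of the caching pattern in this commit, with placeholder types.
struct ConstraintMatrices {
  entries: Vec<(usize, usize, u64)>, // stand-in for the sparse A/B/C entries
}

impl ConstraintMatrices {
  // Expensive, deterministic encoding of the matrices; computed exactly once.
  fn get_digest(&self) -> Vec<u8> {
    let mut bytes = Vec::new();
    for (row, col, val) in &self.entries {
      bytes.extend(row.to_le_bytes());
      bytes.extend(col.to_le_bytes());
      bytes.extend(val.to_le_bytes());
    }
    bytes
  }
}

struct Instance {
  inst: ConstraintMatrices,
  digest: Vec<u8>, // cached at construction, reused by every prove/verify
}

impl Instance {
  fn new(inst: ConstraintMatrices) -> Self {
    let digest = inst.get_digest(); // once, instead of per transcript append
    Instance { inst, digest }
  }
}

fn main() {
  let inst = Instance::new(ConstraintMatrices { entries: vec![(0, 1, 3)] });
  // prove/verify now append `inst.digest` to the Fiat-Shamir transcript
  // instead of re-serializing and re-compressing the whole instance.
  println!("{} entries, {}-byte digest", inst.inst.entries.len(), inst.digest.len());
}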
@@ -1,6 +1,6 @@
 [package]
 name = "spartan"
-version = "0.7.0"
+version = "0.7.1"
 authors = ["Srinath Setty <srinath@microsoft.com>"]
 edition = "2021"
 description = "High-speed zkSNARKs without trusted setup"

@@ -118,7 +118,7 @@ impl IdentityPolynomial
 impl DensePolynomial {
   pub fn new(Z: Vec<Scalar>) -> Self {
     DensePolynomial {
-      num_vars: Z.len().log_2() as usize,
+      num_vars: Z.len().log_2(),
       len: Z.len(),
       Z,
     }

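The `as usize` casts dropped throughout this diff suggest that the crate's `log_2()` helper now returns usize directly. The actual trait lives elsewhere in the crate and is not part of this diff; a hedged sketch of what such a helper can look like for power-of-two lengths:

// Assumed shape of the helper; not taken from this diff.
pub trait Math {
  fn log_2(self) -> usize;
}

impl Math for usize {
  fn log_2(self) -> usize {
    assert!(self.is_power_of_two());
    (usize::BITS - 1 - self.leading_zeros()) as usize
  }
}

// e.g. 1024usize.log_2() == 10, so Z.len().log_2() needs no further cast.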
src/lib.rs (14 changed lines)
@@ -111,9 +111,10 @@ pub type VarsAssignment = Assignment;
 /// `InputsAssignment` holds an assignment of values to variables in an `Instance`
 pub type InputsAssignment = Assignment;

-/// `Instance` holds the description of R1CS matrices
+/// `Instance` holds the description of R1CS matrices and a hash of the matrices
 pub struct Instance {
   inst: R1CSInstance,
+  digest: Vec<u8>,
 }

 impl Instance {

@@ -221,7 +222,9 @@ impl Instance
       &C_scalar.unwrap(),
     );

-    Ok(Instance { inst })
+    let digest = inst.get_digest();
+
+    Ok(Instance { inst, digest })
   }

   /// Checks if a given R1CSInstance is satisfiable with a given variables and inputs assignments

@@ -263,8 +266,9 @@ impl Instance
     num_inputs: usize,
   ) -> (Instance, VarsAssignment, InputsAssignment) {
     let (inst, vars, inputs) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
+    let digest = inst.get_digest();
     (
-      Instance { inst },
+      Instance { inst, digest },
       VarsAssignment { assignment: vars },
      InputsAssignment { assignment: inputs },
     )

@@ -507,7 +511,7 @@ impl NIZK
     let mut random_tape = RandomTape::new(b"proof");

     transcript.append_protocol_name(NIZK::protocol_name());
-    inst.inst.append_to_transcript(b"inst", transcript);
+    transcript.append_message(b"R1CSInstanceDigest", &inst.digest);

     let (r1cs_sat_proof, rx, ry) = {
       // we might need to pad variables

@@ -552,7 +556,7 @@ impl NIZK
     let timer_verify = Timer::new("NIZK::verify");

     transcript.append_protocol_name(NIZK::protocol_name());
-    inst.inst.append_to_transcript(b"inst", transcript);
+    transcript.append_message(b"R1CSInstanceDigest", &inst.digest);

     // We send evaluations of A, B, C at r = (rx, ry) as claims
     // to enable the verifier complete the first sum-check

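For context, a hedged end-to-end sketch of why the digest binding matters: prover and verifier start transcripts with the same label and, after this change, both absorb the same cached digest, so the Fiat-Shamir challenges they derive agree. API names and import path follow the project README (libspartan); the generator-constructor arguments are assumptions and may differ across versions.

use libspartan::{Instance, NIZKGens, NIZK};
use merlin::Transcript;

fn main() {
  // a small synthetic R1CS instance (the digest is cached inside `inst`)
  let (num_cons, num_vars, num_inputs) = (1024, 1024, 10);
  let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
  let gens = NIZKGens::new(num_cons, num_vars, num_inputs);

  // prover: NIZK::prove appends the cached digest to its transcript
  let mut prover_transcript = Transcript::new(b"nizk_example");
  let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);

  // verifier: the same cached digest is appended, so the challenges match
  let mut verifier_transcript = Transcript::new(b"nizk_example");
  assert!(proof
    .verify(&inst, &inputs, &mut verifier_transcript, &gens)
    .is_ok());
}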
@@ -56,7 +56,7 @@ impl BulletReductionProof
     // All of the input vectors must have a length that is a power of two.
     let mut n = G.len();
     assert!(n.is_power_of_two());
-    let lg_n = n.log_2() as usize;
+    let lg_n = n.log_2();

     // All of the input vectors must have the same length.
     assert_eq!(G.len(), n);

@@ -37,7 +37,7 @@ impl ProductCircuit
     let mut left_vec: Vec<DensePolynomial> = Vec::new();
     let mut right_vec: Vec<DensePolynomial> = Vec::new();

-    let num_layers = poly.len().log_2() as usize;
+    let num_layers = poly.len().log_2();
     let (outp_left, outp_right) = poly.split(poly.len() / 2);

     left_vec.push(outp_left);

@@ -182,7 +182,7 @@ impl ProductCircuitEvalProof
     let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
     assert_eq!(poly_C.len(), len / 2);

-    let num_rounds_prod = poly_C.len().log_2() as usize;
+    let num_rounds_prod = poly_C.len().log_2();
     let comb_func_prod = |poly_A_comp: &Scalar,
                           poly_B_comp: &Scalar,
                           poly_C_comp: &Scalar|

@@ -223,7 +223,7 @@ impl ProductCircuitEvalProof
     len: usize,
     transcript: &mut Transcript,
   ) -> (Scalar, Vec<Scalar>) {
-    let num_layers = len.log_2() as usize;
+    let num_layers = len.log_2();
     let mut claim = eval;
     let mut rand: Vec<Scalar> = Vec::new();
     //let mut num_rounds = 0;

@@ -279,7 +279,7 @@ impl ProductCircuitEvalProofBatched
     let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
     assert_eq!(poly_C_par.len(), len / 2);

-    let num_rounds_prod = poly_C_par.len().log_2() as usize;
+    let num_rounds_prod = poly_C_par.len().log_2();
     let comb_func_prod = |poly_A_comp: &Scalar,
                           poly_B_comp: &Scalar,
                           poly_C_comp: &Scalar|

@@ -389,7 +389,7 @@ impl ProductCircuitEvalProofBatched
     len: usize,
     transcript: &mut Transcript,
   ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
-    let num_layers = len.log_2() as usize;
+    let num_layers = len.log_2();
     let mut rand: Vec<Scalar> = Vec::new();
     //let mut num_rounds = 0;
     assert_eq!(self.proof.len(), num_layers);

@@ -25,15 +25,6 @@ pub struct R1CSInstance
   C: SparseMatPolynomial,
 }

-impl AppendToTranscript for R1CSInstance {
-  fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) {
-    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
-    bincode::serialize_into(&mut encoder, &self).unwrap();
-    let bytes = encoder.finish().unwrap();
-    transcript.append_message(b"R1CSInstance", &bytes);
-  }
-}
-
 pub struct R1CSCommitmentGens {
   gens: SparseMatPolyCommitmentGens,
 }

@@ -47,8 +38,8 @@ impl R1CSCommitmentGens
     num_nz_entries: usize,
   ) -> R1CSCommitmentGens {
     assert!(num_inputs < num_vars);
-    let num_poly_vars_x = num_cons.log_2() as usize;
-    let num_poly_vars_y = (2 * num_vars).log_2() as usize;
+    let num_poly_vars_x = num_cons.log_2();
+    let num_poly_vars_y = (2 * num_vars).log_2();
     let gens =
       SparseMatPolyCommitmentGens::new(label, num_poly_vars_x, num_poly_vars_y, num_nz_entries, 3);
     R1CSCommitmentGens { gens }

@@ -116,8 +107,8 @@ impl R1CSInstance
     assert!(num_inputs < num_vars);

     // no errors, so create polynomials
-    let num_poly_vars_x = num_cons.log_2() as usize;
-    let num_poly_vars_y = (2 * num_vars).log_2() as usize;
+    let num_poly_vars_x = num_cons.log_2();
+    let num_poly_vars_y = (2 * num_vars).log_2();

     let mat_A = (0..A.len())
       .map(|i| SparseMatEntry::new(A[i].0, A[i].1, A[i].2))

@@ -155,6 +146,12 @@ impl R1CSInstance
     self.num_inputs
   }

+  pub fn get_digest(&self) -> Vec<u8> {
+    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
+    bincode::serialize_into(&mut encoder, &self).unwrap();
+    encoder.finish().unwrap()
+  }
+
   pub fn produce_synthetic_r1cs(
     num_cons: usize,
     num_vars: usize,

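A possible follow-up, not part of this commit: since get_digest is a deterministic function of the serialized matrices, a crate-internal test could pin the cached digest against a recomputation. The helper name below is hypothetical.

#[test]
fn digest_is_deterministic() {
  // hypothetical helper that builds the same small R1CSInstance twice
  let inst_a = produce_tiny_r1cs();
  let inst_b = produce_tiny_r1cs();
  assert_eq!(inst_a.get_digest(), inst_b.get_digest());
}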
@@ -167,8 +164,8 @@ impl R1CSInstance
     let mut csprng: OsRng = OsRng;

     // assert num_cons and num_vars are power of 2
-    assert_eq!((num_cons.log_2() as usize).pow2(), num_cons);
-    assert_eq!((num_vars.log_2() as usize).pow2(), num_vars);
+    assert_eq!((num_cons.log_2()).pow2(), num_cons);
+    assert_eq!((num_vars.log_2()).pow2(), num_vars);

     // num_inputs + 1 <= num_vars
     assert!(num_inputs < num_vars);

@@ -215,8 +212,8 @@ impl R1CSInstance
     Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
     Timer::print(&format!("number_non-zero_entries_C {}", C.len()));

-    let num_poly_vars_x = num_cons.log_2() as usize;
-    let num_poly_vars_y = (2 * num_vars).log_2() as usize;
+    let num_poly_vars_x = num_cons.log_2();
+    let num_poly_vars_y = (2 * num_vars).log_2();
     let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A);
     let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B);
     let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C);

@@ -261,7 +258,7 @@ impl R1CSInstance
     assert_eq!(Bz.len(), self.num_cons);
     assert_eq!(Cz.len(), self.num_cons);
     let res: usize = (0..self.num_cons)
-      .map(|i| if Az[i] * Bz[i] == Cz[i] { 0 } else { 1 })
+      .map(|i| usize::from(Az[i] * Bz[i] != Cz[i]))
       .sum();

     res == 0

@@ -64,7 +64,7 @@ pub struct R1CSGens

 impl R1CSGens {
   pub fn new(label: &'static [u8], _num_cons: usize, num_vars: usize) -> Self {
-    let num_poly_vars = num_vars.log_2() as usize;
+    let num_poly_vars = num_vars.log_2();
     let gens_pc = PolyCommitmentGens::new(num_poly_vars, label);
     let gens_sc = R1CSSumcheckGens::new(label, &gens_pc.gens.gens_1);
     R1CSGens { gens_sc, gens_pc }

@@ -183,10 +183,7 @@ impl R1CSProof
     };

     // derive the verifier's challenge tau
-    let (num_rounds_x, num_rounds_y) = (
-      inst.get_num_cons().log_2() as usize,
-      z.len().log_2() as usize,
-    );
+    let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log_2(), z.len().log_2());
     let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
     // compute the initial evaluation table for R(\tau, x)
     let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals());

@@ -368,7 +365,7 @@ impl R1CSProof
       .comm_vars
       .append_to_transcript(b"poly_commitment", transcript);

-    let (num_rounds_x, num_rounds_y) = (num_cons.log_2() as usize, (2 * num_vars).log_2() as usize);
+    let (num_rounds_x, num_rounds_y) = (num_cons.log_2(), (2 * num_vars).log_2());

     // derive the verifier's challenge tau
     let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);

@@ -464,7 +461,7 @@ impl R1CSProof
         .map(|i| SparsePolyEntry::new(i + 1, input[i]))
         .collect::<Vec<SparsePolyEntry>>(),
       );
-      SparsePolynomial::new(n.log_2() as usize, input_as_sparse_poly_entries).evaluate(&ry[1..])
+      SparsePolynomial::new(n.log_2(), input_as_sparse_poly_entries).evaluate(&ry[1..])
     };

     // compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval

@@ -89,10 +89,7 @@ impl DerefsEvalProof
     transcript: &mut Transcript,
     random_tape: &mut RandomTape,
   ) -> PolyEvalProof {
-    assert_eq!(
-      joint_poly.get_num_vars(),
-      r.len() + evals.len().log_2() as usize
-    );
+    assert_eq!(joint_poly.get_num_vars(), r.len() + evals.len().log_2());

     // append the claimed evaluations to transcript
     evals.append_to_transcript(b"evals_ops_val", transcript);

@@ -100,7 +97,7 @@ impl DerefsEvalProof
     // n-to-1 reduction
     let (r_joint, eval_joint) = {
       let challenges =
-        transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log_2() as usize);
+        transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log_2());
       let mut poly_evals = DensePolynomial::new(evals);
       for i in (0..challenges.len()).rev() {
         poly_evals.bound_poly_var_bot(&challenges[i]);

@@ -166,7 +163,7 @@ impl DerefsEvalProof

     // n-to-1 reduction
     let challenges =
-      transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log_2() as usize);
+      transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log_2());
     let mut poly_evals = DensePolynomial::new(evals);
     for i in (0..challenges.len()).rev() {
       poly_evals.bound_poly_var_bot(&challenges[i]);

@@ -300,15 +297,15 @@ impl SparseMatPolyCommitmentGens
     num_nz_entries: usize,
     batch_size: usize,
   ) -> SparseMatPolyCommitmentGens {
-    let num_vars_ops = num_nz_entries.next_power_of_two().log_2() as usize
-      + (batch_size * 5).next_power_of_two().log_2() as usize;
+    let num_vars_ops =
+      num_nz_entries.next_power_of_two().log_2() + (batch_size * 5).next_power_of_two().log_2();
     let num_vars_mem = if num_vars_x > num_vars_y {
       num_vars_x
     } else {
       num_vars_y
     } + 1;
-    let num_vars_derefs = num_nz_entries.next_power_of_two().log_2() as usize
-      + (batch_size * 2).next_power_of_two().log_2() as usize;
+    let num_vars_derefs =
+      num_nz_entries.next_power_of_two().log_2() + (batch_size * 2).next_power_of_two().log_2();

     let gens_ops = PolyCommitmentGens::new(num_vars_ops, label);
     let gens_mem = PolyCommitmentGens::new(num_vars_mem, label);

@@ -778,10 +775,8 @@ impl HashLayerProof
     evals_ops.extend(&eval_val_vec);
     evals_ops.resize(evals_ops.len().next_power_of_two(), Scalar::zero());
     evals_ops.append_to_transcript(b"claim_evals_ops", transcript);
-    let challenges_ops = transcript.challenge_vector(
-      b"challenge_combine_n_to_one",
-      evals_ops.len().log_2() as usize,
-    );
+    let challenges_ops =
+      transcript.challenge_vector(b"challenge_combine_n_to_one", evals_ops.len().log_2());

     let mut poly_evals_ops = DensePolynomial::new(evals_ops);
     for i in (0..challenges_ops.len()).rev() {

@@ -807,10 +802,8 @@ impl HashLayerProof
     // form a single decommitment using comb_comb_mem at rand_mem
     let evals_mem: Vec<Scalar> = vec![eval_row_audit_ts, eval_col_audit_ts];
     evals_mem.append_to_transcript(b"claim_evals_mem", transcript);
-    let challenges_mem = transcript.challenge_vector(
-      b"challenge_combine_two_to_one",
-      evals_mem.len().log_2() as usize,
-    );
+    let challenges_mem =
+      transcript.challenge_vector(b"challenge_combine_two_to_one", evals_mem.len().log_2());

     let mut poly_evals_mem = DensePolynomial::new(evals_mem);
     for i in (0..challenges_mem.len()).rev() {

@@ -952,10 +945,8 @@ impl HashLayerProof
     evals_ops.extend(eval_val_vec);
     evals_ops.resize(evals_ops.len().next_power_of_two(), Scalar::zero());
     evals_ops.append_to_transcript(b"claim_evals_ops", transcript);
-    let challenges_ops = transcript.challenge_vector(
-      b"challenge_combine_n_to_one",
-      evals_ops.len().log_2() as usize,
-    );
+    let challenges_ops =
+      transcript.challenge_vector(b"challenge_combine_n_to_one", evals_ops.len().log_2());

     let mut poly_evals_ops = DensePolynomial::new(evals_ops);
     for i in (0..challenges_ops.len()).rev() {

@@ -978,10 +969,8 @@ impl HashLayerProof
     // form a single decommitment using comb_comb_mem at rand_mem
     let evals_mem: Vec<Scalar> = vec![*eval_row_audit_ts, *eval_col_audit_ts];
     evals_mem.append_to_transcript(b"claim_evals_mem", transcript);
-    let challenges_mem = transcript.challenge_vector(
-      b"challenge_combine_two_to_one",
-      evals_mem.len().log_2() as usize,
-    );
+    let challenges_mem =
+      transcript.challenge_vector(b"challenge_combine_two_to_one", evals_mem.len().log_2());

     let mut poly_evals_mem = DensePolynomial::new(evals_mem);
     for i in (0..challenges_mem.len()).rev() {

@@ -1629,8 +1618,8 @@ mod tests
     let num_nz_entries: usize = 256;
     let num_rows: usize = 256;
     let num_cols: usize = 256;
-    let num_vars_x: usize = num_rows.log_2() as usize;
-    let num_vars_y: usize = num_cols.log_2() as usize;
+    let num_vars_x: usize = num_rows.log_2();
+    let num_vars_y: usize = num_cols.log_2();

     let mut M: Vec<SparseMatEntry> = Vec::new();