halo2_axiom/poly/ipa/commitment/prover.rs

use ff::Field;
use rand_core::RngCore;

use super::ParamsIPA;
use crate::arithmetic::{
    best_multiexp, compute_inner_product, eval_polynomial, parallelize, CurveAffine,
};

use crate::poly::commitment::ParamsProver;
use crate::poly::{commitment::Blind, Coeff, Polynomial};
use crate::transcript::{EncodedChallenge, TranscriptWrite};

use group::Curve;
use std::io::{self};

/// Create a polynomial commitment opening proof for the polynomial defined
/// by the coefficients `p_poly`, the blinding factor `p_blind` used for the
/// polynomial commitment, and the point `x_3` that the polynomial is
/// evaluated at.
///
/// This function will panic if the provided polynomial's size is
/// inconsistent with the polynomial commitment parameters.
///
/// **Important:** This function assumes that the provided `transcript` has
/// already seen the common inputs: the polynomial commitment P, the claimed
/// opening v, and the point `x_3`. For full rigor, the transcript should
/// also have seen the elliptic curve description and the URS.
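///
/// The proof written to the transcript consists of a commitment to a blinded
/// random polynomial S, one pair of points (L_j, R_j) per round of the inner
/// product argument (`k` rounds in total), and two final scalars: the fully
/// collapsed coefficient `c` and the synthetic blinding factor `f`.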
pub fn create_proof<
    C: CurveAffine,
    E: EncodedChallenge<C>,
    R: RngCore,
    T: TranscriptWrite<C, E>,
>(
    params: &ParamsIPA<C>,
    mut rng: R,
    transcript: &mut T,
    p_poly: &Polynomial<C::Scalar, Coeff>,
    p_blind: Blind<C::Scalar>,
    x_3: C::Scalar,
) -> io::Result<()> {
    // We're limited to polynomials of degree n - 1.
    assert_eq!(p_poly.len(), params.n as usize);

    // Sample a random polynomial (of same degree) that has a root at x_3, first
    // by setting all coefficients to random values.
    let mut s_poly = (*p_poly).clone();
    for coeff in s_poly.iter_mut() {
        *coeff = C::Scalar::random(&mut rng);
    }
    // Evaluate the random polynomial at x_3
    let s_at_x3 = eval_polynomial(&s_poly[..], x_3);
    // Subtract the evaluation from the constant coefficient to obtain a random
    // polynomial with a root at x_3
    s_poly[0] -= &s_at_x3;
    // And sample a random blind
    let s_poly_blind = Blind(C::Scalar::random(&mut rng));

    // Write a commitment to the random polynomial to the transcript
    let s_poly_commitment = params.commit(&s_poly, s_poly_blind).to_affine();
    transcript.write_point(s_poly_commitment)?;

    // Challenge that will ensure that the prover cannot change P but can only
    // witness a random polynomial commitment that agrees with P at x_3, with high
    // probability.
    let xi = *transcript.squeeze_challenge_scalar::<()>();

    // Challenge that ensures that the prover did not interfere with the U term
    // in their commitments.
    let z = *transcript.squeeze_challenge_scalar::<()>();

    // We'll be opening `P' = P + [ξ] S - [v] G_0`, constructed so that the
    // committed polynomial evaluates to zero at `x_3`.
    let mut p_prime_poly = s_poly * xi + p_poly;
    let v = eval_polynomial(&p_prime_poly, x_3);
    p_prime_poly[0] -= &v;
    let p_prime_blind = s_poly_blind * Blind(xi) + p_blind;
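    // (The blinds combine the same way the polynomials do, because the
    // polynomial commitment is additively homomorphic in both the committed
    // coefficients and the blinding factor.)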

    // This accumulates the synthetic blinding factor `f` starting
    // with the blinding factor for `P'`.
    let mut f = p_prime_blind.0;

    // Initialize the vector `p_prime` as the coefficients of the polynomial.
    let mut p_prime = p_prime_poly.values;
    assert_eq!(p_prime.len(), params.n as usize);

    // Initialize the vector `b` as the powers of `x_3`. The inner product of
    // `p_prime` and `b` is the evaluation of the polynomial at `x_3`.
    let mut b = Vec::with_capacity(1 << params.k);
    {
        let mut cur = C::Scalar::ONE;
        for _ in 0..(1 << params.k) {
            b.push(cur);
            cur *= &x_3;
        }
    }

    // Initialize the vector `G'` from the URS. We'll be progressively collapsing
    // this vector into smaller and smaller vectors until it is of length 1.
    let mut g_prime = params.g.clone();

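    // Each round of the argument below halves the lengths of `p_prime`, `b`,
    // and `G'`: the prover sends cross terms L_j and R_j, receives a
    // challenge u_j, and folds the high half of `p_prime` into the low half
    // with weight u_j^{-1}, while `b` and `G'` are folded with weight u_j.
    // After `k` rounds all three vectors have length 1.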
    // Perform the inner product argument, round by round.
    for j in 0..params.k {
        let half = 1 << (params.k - j - 1); // half the length of `p_prime`, `b`, `G'`

        // Compute L, R
        //
        // TODO: If we modify multiexp to take "extra" bases, we could speed
        // this piece up a bit by combining the multiexps.
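        // L_j pairs the high half of `p_prime` with the low half of `G'`, and
        // additionally commits to the cross inner product <p'_hi, b_lo>
        // (scaled by z, on the point U) and to a fresh blinding factor (on W).
        // R_j is the mirror image with the halves swapped.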
        let l_j = best_multiexp(&p_prime[half..], &g_prime[0..half]);
        let r_j = best_multiexp(&p_prime[0..half], &g_prime[half..]);
        let value_l_j = compute_inner_product(&p_prime[half..], &b[0..half]);
        let value_r_j = compute_inner_product(&p_prime[0..half], &b[half..]);
        let l_j_randomness = C::Scalar::random(&mut rng);
        let r_j_randomness = C::Scalar::random(&mut rng);
        let l_j = l_j + &best_multiexp(&[value_l_j * &z, l_j_randomness], &[params.u, params.w]);
        let r_j = r_j + &best_multiexp(&[value_r_j * &z, r_j_randomness], &[params.u, params.w]);
        let l_j = l_j.to_affine();
        let r_j = r_j.to_affine();

        // Feed L and R into the transcript
        transcript.write_point(l_j)?;
        transcript.write_point(r_j)?;

        let u_j = *transcript.squeeze_challenge_scalar::<()>();
        let u_j_inv = u_j.invert().unwrap(); // TODO, bubble this up

        // Collapse `p_prime` and `b`.
        // TODO: parallelize
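        // Folding `p_prime` with u_j^{-1} and `b` (and `G'` below) with u_j
        // preserves the claimed relation: the collapsed inner product and
        // commitment differ from the previous ones exactly by the L_j and R_j
        // terms weighted by u_j^{-1} and u_j, which is also why the blinding
        // factors of L_j and R_j are accumulated into `f` with those same
        // weights at the end of the round.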
        for i in 0..half {
            p_prime[i] = p_prime[i] + &(p_prime[i + half] * &u_j_inv);
            b[i] = b[i] + &(b[i + half] * &u_j);
        }
        p_prime.truncate(half);
        b.truncate(half);

        // Collapse `G'`
        parallel_generator_collapse(&mut g_prime, u_j);
        g_prime.truncate(half);

        // Update randomness (the synthetic blinding factor at the end)
        f += &(l_j_randomness * &u_j_inv);
        f += &(r_j_randomness * &u_j);
    }

    // We have fully collapsed `p_prime`, `b`, `G'`
    assert_eq!(p_prime.len(), 1);
    let c = p_prime[0];

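    // Send the collapsed coefficient `c` and the synthetic blinding factor
    // `f`; together with the (L_j, R_j) pairs already written above, these
    // are what the verifier needs to check the opening.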
    transcript.write_scalar(c)?;
    transcript.write_scalar(f)?;

    Ok(())
}

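/// Collapses the generator vector in place: each entry of the low half,
/// `g[i]`, becomes `g[i] + challenge * g[i + len/2]`, computed in parallel
/// and batch-normalized back to affine coordinates. The caller truncates `g`
/// to the low half afterwards.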
fn parallel_generator_collapse<C: CurveAffine>(g: &mut [C], challenge: C::Scalar) {
    let len = g.len() / 2;
    let (g_lo, g_hi) = g.split_at_mut(len);

    parallelize(g_lo, |g_lo, start| {
        let g_hi = &g_hi[start..];
        let mut tmp = Vec::with_capacity(g_lo.len());
        for (g_lo, g_hi) in g_lo.iter().zip(g_hi.iter()) {
            tmp.push(g_lo.to_curve() + &(*g_hi * challenge));
        }
        C::Curve::batch_normalize(&tmp, g_lo);
    });
}