p3_fri/
two_adic_pcs.rs

1//! The FRI PCS protocol over two-adic fields.
2//!
3//! The following implements a slight variant of the usual FRI protocol. As usual we start
//! with a polynomial `F(x)` of degree less than `2^n` given as evaluations over the coset `gH` with `|H| = 2^n`.
5//!
6//! Now consider the polynomial `G(x) = F(gx)`. Note that `G(x)` has the same degree as `F(x)` and
7//! the evaluations of `F(x)` over `gH` are identical to the evaluations of `G(x)` over `H`.
8//!
9//! Hence we can reinterpret our vector of evaluations as evaluations of `G(x)` over `H` and apply
10//! the standard FRI protocol to this evaluation vector. This makes it easier to apply FRI to a collection
11//! of polynomials defined over different cosets as we don't need to keep track of the coset shifts. We
12//! can just assume that every polynomial is defined over the subgroup of the relevant size.
13//!
14//! If we changed our domain construction (e.g., using multiple cosets), we would need to carefully reconsider these assumptions.
15
16use alloc::vec;
17use alloc::vec::Vec;
18use core::fmt::Debug;
19use core::marker::PhantomData;
20
21use itertools::{Itertools, izip};
22use p3_challenger::{CanObserve, FieldChallenger, GrindingChallenger};
23use p3_commit::{BatchOpening, Mmcs, OpenedValues, Pcs};
24use p3_dft::TwoAdicSubgroupDft;
25use p3_field::coset::TwoAdicMultiplicativeCoset;
26use p3_field::{
27    ExtensionField, PackedFieldExtension, TwoAdicField, batch_multiplicative_inverse, dot_product,
28};
29use p3_interpolation::interpolate_coset_with_precomputation;
30use p3_matrix::Matrix;
31use p3_matrix::bitrev::{BitReversedMatrixView, BitReversibleMatrix};
32use p3_matrix::dense::{RowMajorMatrix, RowMajorMatrixView};
33use p3_maybe_rayon::prelude::*;
34use p3_util::linear_map::LinearMap;
35use p3_util::{log2_strict_usize, reverse_bits_len, reverse_slice_index_bits};
36use tracing::{info_span, instrument};
37
38use crate::verifier::{self, FriError};
39use crate::{FriFoldingStrategy, FriParameters, FriProof, prover};
40
/// A polynomial commitment scheme using FRI to generate opening proofs.
///
/// We commit to a polynomial `f` via its evaluation vectors over a coset
/// `gH` where `|H| >= 2 * deg(f)`. A value `f(z)` is opened by using a FRI
/// proof to show that the evaluations of `(f(x) - f(z))/(x - z)` over
/// `gH` are low degree.
#[derive(Debug)]
pub struct TwoAdicFriPcs<Val, Dft, InputMmcs, FriMmcs> {
    // The DFT implementation used to compute low degree extensions in `commit`.
    pub(crate) dft: Dft,
    // The commitment scheme used to commit to the input evaluation matrices.
    pub(crate) mmcs: InputMmcs,
    // FRI protocol parameters (in particular the log blowup factor used for LDEs).
    pub(crate) fri: FriParameters<FriMmcs>,
    // `Val` appears only in trait bounds on impls; PhantomData ties it to the struct.
    _phantom: PhantomData<Val>,
}
54
55impl<Val, Dft, InputMmcs, FriMmcs> TwoAdicFriPcs<Val, Dft, InputMmcs, FriMmcs> {
56    pub const fn new(dft: Dft, mmcs: InputMmcs, fri: FriParameters<FriMmcs>) -> Self {
57        Self {
58            dft,
59            mmcs,
60            fri,
61            _phantom: PhantomData,
62        }
63    }
64}
65
/// The Prover Data associated to a commitment to a collection of matrices
/// and a list of points to open each matrix at.
///
/// The `i`'th inner `Vec<EF>` lists the points at which the `i`'th committed
/// matrix is opened; every column of that matrix is opened at each point.
pub type ProverDataWithOpeningPoints<'a, EF, ProverData> = (
    // The matrices and auxiliary prover data
    &'a ProverData,
    // for each matrix,
    Vec<
        // points to open
        Vec<EF>,
    >,
);
77
/// A joint commitment to a collection of matrices and their opening at
/// a collection of points.
///
/// This is the verifier-side view: for each matrix we record its evaluation
/// domain together with `(point, claimed_evaluations)` pairs, where the inner
/// vector holds one claimed value per column of the matrix.
pub type CommitmentWithOpeningPoints<Challenge, Commitment, Domain> = (
    Commitment,
    // For each matrix in the commitment:
    Vec<(
        // The domain of the matrix
        Domain,
        // A vector of (point, claimed_evaluation) pairs
        Vec<(Challenge, Vec<Challenge>)>,
    )>,
);
90
/// The arity-2 [`FriFoldingStrategy`] used by [`TwoAdicFriPcs`], generic over
/// the input proof and input error types carried through the FRI protocol.
pub struct TwoAdicFriFolding<InputProof, InputError>(pub PhantomData<(InputProof, InputError)>);
92
/// [`TwoAdicFriFolding`] specialized to the batch openings and error type of
/// a particular input [`Mmcs`].
pub type TwoAdicFriFoldingForMmcs<F, M> =
    TwoAdicFriFolding<Vec<BatchOpening<F, M>>, <M as Mmcs<F>>::Error>;
95
impl<F: TwoAdicField, InputProof, InputError: Debug, EF: ExtensionField<F>>
    FriFoldingStrategy<F, EF> for TwoAdicFriFolding<InputProof, InputError>
{
    type InputProof = InputProof;
    type InputError = InputError;

    /// This folding strategy needs no extra random bits in the query index.
    fn extra_query_index_bits(&self) -> usize {
        0
    }

    /// Fold a single pair of sibling evaluations into one evaluation of the
    /// half-sized codeword.
    ///
    /// `evals` must yield exactly two values `(e0, e1)`: the evaluations of the
    /// current codeword at the two square roots (`xs[0]`, `xs[1]`) of the folded
    /// point selected by `index`. We interpolate the line through
    /// `(xs[0], e0)` and `(xs[1], e1)` and evaluate it at `beta`.
    ///
    /// # Panics
    /// Panics if `evals` does not contain exactly two elements.
    fn fold_row(
        &self,
        index: usize,
        log_height: usize,
        beta: EF,
        evals: impl Iterator<Item = EF>,
    ) -> EF {
        let arity = 2;
        let log_arity = 1;
        let (e0, e1) = evals
            .collect_tuple()
            .expect("TwoAdicFriFolder only supports arity=2");
        // If performance critical, make this API stateful to avoid this
        // This is a bit more math than is necessary, but leaving it here
        // in case we want higher arity in the future.
        let subgroup_start = F::two_adic_generator(log_height + log_arity)
            .exp_u64(reverse_bits_len(index, log_height) as u64);
        let mut xs = F::two_adic_generator(log_arity)
            .shifted_powers(subgroup_start)
            .collect_n(arity);
        reverse_slice_index_bits(&mut xs);
        assert_eq!(log_arity, 1, "can only interpolate two points for now");
        // interpolate and evaluate at beta
        e0 + (beta - xs[0]) * (e1 - e0) * (xs[1] - xs[0]).inverse()
        // Currently Algebra<F> does not include division so we do it manually.
        // Note we do not want to do an EF division as that is far more expensive.
    }

    /// Fold an entire codeword by the challenge `beta`.
    ///
    /// Each row of `m` holds a pair of sibling evaluations `(lo, hi)`; the
    /// output vector has one folded evaluation per row.
    fn fold_matrix<M: Matrix<EF>>(&self, beta: EF, m: M) -> Vec<EF> {
        // We use the fact that
        //     p_e(x^2) = (p(x) + p(-x)) / 2
        //     p_o(x^2) = (p(x) - p(-x)) / (2 x)
        // that is,
        //     p_e(g^(2i)) = (p(g^i) + p(g^(n/2 + i))) / 2
        //     p_o(g^(2i)) = (p(g^i) - p(g^(n/2 + i))) / (2 g^i)
        // so
        //     result(g^(2i)) = p_e(g^(2i)) + beta p_o(g^(2i))
        //
        // As p_e, p_o will be in the extension field we want to find ways to avoid extension multiplications.
        // We should only need a single one (namely multiplication by beta).
        let g_inv = F::two_adic_generator(log2_strict_usize(m.height()) + 1).inverse();

        // TODO: vectorize this (after we have packed extension fields)

        // As beta is in the extension field, we want to avoid multiplying by it
        // for as long as possible. Here we precompute the powers  `g_inv^i / 2` in the base field.
        let mut halve_inv_powers = g_inv.shifted_powers(F::ONE.halve()).collect_n(m.height());
        reverse_slice_index_bits(&mut halve_inv_powers);

        // Per row: (lo + hi)/2 = p_e and (lo - hi) * g_inv^i / 2 = p_o, combined as p_e + beta * p_o.
        m.par_rows()
            .zip(halve_inv_powers)
            .map(|(mut row, halve_inv_power)| {
                let (lo, hi) = row.next_tuple().unwrap();
                (lo + hi).halve() + (lo - hi) * beta * halve_inv_power
            })
            .collect()
    }
}
164
impl<Val, Dft, InputMmcs, FriMmcs, Challenge, Challenger> Pcs<Challenge, Challenger>
    for TwoAdicFriPcs<Val, Dft, InputMmcs, FriMmcs>
where
    Val: TwoAdicField,
    Dft: TwoAdicSubgroupDft<Val>,
    InputMmcs: Mmcs<Val>,
    FriMmcs: Mmcs<Challenge>,
    Challenge: ExtensionField<Val>,
    Challenger:
        FieldChallenger<Val> + CanObserve<FriMmcs::Commitment> + GrindingChallenger<Witness = Val>,
{
    type Domain = TwoAdicMultiplicativeCoset<Val>;
    type Commitment = InputMmcs::Commitment;
    type ProverData = InputMmcs::ProverData<RowMajorMatrix<Val>>;
    type EvaluationsOnDomain<'a> = BitReversedMatrixView<RowMajorMatrixView<'a, Val>>;
    type Proof = FriProof<Challenge, FriMmcs, Val, Vec<BatchOpening<Val, InputMmcs>>>;
    type Error = FriError<FriMmcs::Error, InputMmcs::Error>;
    // This implementation does not add zero-knowledge masking to commitments.
    const ZK: bool = false;

    /// Get the unique subgroup `H` of size `|H| = degree`.
    ///
    /// # Panics:
    /// This function will panic if `degree` is not a power of 2 or `degree > (1 << Val::TWO_ADICITY)`.
    fn natural_domain_for_degree(&self, degree: usize) -> Self::Domain {
        TwoAdicMultiplicativeCoset::new(Val::ONE, log2_strict_usize(degree)).unwrap()
    }

    /// Commit to a collection of evaluation matrices.
    ///
    /// Each element of `evaluations` contains a coset `shift * H` and a matrix `mat` with `mat.height() = |H|`.
    /// Interpreting each column of `mat` as the evaluations of a polynomial `p_i(x)` over `shift * H`,
    /// this computes the evaluations of `p_i` over `gK` where `g` is the chosen generator of the multiplicative group
    /// of `Val` and `K` is the unique subgroup of order `|H| << self.fri.log_blowup`.
    ///
    /// This then outputs a Merkle commitment to these evaluations.
    ///
    /// # Panics
    /// Panics if any domain's size does not match the height of its matrix.
    fn commit(
        &self,
        evaluations: impl IntoIterator<Item = (Self::Domain, RowMajorMatrix<Val>)>,
    ) -> (Self::Commitment, Self::ProverData) {
        let ldes: Vec<_> = evaluations
            .into_iter()
            .map(|(domain, evals)| {
                assert_eq!(domain.size(), evals.height());
                // coset_lde_batch converts from evaluations over `xH` to evaluations over `shift * x * K`.
                // Hence, letting `shift = g/x` the output will be evaluations over `gK` as desired.
                // When `x = g`, we could just use the standard LDE but currently this doesn't seem
                // to give a meaningful performance boost.
                let shift = Val::GENERATOR / domain.shift();
                // Compute the LDE with blowup factor fri.log_blowup.
                // We bit reverse as this is required by our implementation of the FRI protocol.
                self.dft
                    .coset_lde_batch(evals, self.fri.log_blowup, shift)
                    .bit_reverse_rows()
                    .to_row_major_matrix()
            })
            .collect();

        // Commit to the bit-reversed LDEs.
        self.mmcs.commit(ldes)
    }

    /// Given the evaluations on a domain `gH`, return the evaluations on a different domain `g'K`.
    ///
    /// Arguments:
    /// - `prover_data`: The prover data containing all committed evaluation matrices.
    /// - `idx`: The index of the matrix containing the evaluations we want. These evaluations
    ///   are assumed to be over the coset `gH` where `g = Val::GENERATOR`.
    /// - `domain`: The domain `g'K` on which to get evaluations on. Currently, this assumes that
    ///   `g' = g` and `K` is a subgroup of `H` and panics if this is not the case.
    fn get_evaluations_on_domain<'a>(
        &self,
        prover_data: &'a Self::ProverData,
        idx: usize,
        domain: Self::Domain,
    ) -> Self::EvaluationsOnDomain<'a> {
        // todo: handle extrapolation for LDEs we don't have
        assert_eq!(domain.shift(), Val::GENERATOR);
        let lde = self.mmcs.get_matrices(prover_data)[idx];
        assert!(lde.height() >= domain.size());
        // As `lde` is stored bit-reversed, its first `domain.size()` rows are exactly
        // the evaluations over the subgroup of that size; un-bit-reverse them for the caller.
        lde.split_rows(domain.size()).0.bit_reverse_rows()
    }

    /// Open a batch of matrices at a collection of points.
    ///
    /// Returns the opened values along with a proof.
    ///
    /// This function assumes that all matrices correspond to evaluations over the
    /// coset `gH` where `g = Val::GENERATOR` and `H` is a subgroup of appropriate size depending on the
    /// matrix.
    fn open(
        &self,
        // For each multi-matrix commitment,
        commitment_data_with_opening_points: Vec<(
            // The matrices and auxiliary prover data
            &Self::ProverData,
            // for each matrix,
            Vec<
                // points to open
                Vec<Challenge>,
            >,
        )>,
        challenger: &mut Challenger,
    ) -> (OpenedValues<Challenge>, Self::Proof) {
        /*

        A quick rundown of the optimizations in this function:
        We are trying to compute sum_i alpha^i * (p(X) - y)/(X - z),
        for each z an opening point, y = p(z). Each p(X) is given as evaluations in bit-reversed order
        in the columns of the matrices. y is computed by barycentric interpolation.
        X and p(X) are in the base field; alpha, y and z are in the extension.
        The primary goal is to minimize extension multiplications.

        - Instead of computing all alpha^i, we just compute alpha^i for i up to the largest width
        of a matrix, then multiply by an "alpha offset" when accumulating.
              a^0 x0 + a^1 x1 + a^2 x2 + a^3 x3 + ...
            = ( a^0 x0 + a^1 x1 ) + a^2 ( a^0 x2 + a^1 x3 ) + ...
            (see `alpha_pows`, `alpha_pow_offset`, `num_reduced`)

        - For each unique point z, we precompute 1/(X-z) for the largest subgroup opened at this point.
        Since we compute it in bit-reversed order, smaller subgroups can simply truncate the vector.
            (see `inv_denoms`)

        - Then, for each matrix (with columns p_i) and opening point z, we want:
            for each row (corresponding to subgroup element X):
                reduced[X] += alpha_offset * sum_i [ alpha^i * inv_denom[X] * (p_i[X] - y[i]) ]

            We can factor out inv_denom, and expand what's left:
                reduced[X] += alpha_offset * inv_denom[X] * sum_i [ alpha^i * p_i[X] - alpha^i * y[i] ]

            And separate the sum:
                reduced[X] += alpha_offset * inv_denom[X] * [ sum_i [ alpha^i * p_i[X] ] - sum_i [ alpha^i * y[i] ] ]

            And now the last sum doesn't depend on X, so we can precompute that for the matrix, too.
            So the hot loop (that depends on both X and i) is just:
                sum_i [ alpha^i * p_i[X] ]

            with alpha^i an extension, p_i[X] a base

        */

        // Contained in each `Self::ProverData` is a list of matrices which have been committed to.
        // We extract those matrices to be able to refer to them directly.
        let mats_and_points = commitment_data_with_opening_points
            .iter()
            .map(|(data, points)| {
                let mats = self
                    .mmcs
                    .get_matrices(data)
                    .into_iter()
                    .map(|m| m.as_view())
                    .collect_vec();
                debug_assert_eq!(
                    mats.len(),
                    points.len(),
                    "each matrix should have a corresponding set of evaluation points"
                );
                (mats, points)
            })
            .collect_vec();

        // Find the maximum height and the maximum width of matrices in the batch.
        // These do not need to correspond to the same matrix.
        let (global_max_height, global_max_width) = mats_and_points
            .iter()
            .flat_map(|(mats, _)| mats.iter().map(|m| (m.height(), m.width())))
            .reduce(|(hmax, wmax), (h, w)| (hmax.max(h), wmax.max(w)))
            .expect("No Matrices Supplied?");
        let log_global_max_height = log2_strict_usize(global_max_height);

        // Get all values of the coset `gH` for the largest necessary subgroup `H`.
        // We also bit reverse which means that coset has the nice property that
        // `coset[..2^i]` contains the values of `gK` for `|K| = 2^i`.
        let coset = {
            let coset =
                TwoAdicMultiplicativeCoset::new(Val::GENERATOR, log_global_max_height).unwrap();
            let mut coset_points = coset.iter().collect();
            reverse_slice_index_bits(&mut coset_points);
            coset_points
        };

        // For each unique opening point z, we will find the largest degree bound
        // for that point, and precompute 1/(z - X) for the largest subgroup (in bitrev order).
        let inv_denoms = compute_inverse_denominators(&mats_and_points, &coset);

        // Evaluate coset representations and write openings to the challenger
        let all_opened_values = mats_and_points
            .iter()
            .map(|(mats, points)| {
                // For each collection of matrices
                izip!(mats.iter(), points.iter())
                    .map(|(mat, points_for_mat)| {
                        // TODO: This assumes that every input matrix has a blowup of at least self.fri.log_blowup.
                        // If the blow_up factor is smaller than self.fri.log_blowup, this will lead to errors.
                        // If it is bigger, we shouldn't get any errors but it will be slightly slower.
                        // Ideally, polynomials could be passed in with their blow_up factors known.

                        // The point of this correction is that each column of the matrix corresponds to a low degree polynomial.
                        // Hence we can save time by restricting the height of the matrix to be the minimal height which
                        // uniquely identifies the polynomial.
                        let h = mat.height() >> self.fri.log_blowup;

                        // `subgroup` and `mat` are both in bit-reversed order, so we can truncate.
                        let (low_coset, _) = mat.split_rows(h);
                        let coset_h = &coset[..h];

                        points_for_mat
                            .iter()
                            .map(|&point| {
                                let _guard =
                                    info_span!("evaluate matrix", dims = %mat.dimensions())
                                        .entered();

                                // Use Barycentric interpolation to evaluate each column of the matrix at the given point.
                                let ys =
                                    info_span!("compute opened values with Lagrange interpolation")
                                        .in_scope(|| {
                                            // Get the relevant inverse denominators for this point and use these to
                                            // interpolate to get the evaluation of each polynomial in the matrix
                                            // at the desired point.
                                            let inv_denoms = &inv_denoms.get(&point).unwrap()[..h];
                                            interpolate_coset_with_precomputation(
                                                &low_coset,
                                                Val::GENERATOR,
                                                point,
                                                coset_h,
                                                inv_denoms,
                                            )
                                        });
                                // The verifier must observe the openings in this exact order
                                // (see `verify` below).
                                challenger.observe_algebra_slice(&ys);
                                ys
                            })
                            .collect_vec()
                    })
                    .collect_vec()
            })
            .collect_vec();

        // Batch combination challenge

        // Soundness Error:
        // See the discussion in the doc comment of [`prove_fri`]. Essentially, the soundness error
        // for this sample is tightly tied to the soundness error of the FRI protocol.
        // Roughly speaking, at a minimum it is k/|EF| where `k` is the sum of, for each function, the number of
        // points it needs to be opened at. This comes from the fact that we are taking a large linear combination
        // of `(f(zeta) - f(x))/(zeta - x)` for each function `f` and all of `f`'s opening points.
        // In our setup, k is two times the trace width plus the number of quotient polynomials.
        let alpha: Challenge = challenger.sample_algebra_element();

        // We precompute powers of alpha as we need the same powers for each matrix.
        // We compute both a vector of unpacked powers and a vector of packed powers.
        // TODO: It should be possible to refactor this to only use the packed powers but
        // this is not a bottleneck so is not a priority.
        let packed_alpha_powers =
            Challenge::ExtensionPacking::packed_ext_powers_capped(alpha, global_max_width)
                .collect_vec();
        let alpha_powers =
            Challenge::ExtensionPacking::to_ext_iter(packed_alpha_powers.iter().copied())
                .collect_vec();

        // Now that we have sent the openings to the verifier, it remains to prove
        // that those openings are correct.

        // Given a low degree polynomial `f(x)` with claimed evaluation `f(zeta)`, we can check
        // that `f(zeta)` is correct by doing a low degree test on `(f(zeta) - f(x))/(zeta - x)`.
        // We will use `alpha` to batch together both different claimed openings `zeta` and
        // different polynomials `f` whose evaluation vectors have the same height.

        // TODO: If we allow different polynomials to have different blow_up factors
        // we may need to revisit this and to ensure it is safe to batch them together.

        // num_reduced records the number of (function, opening point) pairs for each `log_height`.
        // TODO: This should really be `[0; Val::TWO_ADICITY]` but that runs into issues with generics.
        let mut num_reduced = [0; 32];

        // For each `log_height` in `0..32`, reduced_openings will contain either `None`
        // if there are no matrices of that height, or `Some(vec)` where `vec` is equal to
        // a weighted sum of `(f(zeta) - f(x))/(zeta - x)` over all `f`'s of that height and
        // for each `f`, all opening points `zeta`. The sum is weighted by powers of the challenge alpha.
        let mut reduced_openings: [_; 32] = core::array::from_fn(|_| None);

        for ((mats, points), openings_for_round) in
            mats_and_points.iter().zip(all_opened_values.iter())
        {
            for (mat, points_for_mat, openings_for_mat) in
                izip!(mats.iter(), points.iter(), openings_for_round.iter())
            {
                let _guard =
                    info_span!("reduce matrix quotient", dims = %mat.dimensions()).entered();

                let log_height = log2_strict_usize(mat.height());

                // If this is our first matrix at this height, initialise reduced_openings to zero.
                // Otherwise, get a mutable reference to it.
                let reduced_opening_for_log_height = reduced_openings[log_height]
                    .get_or_insert_with(|| vec![Challenge::ZERO; mat.height()]);
                debug_assert_eq!(reduced_opening_for_log_height.len(), mat.height());

                // Treating our matrix M as the evaluations of functions f_0, f_1, ...
                // Compute the evaluations of `Mred(x) = f_0(x) + alpha*f_1(x) + ...`
                let mat_compressed = info_span!("compress mat").in_scope(|| {
                    // This will be reused for all points z which M is opened at so we collect into a vector.
                    mat.rowwise_packed_dot_product::<Challenge>(&packed_alpha_powers)
                        .collect::<Vec<_>>()
                });

                for (&point, openings) in points_for_mat.iter().zip(openings_for_mat) {
                    // If we have multiple matrices at the same height, we need to scale alpha to combine them.
                    // This means that reduced_openings will contain:
                    // Mred_0(x) + alpha^{M_0.width()}Mred_1(x) + alpha^{M_0.width() + M_1.width()}Mred_2(x) + ...
                    // Where M_0, M_1, ... are the matrices of the same height.
                    let alpha_pow_offset = alpha.exp_u64(num_reduced[log_height] as u64);

                    // As we have all the openings `f_i(z)`, we can combine them using `alpha`
                    // in an identical way to before to compute `Mred(z)`.
                    let reduced_openings: Challenge =
                        dot_product(alpha_powers.iter().copied(), openings.iter().copied());

                    mat_compressed
                        .par_iter()
                        .zip(reduced_opening_for_log_height.par_iter_mut())
                        // inv_denoms contains `1/(z - x)` for `x` in a coset `gK`.
                        // If `|K| =/= mat.height()` we actually want a subset of this
                        // corresponding to the evaluations over `gH` for `|H| = mat.height()`.
                        // As inv_denoms is bit reversed, the evaluations over `gH` are exactly
                        // the evaluations over `gK` at the indices `0..mat.height()`.
                        // So zip will truncate to the desired smaller length.
                        .zip(inv_denoms.get(&point).unwrap().par_iter())
                        // Map the function `Mred(x) -> (Mred(z) - Mred(x))/(z - x)`
                        // across the evaluation vector of `Mred(x)`. Adjust by alpha_pow_offset
                        // as needed.
                        .for_each(|((&reduced_row, ro), &inv_denom)| {
                            *ro += alpha_pow_offset * (reduced_openings - reduced_row) * inv_denom;
                        });
                    num_reduced[log_height] += mat.width();
                }
            }
        }

        // It remains to prove that all evaluation vectors in reduced_openings correspond to
        // low degree functions.
        // `.rev()` orders the inputs from largest height to smallest.
        let fri_input = reduced_openings.into_iter().rev().flatten().collect_vec();

        let folding: TwoAdicFriFoldingForMmcs<Val, InputMmcs> = TwoAdicFriFolding(PhantomData);

        // Produce the FRI proof.
        let fri_proof = prover::prove_fri(
            &folding,
            &self.fri,
            fri_input,
            challenger,
            log_global_max_height,
            &commitment_data_with_opening_points,
            &self.mmcs,
        );

        (all_opened_values, fri_proof)
    }

    /// Verify a batch of opening proofs.
    ///
    /// Replays the prover's transcript by observing every claimed evaluation in
    /// the same order the prover sent them, then delegates the low degree check
    /// to [`verifier::verify_fri`].
    fn verify(
        &self,
        // For each commitment:
        commitments_with_opening_points: Vec<
            CommitmentWithOpeningPoints<Challenge, Self::Commitment, Self::Domain>,
        >,
        proof: &Self::Proof,
        challenger: &mut Challenger,
    ) -> Result<(), Self::Error> {
        // Write all evaluations to challenger.
        // Need to ensure to do this in the same order as the prover.
        for (_, round) in &commitments_with_opening_points {
            for (_, mat) in round {
                for (_, point) in mat {
                    challenger.observe_algebra_slice(point);
                }
            }
        }

        let folding: TwoAdicFriFoldingForMmcs<Val, InputMmcs> = TwoAdicFriFolding(PhantomData);

        verifier::verify_fri(
            &folding,
            &self.fri,
            proof,
            challenger,
            &commitments_with_opening_points,
            &self.mmcs,
        )?;

        Ok(())
    }
}
556
557/// Compute vectors of inverse denominators for each unique opening point.
558///
559/// Arguments:
560/// - `mats_and_points` is a list of matrices and for each matrix a list of points. We assume that
561///    the total number of distinct points is very small as several methods contained herein are `O(n^2)`
562///    in the number of points.
563/// - `coset` is the set of points `gH` where `H` a two-adic subgroup such that `|H|` is greater
564///     than or equal to the largest height of any matrix in `mats_and_points`. The values
565///     in `coset` must be in bit-reversed order.
566///
567/// For each point `z`, let `M` be the matrix of largest height which opens at `z`.
568/// let `H_z` be the unique subgroup of order `M.height()`. Compute the vector of
569/// `1/(z - x)` for `x` in `gH_z`.
570///
571/// Return a LinearMap which allows us to recover the computed vectors for each `z`.
572#[instrument(skip_all)]
573fn compute_inverse_denominators<F: TwoAdicField, EF: ExtensionField<F>, M: Matrix<F>>(
574    mats_and_points: &[(Vec<M>, &Vec<Vec<EF>>)],
575    coset: &[F],
576) -> LinearMap<EF, Vec<EF>> {
577    // For each `z`, find the maximal height of any matrix which we need to
578    // open at `z`.
579    let mut max_log_height_for_point: LinearMap<EF, usize> = LinearMap::new();
580    for (mats, points) in mats_and_points {
581        for (mat, points_for_mat) in izip!(mats, *points) {
582            let log_height = log2_strict_usize(mat.height());
583            for &z in points_for_mat {
584                if let Some(lh) = max_log_height_for_point.get_mut(&z) {
585                    *lh = core::cmp::max(*lh, log_height);
586                } else {
587                    max_log_height_for_point.insert(z, log_height);
588                }
589            }
590        }
591    }
592
593    // Compute the inverse denominators for each point `z`.
594    max_log_height_for_point
595        .into_iter()
596        .map(|(z, log_height)| {
597            (
598                z,
599                batch_multiplicative_inverse(
600                    // As coset is stored in bit-reversed order,
601                    // we can just take the first `2^log_height` elements.
602                    &coset[..(1 << log_height)]
603                        .iter()
604                        .map(|&x| z - x)
605                        .collect_vec(),
606                ),
607            )
608        })
609        .collect()
610}