p3_field/helpers.rs

use alloc::vec;
use alloc::vec::Vec;
use core::iter::Sum;
use core::mem::{ManuallyDrop, MaybeUninit};
use core::ops::Mul;

use num_bigint::BigUint;
use p3_maybe_rayon::prelude::{IntoParallelRefMutIterator, ParallelIterator};

use crate::field::Field;
use crate::{FieldAlgebra, PackedValue, PrimeField, PrimeField32, TwoAdicField};

/// Computes `Z_H(x)`, where `Z_H` is the zerofier of a multiplicative subgroup of order `2^log_n`.
pub fn two_adic_subgroup_zerofier<F: TwoAdicField>(log_n: usize, x: F) -> F {
    x.exp_power_of_2(log_n) - F::ONE
}

/// Computes `Z_{sH}(x)`, where `Z_{sH}` is the zerofier of the given coset of a multiplicative
/// subgroup of order `2^log_n`.
pub fn two_adic_coset_zerofier<F: TwoAdicField>(log_n: usize, shift: F, x: F) -> F {
    x.exp_power_of_2(log_n) - shift.exp_power_of_2(log_n)
}
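// Why the closed form above works: the coset `shift * H` consists of the `2^log_n` points
// `shift * h` with `h^(2^log_n) = 1`, and these are exactly the roots of
// `x^(2^log_n) = shift^(2^log_n)`. Hence
// `Z_{sH}(x) = prod_{h in H} (x - shift * h) = x^(2^log_n) - shift^(2^log_n)`,
// and taking `shift = 1` recovers `Z_H` from `two_adic_subgroup_zerofier`.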

/// Computes a multiplicative subgroup whose order is known in advance.
pub fn cyclic_subgroup_known_order<F: Field>(
    generator: F,
    order: usize,
) -> impl Iterator<Item = F> + Clone {
    generator.powers().take(order)
}

/// Computes a coset of a multiplicative subgroup whose order is known in advance.
pub fn cyclic_subgroup_coset_known_order<F: Field>(
    generator: F,
    shift: F,
    order: usize,
) -> impl Iterator<Item = F> + Clone {
    generator.shifted_powers(shift).take(order)
}

/// Add two vectors of field elements elementwise.
///
/// Panics if the vectors have different lengths.
#[must_use]
pub fn add_vecs<F: Field>(v: Vec<F>, w: Vec<F>) -> Vec<F> {
    assert_eq!(v.len(), w.len());
    v.into_iter().zip(w).map(|(x, y)| x + y).collect()
}

/// Sum a nonempty iterator of equal-length vectors elementwise.
///
/// Panics if the iterator is empty.
pub fn sum_vecs<F: Field, I: Iterator<Item = Vec<F>>>(iter: I) -> Vec<F> {
    iter.reduce(|v, w| add_vecs(v, w))
        .expect("sum_vecs: empty iterator")
}

/// Multiply each element of a vector by a scalar.
pub fn scale_vec<F: Field>(s: F, vec: Vec<F>) -> Vec<F> {
    vec.into_iter().map(|x| s * x).collect()
}

/// Multiply each element of a slice by a scalar, in place.
///
/// The bulk of the slice is handled with packed field operations (in parallel where available);
/// the leftover suffix is handled with scalar operations.
pub fn scale_slice_in_place<F: Field>(s: F, slice: &mut [F]) {
    let (packed, sfx) = F::Packing::pack_slice_with_suffix_mut(slice);
    let packed_s: F::Packing = s.into();
    packed.par_iter_mut().for_each(|x| *x *= packed_s);
    sfx.iter_mut().for_each(|x| *x *= s);
}

/// `x += y * s`, where `s` is a scalar.
pub fn add_scaled_slice_in_place<F, Y>(x: &mut [F], y: Y, s: F)
where
    F: Field,
    Y: Iterator<Item = F>,
{
    // TODO: Use PackedField
    x.iter_mut().zip(y).for_each(|(x_i, y_i)| *x_i += y_i * s);
}

// The ideas for the following workaround come from the construe crate along with
// the playground example linked in the following comment:
// https://github.com/rust-lang/rust/issues/115403#issuecomment-1701000117

// The goal is to make field_to_array a const function in order
// to allow us to convert FA constants to BinomialExtensionField<FA, D> constants.
//
// The natural approach would be:
// fn field_to_array<FA: FieldAlgebra, const D: usize>(x: FA) -> [FA; D]
//      let mut arr: [FA; D] = [FA::ZERO; D];
//      arr[0] = x;
//      arr
//
// Unfortunately this doesn't compile, as FA does not implement Copy and so may have drop glue,
// which cannot be run in const contexts. Clearly nothing should actually be dropped by the
// above function, but the compiler is unable to determine this.
// There is a Rust issue for this: https://github.com/rust-lang/rust/issues/73255
// but it seems unlikely to be stabilized anytime soon.
//
// The natural workaround for this is to use MaybeUninit and set each element of the array
// separately. This mostly works, but we end up with an array of the form [MaybeUninit<T>; N]
// and there is currently no way in the standard library to convert this to [T; N].
// There is a method on nightly, array_assume_init, so this function should be reworked after
// that has stabilized (more details in Rust issue: https://github.com/rust-lang/rust/issues/96097).
//
// Annoyingly, both transmute and transmute_copy fail here: the first because it cannot handle
// const generics, and the second due to interior mutability and the inability to use &mut in const
// functions.
//
// The solution is to implement the map [MaybeUninit<T>; D] -> MaybeUninit<[T; D]>
// using a union type and ManuallyDrop to essentially do a manual transmute.

union HackyWorkAround<T, const D: usize> {
    complete: ManuallyDrop<MaybeUninit<[T; D]>>,
    elements: ManuallyDrop<[MaybeUninit<T>; D]>,
}

impl<T, const D: usize> HackyWorkAround<T, D> {
    const fn transpose(arr: [MaybeUninit<T>; D]) -> MaybeUninit<[T; D]> {
        // This is safe as [MaybeUninit<T>; D] and MaybeUninit<[T; D]> have the same size
        // and layout regardless of T. Both are an array of the same size as [T; D]
        // with some data potentially not initialized.
        let transpose = Self {
            elements: ManuallyDrop::new(arr),
        };
        unsafe { ManuallyDrop::into_inner(transpose.complete) }
    }
}

/// Extend a field `FA` element `x` to an array of length `D`
/// by padding with zeros.
#[inline]
pub const fn field_to_array<FA: FieldAlgebra, const D: usize>(x: FA) -> [FA; D] {
    let mut arr: [MaybeUninit<FA>; D] = unsafe { MaybeUninit::uninit().assume_init() };

    arr[0] = MaybeUninit::new(x);
    let mut acc = 1;
    loop {
        if acc == D {
            break;
        }
        arr[acc] = MaybeUninit::new(FA::ZERO);
        acc += 1;
    }
    // If the code has reached this point, every element of arr is correctly initialized.
    // Hence we are safe to reinterpret the array as [FA; D].

    unsafe { HackyWorkAround::transpose(arr).assume_init() }
}
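// For example, `field_to_array::<FA, 4>(x)` is `[x, FA::ZERO, FA::ZERO, FA::ZERO]`.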

/// Naive polynomial multiplication, with coefficients in ascending order of degree.
pub fn naive_poly_mul<FA: FieldAlgebra>(a: &[FA], b: &[FA]) -> Vec<FA> {
    // Grade school algorithm
    let mut product = vec![FA::ZERO; a.len() + b.len() - 1];
    for (i, c1) in a.iter().enumerate() {
        for (j, c2) in b.iter().enumerate() {
            product[i + j] += c1.clone() * c2.clone();
        }
    }
    product
}
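// For example, with a = [1, 2] and b = [3, 1] (i.e. 1 + 2x and 3 + x),
// the product is [3, 7, 2], i.e. 3 + 7x + 2x^2.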

/// Expand a product of binomials `(x - roots[0])(x - roots[1])...` into polynomial coefficients,
/// in ascending order of degree.
pub fn binomial_expand<FA: FieldAlgebra>(roots: &[FA]) -> Vec<FA> {
    let mut coeffs = vec![FA::ZERO; roots.len() + 1];
    coeffs[0] = FA::ONE;
    for (i, x) in roots.iter().enumerate() {
        for j in (1..i + 2).rev() {
            coeffs[j] = coeffs[j - 1].clone() - x.clone() * coeffs[j].clone();
        }
        coeffs[0] *= -x.clone();
    }
    coeffs
}
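// For example, with roots = [r, s] this returns `[r * s, -(r + s), 1]`, i.e. the coefficients of
// (x - r)(x - s) = x^2 - (r + s)x + rs in ascending order of degree.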

/// Evaluate the polynomial with coefficients `poly` (in ascending order of degree)
/// at the point `x`, using Horner's method.
pub fn eval_poly<FA: FieldAlgebra>(poly: &[FA], x: FA) -> FA {
    let mut acc = FA::ZERO;
    for coeff in poly.iter().rev() {
        acc *= x.clone();
        acc += coeff.clone();
    }
    acc
}

/// Given an element `x` of a 32-bit prime field with modulus `P`, compute `x / 2`,
/// i.e. the unique field element `y` such that `2 * y = x (mod P)`.
#[inline]
pub fn halve_u32<const P: u32>(input: u32) -> u32 {
    // If the input is even, halving is just a shift right.
    // If the input is odd then, as `P` is odd, `input + P` is even, and in the field
    // `x / 2 = (input + P) / 2 = (input >> 1) + (P + 1) / 2`, which stays below `P`
    // for canonical inputs and cannot overflow a `u32`.
    let shift = (P + 1) >> 1;
    let shr = input >> 1;
    let lo_bit = input & 1;
    let shr_corr = shr + shift;
    if lo_bit == 0 {
        shr
    } else {
        shr_corr
    }
}
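// Example: with P = 17, halve_u32::<17>(5) = (5 >> 1) + (17 + 1) / 2 = 2 + 9 = 11,
// and indeed 2 * 11 = 22 = 5 (mod 17).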

/// Given an element `x` of a 64-bit prime field with modulus `P`, compute `x / 2`,
/// i.e. the unique field element `y` such that `2 * y = x (mod P)`.
#[inline]
pub fn halve_u64<const P: u64>(input: u64) -> u64 {
    // Same approach as `halve_u32`: shift right if even; otherwise add back `(P + 1) / 2`.
    let shift = (P + 1) >> 1;
    let shr = input >> 1;
    let lo_bit = input & 1;
    let shr_corr = shr + shift;
    if lo_bit == 0 {
        shr
    } else {
        shr_corr
    }
}

/// Given a slice of SF elements, reduce them to a TF element using a 2^32-base decomposition.
pub fn reduce_32<SF: PrimeField32, TF: PrimeField>(vals: &[SF]) -> TF {
    let po2 = TF::from_canonical_u64(1u64 << 32);
    let mut result = TF::ZERO;
    for val in vals.iter().rev() {
        result = result * po2 + TF::from_canonical_u32(val.as_canonical_u32());
    }
    result
}
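// Equivalently, `reduce_32` returns `sum_i vals[i] * 2^(32 * i)` reduced into `TF`,
// with `vals[0]` as the least significant base-2^32 digit.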

/// Given an SF element, split it into a vector of TF elements using a 2^64-base decomposition.
///
/// We use a 2^64-base decomposition for a field of size ~2^32 because then the bias will be
/// at most ~1/2^32 for each element after the reduction.
pub fn split_32<SF: PrimeField, TF: PrimeField32>(val: SF, n: usize) -> Vec<TF> {
    let po2 = BigUint::from(1u128 << 64);
    let mut val = val.as_canonical_biguint();
    let mut result = Vec::new();
    for _ in 0..n {
        let mask: BigUint = po2.clone() - BigUint::from(1u128);
        let digit: BigUint = val.clone() & mask;
        let digit_u64s = digit.to_u64_digits();
        if !digit_u64s.is_empty() {
            result.push(TF::from_wrapped_u64(digit_u64s[0]));
        } else {
            result.push(TF::ZERO)
        }
        val /= po2.clone();
    }
    result
}
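// Concretely, the i-th output is the base-2^64 digit `(val >> (64 * i)) mod 2^64`, mapped into
// `TF` via `from_wrapped_u64`. Reducing a full 64-bit digit into a field of size ~2^32 is what
// keeps the per-element bias down to roughly 1/2^32, as noted above.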

/// Maximally generic dot product.
pub fn dot_product<S, LI, RI>(li: LI, ri: RI) -> S
where
    LI: Iterator,
    RI: Iterator,
    LI::Item: Mul<RI::Item>,
    S: Sum<<LI::Item as Mul<RI::Item>>::Output>,
{
    li.zip(ri).map(|(l, r)| l * r).sum()
}
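
// Illustrative sketches of the two helpers that need no field type at all: `dot_product`
// accepts any `Mul`/`Sum`-compatible items, so plain integers work, and `halve_u32` can be
// checked against a toy modulus `P = 17` (chosen purely for readability).
#[cfg(test)]
mod sketch_tests {
    use super::*;

    #[test]
    fn dot_product_on_plain_integers() {
        // 1 * 4 + 2 * 5 + 3 * 6 = 32.
        let dot: i32 = dot_product([1, 2, 3].into_iter(), [4, 5, 6].into_iter());
        assert_eq!(dot, 32);
    }

    #[test]
    fn halve_u32_roundtrip_toy_prime() {
        const P: u32 = 17;
        for x in 0..P {
            // Doubling the halved value must recover `x` modulo `P`.
            assert_eq!((2 * halve_u32::<P>(x)) % P, x);
        }
    }
}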