halo2_ecc/fields/
mod.rs

1use crate::halo2_proofs::arithmetic::Field;
2use halo2_base::{
3    gates::{GateInstructions, RangeInstructions},
4    utils::{BigPrimeField, ScalarField},
5    AssignedValue, Context,
6};
7use num_bigint::BigUint;
8use serde::{Deserialize, Serialize};
9use std::fmt::Debug;
10
11pub mod fp;
12pub mod fp12;
13pub mod fp2;
14pub mod vector;
15
16#[cfg(test)]
17mod tests;
18
/// Trait for common functionality for finite field chips.
/// Primarily intended to emulate a "non-native" finite field using "native" values in a prime field `F`.
/// Most functions are designed for the case when the non-native field is larger than the native field, but
/// the trait can still be implemented and used in other cases.
pub trait FieldChip<F: BigPrimeField>: Clone + Send + Sync {
    /// Number of bits in the modulus of the emulated prime field.
    const PRIME_FIELD_NUM_BITS: u32;

    /// A representation of a field element that is used for intermediate computations.
    /// The representation can have "overflows" (e.g., overflow limbs or negative limbs).
    type UnsafeFieldPoint: Clone
        + Debug
        + Send
        + Sync
        + From<Self::FieldPoint>
        + for<'a> From<&'a Self::UnsafeFieldPoint>
        + for<'a> From<&'a Self::FieldPoint>; // Cloning all the time impacts readability, so we allow references to be cloned into owned values

    /// The "proper" representation of a field element. Allowed to be a non-unique representation of a field element (e.g., can be greater than modulus)
    type FieldPoint: Clone
        + Debug
        + Send
        + Sync
        + From<Self::ReducedFieldPoint>
        + for<'a> From<&'a Self::FieldPoint>;

    /// A proper representation of field elements that guarantees a unique representation of each field element. Typically this means Uints that are less than the modulus.
    type ReducedFieldPoint: Clone + Debug + Send + Sync;

    /// A type implementing `Field` trait to help with witness generation (for example with inverse)
    type FieldType: Field;
    /// The chip providing range check (and, through it, basic gate) instructions over the native field `F`.
    type RangeChip: RangeInstructions<F>;

    /// The modulus of the native field `F`, as a [BigUint].
    fn native_modulus(&self) -> &BigUint;
    /// The basic gate chip, obtained from the range chip.
    fn gate(&self) -> &<Self::RangeChip as RangeInstructions<F>>::Gate {
        self.range().gate()
    }
    /// The range chip used to perform range checks.
    fn range(&self) -> &Self::RangeChip;
    /// Number of bits in each limb of the bigint representation of a field element.
    fn limb_bits(&self) -> usize;

    /// Returns the value of `x` as a [Self::FieldType] (used during witness generation).
    fn get_assigned_value(&self, x: &Self::UnsafeFieldPoint) -> Self::FieldType;

    /// Assigns `fe` as private witness. Note that the witness may **not** be constrained to be a unique representation of the field element `fe`.
    fn load_private(&self, ctx: &mut Context<F>, fe: Self::FieldType) -> Self::FieldPoint;

    /// Assigns `fe` as private witness and constrains the witness to be in reduced form.
    fn load_private_reduced(
        &self,
        ctx: &mut Context<F>,
        fe: Self::FieldType,
    ) -> Self::ReducedFieldPoint {
        let fe = self.load_private(ctx, fe);
        self.enforce_less_than(ctx, fe)
    }

    /// Assigns `fe` as constant.
    fn load_constant(&self, ctx: &mut Context<F>, fe: Self::FieldType) -> Self::FieldPoint;

    /// output: `a + b` (no carry, so the output may be an overflowed representation)
    fn add_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::UnsafeFieldPoint>,
        b: impl Into<Self::UnsafeFieldPoint>,
    ) -> Self::UnsafeFieldPoint;

    /// output: `a + c`
    fn add_constant_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::UnsafeFieldPoint>,
        c: Self::FieldType,
    ) -> Self::UnsafeFieldPoint;

    /// output: `a - b` (no carry, so the output may have negative limbs)
    fn sub_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::UnsafeFieldPoint>,
        b: impl Into<Self::UnsafeFieldPoint>,
    ) -> Self::UnsafeFieldPoint;

    /// output: `-a` as a proper field point
    fn negate(&self, ctx: &mut Context<F>, a: Self::FieldPoint) -> Self::FieldPoint;

    /// a * c
    fn scalar_mul_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::UnsafeFieldPoint>,
        c: i64,
    ) -> Self::UnsafeFieldPoint;

    /// a * c + b
    fn scalar_mul_and_add_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::UnsafeFieldPoint>,
        b: impl Into<Self::UnsafeFieldPoint>,
        c: i64,
    ) -> Self::UnsafeFieldPoint;

    /// output: `a * b` (no carry / modular reduction performed)
    fn mul_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::UnsafeFieldPoint>,
        b: impl Into<Self::UnsafeFieldPoint>,
    ) -> Self::UnsafeFieldPoint;

    /// Constrains that `a` is congruent to `0` modulo the emulated field modulus.
    fn check_carry_mod_to_zero(&self, ctx: &mut Context<F>, a: Self::UnsafeFieldPoint);

    /// Carries / reduces `a` modulo the emulated field modulus into a proper [Self::FieldPoint].
    fn carry_mod(&self, ctx: &mut Context<F>, a: Self::UnsafeFieldPoint) -> Self::FieldPoint;

    /// Constrains that `a` fits in at most `max_bits` bits.
    fn range_check(&self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, max_bits: usize);

    /// Constrains that `a` is a reduced representation and returns the wrapped `a`.
    fn enforce_less_than(
        &self,
        ctx: &mut Context<F>,
        a: Self::FieldPoint,
    ) -> Self::ReducedFieldPoint;

    /// Returns 1 iff the underlying big integer for `a` is 0. Otherwise returns 0.
    /// For field extensions, checks coordinate-wise.
    fn is_soft_zero(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::FieldPoint>,
    ) -> AssignedValue<F>;

    /// Constrains that the underlying big integer is in [0, p - 1].
    /// Then returns 1 iff the underlying big integer for `a` is 0. Otherwise returns 0.
    /// For field extensions, checks coordinate-wise.
    fn is_soft_nonzero(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::FieldPoint>,
    ) -> AssignedValue<F>;

    /// Returns 1 iff `a` is `0` as a field element. Otherwise returns 0.
    fn is_zero(&self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>) -> AssignedValue<F>;

    /// Returns 1 iff `a == b` as reduced (unique) representations.
    /// Assumes `a` and `b` have already been constrained to be reduced — hence "unenforced".
    fn is_equal_unenforced(
        &self,
        ctx: &mut Context<F>,
        a: Self::ReducedFieldPoint,
        b: Self::ReducedFieldPoint,
    ) -> AssignedValue<F>;

    /// Constrains that `a == b` as field elements.
    fn assert_equal(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::FieldPoint>,
        b: impl Into<Self::FieldPoint>,
    );

    // =========== default implementations =============

    /// Returns 1 iff `a == b` as field elements. Otherwise returns 0.
    ///
    /// Assuming `a, b` have been range checked to be a proper BigInt:
    /// constrains the witnesses `a, b` to be `< p`, then checks `a == b` as BigInts.
    fn is_equal(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::FieldPoint>,
        b: impl Into<Self::FieldPoint>,
    ) -> AssignedValue<F> {
        let a = self.enforce_less_than(ctx, a.into());
        let b = self.enforce_less_than(ctx, b.into());
        // a.native and b.native are derived from `a.truncation, b.truncation`, so no need to check if they're equal
        self.is_equal_unenforced(ctx, a, b)
    }

    /// Returns `a * b`, fully carried / reduced to a proper [Self::FieldPoint].
    ///
    /// If using `UnsafeFieldPoint`, make sure multiplication does not cause overflow.
    fn mul(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::UnsafeFieldPoint>,
        b: impl Into<Self::UnsafeFieldPoint>,
    ) -> Self::FieldPoint {
        let no_carry = self.mul_no_carry(ctx, a, b);
        self.carry_mod(ctx, no_carry)
    }

    /// Constrains that `b` is nonzero as a field element and then returns `a / b`.
    fn divide(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::FieldPoint>,
        b: impl Into<Self::FieldPoint>,
    ) -> Self::FieldPoint {
        let b = b.into();
        // constrain `b != 0` by forcing the `is_zero` indicator to the constant 0
        let b_is_zero = self.is_zero(ctx, b.clone());
        self.gate().assert_is_const(ctx, &b_is_zero, &F::ZERO);

        self.divide_unsafe(ctx, a.into(), b)
    }

    /// Returns `a / b` without constraining `b` to be nonzero.
    ///
    /// Warning: undefined behavior when `b` is zero.
    ///
    /// `a, b` must be such that `quot * b - a` without carry does not overflow, where `quot` is the output.
    fn divide_unsafe(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::UnsafeFieldPoint>,
        b: impl Into<Self::UnsafeFieldPoint>,
    ) -> Self::FieldPoint {
        let a = a.into();
        let b = b.into();
        let a_val = self.get_assigned_value(&a);
        let b_val = self.get_assigned_value(&b);
        // if `b` is zero, `invert()` returns None and the quotient witness defaults to 0
        // (the undefined-behavior case warned about above)
        let b_inv: Self::FieldType = Option::from(b_val.invert()).unwrap_or_default();
        let quot_val = a_val * b_inv;

        let quot = self.load_private(ctx, quot_val);

        // constrain quot * b - a = 0 mod p
        let quot_b = self.mul_no_carry(ctx, quot.clone(), b);
        let quot_constraint = self.sub_no_carry(ctx, quot_b, a);
        self.check_carry_mod_to_zero(ctx, quot_constraint);

        quot
    }

    /// Constrains that `b` is nonzero as a field element and then returns `-a / b`.
    fn neg_divide(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::FieldPoint>,
        b: impl Into<Self::FieldPoint>,
    ) -> Self::FieldPoint {
        let b = b.into();
        // constrain `b != 0` by forcing the `is_zero` indicator to the constant 0
        let b_is_zero = self.is_zero(ctx, b.clone());
        self.gate().assert_is_const(ctx, &b_is_zero, &F::ZERO);

        self.neg_divide_unsafe(ctx, a.into(), b)
    }

    /// Returns `-a / b` without constraining `b` to be nonzero.
    ///
    /// Warning: undefined behavior when `b` is zero.
    ///
    /// This is usually cheaper constraint-wise than computing -a and then (-a) / b separately.
    fn neg_divide_unsafe(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<Self::UnsafeFieldPoint>,
        b: impl Into<Self::UnsafeFieldPoint>,
    ) -> Self::FieldPoint {
        let a = a.into();
        let b = b.into();
        let a_val = self.get_assigned_value(&a);
        let b_val = self.get_assigned_value(&b);
        // if `b` is zero, `invert()` returns None and the quotient witness defaults to 0
        let b_inv: Self::FieldType = Option::from(b_val.invert()).unwrap_or_default();
        let quot_val = -a_val * b_inv;

        let quot = self.load_private(ctx, quot_val);

        // constrain quot * b + a = 0 mod p
        let quot_b = self.mul_no_carry(ctx, quot.clone(), b);
        let quot_constraint = self.add_no_carry(ctx, quot_b, a);
        self.check_carry_mod_to_zero(ctx, quot_constraint);

        quot
    }
}
279
/// Trait for chips that can conditionally select between assigned values of type `Pt`.
pub trait Selectable<F: ScalarField, Pt> {
    /// Selects between `a` and `b` according to the bit `sel`.
    /// NOTE(review): presumably returns `a` when `sel = 1` and `b` when `sel = 0`,
    /// following halo2-base's `select` convention — confirm against implementations.
    fn select(&self, ctx: &mut Context<F>, a: Pt, b: Pt, sel: AssignedValue<F>) -> Pt;

    /// Selects an entry of `a` using the coefficients `coeffs`.
    /// NOTE(review): presumably `coeffs` is an indicator vector (all zeros except a single one),
    /// in which case the entry of `a` at the index of the one is returned — confirm against implementations.
    fn select_by_indicator(
        &self,
        ctx: &mut Context<F>,
        a: &impl AsRef<[Pt]>,
        coeffs: &[AssignedValue<F>],
    ) -> Pt;
}
290
/// Common functionality for prime field chips.
pub trait PrimeFieldChip<F: BigPrimeField>: FieldChip<F>
where
    Self::FieldType: BigPrimeField,
{
    /// Number of limbs in the bigint representation of a field element.
    fn num_limbs(&self) -> usize;
    /// Bitmask for extracting a single limb.
    /// NOTE(review): presumably `2^limb_bits - 1` — confirm against implementation.
    fn limb_mask(&self) -> &BigUint;
    /// Powers of the limb base as native field elements, used to recompose limbs.
    /// NOTE(review): presumably `[1, 2^limb_bits, 2^(2*limb_bits), ...]` — confirm against implementation.
    fn limb_bases(&self) -> &[F];
}
300
/// Helper trait so we can actually construct and read the coefficients of a field extension element.
/// Needs to be implemented for the Fp2 (and other extension field) structs for the use cases below.
pub trait FieldExtConstructor<Fp: crate::ff::PrimeField, const DEGREE: usize> {
    /// Constructs an extension field element from its `DEGREE` coefficients.
    fn new(c: [Fp; DEGREE]) -> Self;

    /// Returns the coefficients of the extension field element.
    fn coeffs(&self) -> Vec<Fp>;
}
308
/// Strategy for configuring the field chip circuits (serialized in circuit config files).
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum FpStrategy {
    /// The default (and currently only) strategy.
    Simple,
}