use super::{FieldChip, PrimeFieldChip, Selectable};
use crate::bigint::{
    add_no_carry, big_is_equal, big_is_zero, carry_mod, check_carry_mod_to_zero, mul_no_carry,
    scalar_mul_and_add_no_carry, scalar_mul_no_carry, select, select_by_indicator, sub,
    sub_no_carry, CRTInteger, FixedCRTInteger, OverflowInteger, ProperCrtUint, ProperUint,
};
use crate::halo2_proofs::halo2curves::CurveAffine;
use halo2_base::gates::RangeChip;
use halo2_base::utils::{BigPrimeField, ScalarField};
use halo2_base::{
    gates::{range::RangeConfig, GateInstructions, RangeInstructions},
    utils::{bigint_to_fe, biguint_to_fe, bit_length, decompose_biguint, fe_to_biguint, modulus},
    AssignedValue, Context,
    QuantumCell::{Constant, Existing},
};
use num_bigint::{BigInt, BigUint};
use num_traits::One;
use std::cmp;
use std::{cmp::max, marker::PhantomData};

pub type BaseFieldChip<'range, C> =
    FpChip<'range, <C as CurveAffine>::ScalarExt, <C as CurveAffine>::Base>;

pub type FpConfig<F> = RangeConfig<F>;

/// Wrapper around a `FieldPoint` witnessing that it is a *reduced* representation of
/// an `Fp` element, i.e., the underlying integer has been constrained to be less than
/// the modulus `p`, so every residue class has a unique representative.
#[derive(Clone, Debug)]
pub struct Reduced<FieldPoint, Fp>(pub(crate) FieldPoint, PhantomData<Fp>);

impl<FieldPoint, Fp> Reduced<FieldPoint, Fp> {
    pub fn as_ref(&self) -> Reduced<&FieldPoint, Fp> {
        Reduced(&self.0, PhantomData)
    }

    pub fn inner(&self) -> &FieldPoint {
        &self.0
    }
}

impl<F: ScalarField, Fp> From<Reduced<ProperCrtUint<F>, Fp>> for ProperCrtUint<F> {
    fn from(x: Reduced<ProperCrtUint<F>, Fp>) -> Self {
        x.0
    }
}

/// Chip for non-native arithmetic over the prime field `Fp`, with witnesses living in
/// the native field `F`. Elements are kept in CRT form: `num_limbs` limbs of
/// `limb_bits` bits each, together with a single native value mod the `F` modulus.
#[derive(Clone, Debug)]
pub struct FpChip<'range, F: BigPrimeField, Fp: BigPrimeField> {
    pub range: &'range RangeChip<F>,

    pub limb_bits: usize,
    pub num_limbs: usize,

    pub num_limbs_bits: usize,
    pub num_limbs_log2_ceil: usize,
    pub limb_bases: Vec<F>,
    pub limb_base_big: BigInt,
    pub limb_mask: BigUint,

    pub p: BigInt,
    pub p_limbs: Vec<F>,
    pub p_native: F,

    pub native_modulus: BigUint,
    _marker: PhantomData<Fp>,
}

impl<'range, F: BigPrimeField, Fp: BigPrimeField> FpChip<'range, F, Fp> {
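    /// Creates a new [FpChip] for the prime field `Fp`, using `num_limbs` limbs of
    /// `limb_bits` bits each to represent elements.
    ///
    /// A construction sketch; the limb parameters and the `range_chip` binding below
    /// are illustrative, not prescribed by this crate:
    /// ```ignore
    /// // e.g., three 88-bit limbs comfortably cover a ~254-bit modulus
    /// let fp_chip = FpChip::<F, Fq>::new(&range_chip, 88, 3);
    /// ```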
    pub fn new(range: &'range RangeChip<F>, limb_bits: usize, num_limbs: usize) -> Self {
        assert!(limb_bits > 0);
        assert!(num_limbs > 0);
        assert!(limb_bits <= F::CAPACITY as usize);
        let limb_mask = (BigUint::from(1u64) << limb_bits) - 1usize;
        let p = modulus::<Fp>();
        let p_limbs = decompose_biguint(&p, num_limbs, limb_bits);
        let native_modulus = modulus::<F>();
        let p_native = biguint_to_fe(&(&p % &native_modulus));

        // Precompute successive powers of the limb base 2^limb_bits; these are used to
        // recombine a limb decomposition into a native field element.
        let limb_base = biguint_to_fe::<F>(&(BigUint::one() << limb_bits));
        let mut limb_bases = Vec::with_capacity(num_limbs);
        limb_bases.push(F::ONE);
        while limb_bases.len() != num_limbs {
            limb_bases.push(limb_base * limb_bases.last().unwrap());
        }

        Self {
            range,
            limb_bits,
            num_limbs,
            num_limbs_bits: bit_length(num_limbs as u64),
            num_limbs_log2_ceil: bit_length(num_limbs as u64),
            limb_bases,
            limb_base_big: BigInt::one() << limb_bits,
            limb_mask,
            p: p.into(),
            p_limbs,
            p_native,
            native_modulus,
            _marker: PhantomData,
        }
    }

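    /// Constrains `a < p` by comparing limbs against the limbs of `p` from least to
    /// most significant, threading a borrow bit through each comparison and asserting
    /// the final borrow equals 1.
    ///
    /// A usage sketch, assuming a live `ctx` and with `Fq` standing in for `Fp`:
    /// ```ignore
    /// let x = fp_chip.load_private(ctx, Fq::from(42u64));
    /// fp_chip.enforce_less_than_p(ctx, x);
    /// ```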
    pub fn enforce_less_than_p(&self, ctx: &mut Context<F>, a: ProperCrtUint<F>) {
        // Compare limb by limb, carrying a borrow bit forward; `a < p` iff the final
        // borrow is 1.
        let mut borrow: Option<AssignedValue<F>> = None;
        for (&p_limb, a_limb) in self.p_limbs.iter().zip(a.0.truncation.limbs) {
            let lt = match borrow {
                None => self.range.is_less_than(ctx, a_limb, Constant(p_limb), self.limb_bits),
                Some(borrow) => {
                    let plus_borrow = self.gate().add(ctx, Constant(p_limb), borrow);
                    self.range.is_less_than(
                        ctx,
                        Existing(a_limb),
                        Existing(plus_borrow),
                        self.limb_bits,
                    )
                }
            };
            borrow = Some(lt);
        }
        self.gate().assert_is_const(ctx, &borrow.unwrap(), &F::ONE);
    }

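    /// Loads the constant `a` as a fixed [ProperCrtUint] (no range check is needed,
    /// since the limb decomposition is computed out of circuit from a known constant).
    ///
    /// A usage sketch:
    /// ```ignore
    /// let seven = fp_chip.load_constant_uint(ctx, BigUint::from(7u64));
    /// ```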
    pub fn load_constant_uint(&self, ctx: &mut Context<F>, a: BigUint) -> ProperCrtUint<F> {
        FixedCRTInteger::from_native(a, self.num_limbs, self.limb_bits).assign(
            ctx,
            self.limb_bits,
            self.native_modulus(),
        )
    }
}

impl<F: BigPrimeField, Fp: BigPrimeField> PrimeFieldChip<F> for FpChip<'_, F, Fp> {
    fn num_limbs(&self) -> usize {
        self.num_limbs
    }
    fn limb_mask(&self) -> &BigUint {
        &self.limb_mask
    }
    fn limb_bases(&self) -> &[F] {
        &self.limb_bases
    }
}

impl<'range, F: BigPrimeField, Fp: BigPrimeField> FieldChip<F> for FpChip<'range, F, Fp> {
    const PRIME_FIELD_NUM_BITS: u32 = Fp::NUM_BITS;
    type UnsafeFieldPoint = CRTInteger<F>;
    type FieldPoint = ProperCrtUint<F>;
    type ReducedFieldPoint = Reduced<ProperCrtUint<F>, Fp>;
    type FieldType = Fp;
    type RangeChip = RangeChip<F>;

    fn native_modulus(&self) -> &BigUint {
        &self.native_modulus
    }
    fn range(&self) -> &'range Self::RangeChip {
        self.range
    }
    fn limb_bits(&self) -> usize {
        self.limb_bits
    }

    fn get_assigned_value(&self, x: &CRTInteger<F>) -> Fp {
        bigint_to_fe(&(&x.value % &self.p))
    }

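    /// Loads `a` as a private witness: decomposes it into limbs, assigns them, and
    /// range checks the result to `Fp::NUM_BITS` bits. Note this alone does not
    /// constrain the value to be `< p`; use [Self::enforce_less_than] when a reduced
    /// representative is required.
    ///
    /// A usage sketch, with `Fq` standing in for `Fp`:
    /// ```ignore
    /// let x = fp_chip.load_private(ctx, Fq::from(5u64));
    /// ```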
    fn load_private(&self, ctx: &mut Context<F>, a: Fp) -> ProperCrtUint<F> {
        let a = fe_to_biguint(&a);
        let a_vec = decompose_biguint::<F>(&a, self.num_limbs, self.limb_bits);
        let limbs = ctx.assign_witnesses(a_vec);

        let a_loaded =
            ProperUint(limbs).into_crt(ctx, self.gate(), a, &self.limb_bases, self.limb_bits);

        self.range_check(ctx, a_loaded.clone(), Self::PRIME_FIELD_NUM_BITS as usize);
        a_loaded
    }

    fn load_constant(&self, ctx: &mut Context<F>, a: Fp) -> ProperCrtUint<F> {
        self.load_constant_uint(ctx, fe_to_biguint(&a))
    }

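    /// Adds `a + b` limb-wise with no carry propagation and no reduction mod `p`, so
    /// result limbs may exceed `limb_bits`. Callers are responsible for eventually
    /// reducing via [Self::carry_mod] or checking via [Self::check_carry_mod_to_zero].
    ///
    /// A usage sketch:
    /// ```ignore
    /// let sum = fp_chip.add_no_carry(ctx, x, y);
    /// let sum = fp_chip.carry_mod(ctx, sum); // back to a proper element of Fp
    /// ```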
    fn add_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<CRTInteger<F>>,
        b: impl Into<CRTInteger<F>>,
    ) -> CRTInteger<F> {
        add_no_carry::crt(self.gate(), ctx, a.into(), b.into())
    }

    fn add_constant_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<CRTInteger<F>>,
        c: Fp,
    ) -> CRTInteger<F> {
        let c = FixedCRTInteger::from_native(fe_to_biguint(&c), self.num_limbs, self.limb_bits);
        let c_native = biguint_to_fe::<F>(&(&c.value % modulus::<F>()));
        let a = a.into();
        let mut limbs = Vec::with_capacity(a.truncation.limbs.len());
        for (a_limb, c_limb) in a.truncation.limbs.into_iter().zip(c.truncation.limbs) {
            let limb = self.gate().add(ctx, a_limb, Constant(c_limb));
            limbs.push(limb);
        }
        let native = self.gate().add(ctx, a.native, Constant(c_native));
        let trunc =
            OverflowInteger::new(limbs, max(a.truncation.max_limb_bits, self.limb_bits) + 1);
        let value = a.value + BigInt::from(c.value);

        CRTInteger::new(trunc, native, value)
    }

    fn sub_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<CRTInteger<F>>,
        b: impl Into<CRTInteger<F>>,
    ) -> CRTInteger<F> {
        sub_no_carry::crt::<F>(self.gate(), ctx, a.into(), b.into())
    }

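    /// Computes `-a mod p` as `p - a`, with a select at the end so that `a = 0` maps
    /// to `0` rather than `p`, keeping the output in `[0, p)`.
    ///
    /// A usage sketch:
    /// ```ignore
    /// let neg_x = fp_chip.negate(ctx, x);
    /// ```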
    fn negate(&self, ctx: &mut Context<F>, a: ProperCrtUint<F>) -> ProperCrtUint<F> {
        // Compute p - a and constrain no underflow, i.e., a <= p.
        let p = self.load_constant_uint(ctx, self.p.to_biguint().unwrap());
        let (out_or_p, underflow) =
            sub::crt(self.range(), ctx, p, a.clone(), self.limb_bits, self.limb_bases[1]);
        self.gate().assert_is_const(ctx, &underflow, &F::ZERO);

        // If a == 0, return a itself (i.e., 0) instead of p, staying in [0, p).
        let a_is_zero = big_is_zero::positive(self.gate(), ctx, a.0.truncation.clone());
        ProperCrtUint(select::crt(self.gate(), ctx, a.0, out_or_p, a_is_zero))
    }

    fn scalar_mul_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<CRTInteger<F>>,
        c: i64,
    ) -> CRTInteger<F> {
        scalar_mul_no_carry::crt(self.gate(), ctx, a.into(), c)
    }

    fn scalar_mul_and_add_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<CRTInteger<F>>,
        b: impl Into<CRTInteger<F>>,
        c: i64,
    ) -> CRTInteger<F> {
        scalar_mul_and_add_no_carry::crt(self.gate(), ctx, a.into(), b.into(), c)
    }

    fn mul_no_carry(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<CRTInteger<F>>,
        b: impl Into<CRTInteger<F>>,
    ) -> CRTInteger<F> {
        mul_no_carry::crt(self.gate(), ctx, a.into(), b.into(), self.num_limbs_log2_ceil)
    }

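    /// Constrains that the overflowing integer `a` is congruent to `0 mod p` without
    /// materializing a reduced output, which is cheaper than a full [Self::carry_mod]
    /// when only the congruence matters.
    ///
    /// A sketch of the typical pattern, asserting `x * y == z` in `Fp`:
    /// ```ignore
    /// let prod = fp_chip.mul_no_carry(ctx, x, y);
    /// let diff = fp_chip.sub_no_carry(ctx, prod, z);
    /// fp_chip.check_carry_mod_to_zero(ctx, diff);
    /// ```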
    fn check_carry_mod_to_zero(&self, ctx: &mut Context<F>, a: CRTInteger<F>) {
        check_carry_mod_to_zero::crt::<F>(
            self.range(),
            ctx,
            a,
            self.num_limbs_bits,
            &self.p,
            &self.p_limbs,
            self.p_native,
            self.limb_bits,
            &self.limb_bases,
            &self.limb_base_big,
        )
    }

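    /// Reduces the overflowing integer `a` modulo `p`, returning a [ProperCrtUint]
    /// whose limbs are range checked back to `limb_bits` bits.
    ///
    /// A usage sketch:
    /// ```ignore
    /// let prod = fp_chip.mul_no_carry(ctx, x, y);
    /// let prod = fp_chip.carry_mod(ctx, prod); // x * y mod p
    /// ```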
    fn carry_mod(&self, ctx: &mut Context<F>, a: CRTInteger<F>) -> ProperCrtUint<F> {
        carry_mod::crt::<F>(
            self.range(),
            ctx,
            a,
            self.num_limbs_bits,
            &self.p,
            &self.p_limbs,
            self.p_native,
            self.limb_bits,
            &self.limb_bases,
            &self.limb_base_big,
        )
    }

    fn range_check(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<ProperCrtUint<F>>,
        max_bits: usize,
    ) {
        let n = self.limb_bits;
        let a = a.into();
        let mut remaining_bits = max_bits;

        debug_assert!(a.0.value.bits() as usize <= max_bits);

        // Range check each limb to at most `n` bits, with the final limbs absorbing
        // whatever remains of `max_bits`.
        for cell in a.0.truncation.limbs {
            let limb_bits = cmp::min(n, remaining_bits);
            remaining_bits -= limb_bits;
            self.range.range_check(ctx, cell, limb_bits);
        }
    }

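    /// Constrains `a < p` and wraps the result in [Reduced], recording that `a` is the
    /// unique representative of its residue class. This is a prerequisite for
    /// [Self::is_equal_unenforced], which compares limbs directly.
    ///
    /// A usage sketch:
    /// ```ignore
    /// let xr = fp_chip.enforce_less_than(ctx, x);
    /// let yr = fp_chip.enforce_less_than(ctx, y);
    /// let eq = fp_chip.is_equal_unenforced(ctx, xr, yr);
    /// ```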
    fn enforce_less_than(
        &self,
        ctx: &mut Context<F>,
        a: ProperCrtUint<F>,
    ) -> Reduced<ProperCrtUint<F>, Fp> {
        self.enforce_less_than_p(ctx, a.clone());
        Reduced(a, PhantomData)
    }

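    /// Returns 1 iff all limbs of `a` are zero. "Soft" because `a` is not constrained
    /// to be `< p`: the non-canonical value `a = p` would return 0 here even though
    /// `p ≡ 0 (mod p)`.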
    fn is_soft_zero(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<ProperCrtUint<F>>,
    ) -> AssignedValue<F> {
        let a = a.into();
        big_is_zero::positive(self.gate(), ctx, a.0.truncation)
    }

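    /// Returns 1 iff `0 < a < p` as integers. The `a < p` condition is observed from
    /// the borrow of `a - p` rather than enforced, hence "soft": the non-canonical
    /// value `a = p` returns 0.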
    fn is_soft_nonzero(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<ProperCrtUint<F>>,
    ) -> AssignedValue<F> {
        let a = a.into();
        let is_zero = big_is_zero::positive(self.gate(), ctx, a.0.truncation.clone());
        let is_nonzero = self.gate().not(ctx, is_zero);

        // Compute a - p; the borrow `underflow` is nonzero iff a < p.
        let p = self.load_constant_uint(ctx, self.p.to_biguint().unwrap());
        let (_, underflow) =
            sub::crt::<F>(self.range(), ctx, a, p, self.limb_bits, self.limb_bases[1]);
        let is_underflow_zero = self.gate().is_zero(ctx, underflow);
        let no_underflow = self.gate().not(ctx, is_underflow_zero);

        self.gate().and(ctx, is_nonzero, no_underflow)
    }

    fn is_zero(&self, ctx: &mut Context<F>, a: impl Into<ProperCrtUint<F>>) -> AssignedValue<F> {
        let a = a.into();
        // Constrain a < p first so that zero has a unique representation.
        self.enforce_less_than_p(ctx, a.clone());
        big_is_zero::positive(self.gate(), ctx, a.0.truncation)
    }

    fn is_equal_unenforced(
        &self,
        ctx: &mut Context<F>,
        a: Reduced<ProperCrtUint<F>, Fp>,
        b: Reduced<ProperCrtUint<F>, Fp>,
    ) -> AssignedValue<F> {
        big_is_equal::assign::<F>(self.gate(), ctx, a.0, b.0)
    }

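    /// Constrains `a == b` as `Fp` elements: the limbs are constrained pairwise equal,
    /// and both inputs are constrained to be `< p`, so limb equality coincides with
    /// equality mod `p`.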
    fn assert_equal(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<ProperCrtUint<F>>,
        b: impl Into<ProperCrtUint<F>>,
    ) {
        let a = a.into();
        let b = b.into();
        // The native values are derived from the limbs, so constraining the limbs
        // pairwise equal suffices.
        for (limb_a, limb_b) in a.limbs().iter().zip(b.limbs().iter()) {
            ctx.constrain_equal(limb_a, limb_b);
        }
        self.enforce_less_than_p(ctx, a);
        self.enforce_less_than_p(ctx, b);
    }
}

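/// Conditional selection of CRT field elements: `select` returns `a` when `sel = 1`
/// and `b` when `sel = 0`.
///
/// A usage sketch, where `sel` is a previously constrained bit:
/// ```ignore
/// let out = fp_chip.select(ctx, x, y, sel); // out = if sel { x } else { y }
/// ```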
impl<F: BigPrimeField, Fp: BigPrimeField> Selectable<F, CRTInteger<F>> for FpChip<'_, F, Fp> {
    fn select(
        &self,
        ctx: &mut Context<F>,
        a: CRTInteger<F>,
        b: CRTInteger<F>,
        sel: AssignedValue<F>,
    ) -> CRTInteger<F> {
        select::crt(self.gate(), ctx, a, b, sel)
    }

    fn select_by_indicator(
        &self,
        ctx: &mut Context<F>,
        a: &impl AsRef<[CRTInteger<F>]>,
        coeffs: &[AssignedValue<F>],
    ) -> CRTInteger<F> {
        select_by_indicator::crt(self.gate(), ctx, a.as_ref(), coeffs, &self.limb_bases)
    }
}

impl<F: BigPrimeField, Fp: BigPrimeField> Selectable<F, ProperCrtUint<F>> for FpChip<'_, F, Fp> {
    fn select(
        &self,
        ctx: &mut Context<F>,
        a: ProperCrtUint<F>,
        b: ProperCrtUint<F>,
        sel: AssignedValue<F>,
    ) -> ProperCrtUint<F> {
        ProperCrtUint(select::crt(self.gate(), ctx, a.0, b.0, sel))
    }

    fn select_by_indicator(
        &self,
        ctx: &mut Context<F>,
        a: &impl AsRef<[ProperCrtUint<F>]>,
        coeffs: &[AssignedValue<F>],
    ) -> ProperCrtUint<F> {
        let out = select_by_indicator::crt(self.gate(), ctx, a.as_ref(), coeffs, &self.limb_bases);
        ProperCrtUint(out)
    }
}

/// Selecting between reduced values yields one of the inputs, so reducedness is
/// preserved and the wrapper is transparent to any chip that can select the inner type.
impl<F: BigPrimeField, Fp, Pt: Clone, FC> Selectable<F, Reduced<Pt, Fp>> for FC
where
    FC: Selectable<F, Pt>,
{
    fn select(
        &self,
        ctx: &mut Context<F>,
        a: Reduced<Pt, Fp>,
        b: Reduced<Pt, Fp>,
        sel: AssignedValue<F>,
    ) -> Reduced<Pt, Fp> {
        Reduced(self.select(ctx, a.0, b.0, sel), PhantomData)
    }

    fn select_by_indicator(
        &self,
        ctx: &mut Context<F>,
        a: &impl AsRef<[Reduced<Pt, Fp>]>,
        coeffs: &[AssignedValue<F>],
    ) -> Reduced<Pt, Fp> {
        let a = a.as_ref().iter().map(|a| a.0.clone()).collect::<Vec<_>>();
        Reduced(self.select_by_indicator(ctx, &a, coeffs), PhantomData)
    }
}