halo2_base/gates/range/mod.rs
use crate::{
    gates::flex_gate::{FlexGateConfig, GateInstructions, MAX_PHASE},
    halo2_proofs::{
        circuit::{Layouter, Value},
        plonk::{
            Advice, Column, ConstraintSystem, Error, SecondPhase, Selector, TableColumn, ThirdPhase,
        },
        poly::Rotation,
    },
    utils::{
        biguint_to_fe, bit_length, decompose_fe_to_u64_limbs, fe_to_biguint, BigPrimeField,
        ScalarField,
    },
    virtual_region::lookups::LookupAnyManager,
    AssignedValue, Context,
    QuantumCell::{self, Constant, Existing, Witness},
};

use super::flex_gate::{FlexGateConfigParams, GateChip};

use getset::Getters;
use num_bigint::BigUint;
use num_integer::Integer;
use num_traits::One;
use std::{cmp::Ordering, ops::Shl};

/// Configuration for Range Chip
#[derive(Clone, Debug)]
pub struct RangeConfig<F: ScalarField> {
    /// Underlying Gate Configuration
    pub gate: FlexGateConfig<F>,
    /// Special advice (witness) columns used only for lookup tables.
    ///
    /// Each phase of a halo2 circuit has its own set of `lookup_advice` columns.
    ///
    /// * If `gate` has only 1 advice column, lookups are enabled for that column, in which case `lookup_advice` is empty
    /// * If `gate` has more than 1 advice column, some number of user-specified `lookup_advice` columns are added
    ///     * In this case, we don't need a selector, so the corresponding `q_lookup` entry is `None`
    pub lookup_advice: Vec<Vec<Column<Advice>>>,
    /// Selectors enabling the lookup table, one per phase. An entry is `Some` only when the lookup is applied directly to the phase's single advice column; otherwise it is `None` and the dedicated `lookup_advice` columns are used.
    pub q_lookup: Vec<Option<Selector>>,
    /// Column for lookup table values.
    pub lookup: TableColumn,
    /// Defines the number of bits represented in the lookup table [0, 2<sup>lookup_bits</sup>).
    lookup_bits: usize,
}

impl<F: ScalarField> RangeConfig<F> {
    /// Generates a new [RangeConfig] with the specified parameters.
    ///
    /// If `num_columns` is 0, then we assume you do not want to perform any lookups in that phase.
    ///
    /// Panics if `lookup_bits` exceeds `F::S`, the 2-adicity of the scalar field (28 for BN254).
    /// * `meta`: [ConstraintSystem] of the circuit
    /// * `gate_params`: see [FlexGateConfigParams]
    /// * `num_lookup_advice`: Number of `lookup_advice` [Column]s in each phase
    /// * `lookup_bits`: Number of bits represented in the lookup table [0, 2<sup>lookup_bits</sup>)
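    ///
    /// # Example
    ///
    /// A minimal configuration sketch (hypothetical parameter values; `meta` is the circuit's `ConstraintSystem<Fr>`):
    /// ```ignore
    /// let gate_params = FlexGateConfigParams {
    ///     k: 10,                         // the circuit has 2^10 rows
    ///     num_advice_per_phase: vec![2], // 2 advice columns in phase 0
    ///     num_fixed: 1,
    /// };
    /// // 1 dedicated lookup column in phase 0, with an 8-bit lookup table
    /// let config = RangeConfig::<Fr>::configure(&mut meta, gate_params, &[1], 8);
    /// ```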
    pub fn configure(
        meta: &mut ConstraintSystem<F>,
        gate_params: FlexGateConfigParams,
        num_lookup_advice: &[usize],
        lookup_bits: usize,
    ) -> Self {
        assert!(lookup_bits <= F::S as usize);
        // sanity check: only create lookup table if there are lookup_advice columns
        assert!(!num_lookup_advice.is_empty(), "You are creating a RangeConfig but don't seem to need a lookup table, please double-check if you're using lookups correctly. Consider setting lookup_bits = None in BaseConfigParams");

        let lookup = meta.lookup_table_column();

        let gate = FlexGateConfig::configure(meta, gate_params.clone());

        // For now, we apply the same range lookup table to each phase
        let mut q_lookup = Vec::new();
        let mut lookup_advice = Vec::new();
        for (phase, &num_columns) in num_lookup_advice.iter().enumerate() {
            let num_advice = *gate_params.num_advice_per_phase.get(phase).unwrap_or(&0);
            let mut columns = Vec::new();
            // If num_columns is set to 0, then we assume you do not want to perform any lookups in that phase.
            // Disable this optimization in phase > 0 because you might set selectors based on a cell from other columns.
            if phase == 0 && num_advice == 1 && num_columns != 0 {
                q_lookup.push(Some(meta.complex_selector()));
            } else {
                q_lookup.push(None);
                for _ in 0..num_columns {
                    let a = match phase {
                        0 => meta.advice_column(),
                        1 => meta.advice_column_in(SecondPhase),
                        2 => meta.advice_column_in(ThirdPhase),
                        _ => panic!("Currently RangeConfig only supports {MAX_PHASE} phases"),
                    };
                    meta.enable_equality(a);
                    columns.push(a);
                }
            }
            lookup_advice.push(columns);
        }

        let mut config = Self { lookup_advice, q_lookup, lookup, lookup_bits, gate };
        config.create_lookup(meta);

        log::info!("Poisoned rows after RangeConfig::configure {}", meta.minimum_rows());
        config.gate.max_rows = (1 << gate_params.k) - meta.minimum_rows();
        assert!(
            (1 << lookup_bits) <= config.gate.max_rows,
            "lookup table is too large for the circuit degree plus blinding factors!"
        );

        config
    }

    /// Returns the number of bits represented in the lookup table [0, 2<sup>lookup_bits</sup>).
    pub fn lookup_bits(&self) -> usize {
        self.lookup_bits
    }

    /// Instantiates the lookup table of the circuit.
    /// * `meta`: [ConstraintSystem] of the circuit
    fn create_lookup(&self, meta: &mut ConstraintSystem<F>) {
        for (phase, q_l) in self.q_lookup.iter().enumerate() {
            if let Some(q) = q_l {
                meta.lookup("lookup", |meta| {
                    let q = meta.query_selector(*q);
                    // there should only be 1 advice column in phase `phase`
                    let a =
                        meta.query_advice(self.gate.basic_gates[phase][0].value, Rotation::cur());
                    vec![(q * a, self.lookup)]
                });
            }
        }
        // if there are multiple advice columns, look up the dedicated `lookup_advice` columns directly (no selector needed)
        for la in self.lookup_advice.iter().flat_map(|advices| advices.iter()) {
            meta.lookup("lookup wo selector", |meta| {
                let a = meta.query_advice(*la, Rotation::cur());
                vec![(a, self.lookup)]
            });
        }
    }

    /// Loads the lookup table into the circuit using the provided `layouter`.
    /// * `layouter`: layouter for the circuit
    pub fn load_lookup_table(&self, layouter: &mut impl Layouter<F>) -> Result<(), Error> {
        layouter.assign_table(
            || format!("{} bit lookup", self.lookup_bits),
            |mut table| {
                for idx in 0..(1u32 << self.lookup_bits) {
                    table.assign_cell(
                        || "lookup table",
                        self.lookup,
                        idx as usize,
                        || Value::known(F::from(idx as u64)),
                    )?;
                }
                Ok(())
            },
        )?;
        Ok(())
    }
}

/// Trait providing methods to constrain that a field element `x` lies within a range of bits.
pub trait RangeInstructions<F: ScalarField> {
    /// The type of Gate used within the instructions.
    type Gate: GateInstructions<F>;

    /// Returns the type of gate used.
    fn gate(&self) -> &Self::Gate;

    /// Returns the number of bits the lookup table represents.
    fn lookup_bits(&self) -> usize;

    /// Checks and constrains that `a` lies in the range [0, 2<sup>range_bits</sup>).
    ///
    /// Inputs:
    /// * `a`: [AssignedValue] value to be range checked
    /// * `range_bits`: number of bits in the range
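    ///
    /// # Example
    ///
    /// A minimal usage sketch (assuming a chip `range` implementing this trait, a `ctx: &mut Context<F>`, and an assigned value `x`):
    /// ```ignore
    /// // constrain 0 <= x < 2^64
    /// range.range_check(ctx, x, 64);
    /// ```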
    fn range_check(&self, ctx: &mut Context<F>, a: AssignedValue<F>, range_bits: usize);

    /// Constrains that `a` is less than `b`.
    ///
    /// Assumes that `a` and `b` have at most `num_bits` bits.
    ///
    /// Note: This may fail silently if `a` or `b` have more than `num_bits` bits.
    /// * a: [QuantumCell] value to check
    /// * b: upper bound expressed as a [QuantumCell]
    /// * num_bits: number of bits used to represent the values of `a` and `b`
    fn check_less_than(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<QuantumCell<F>>,
        b: impl Into<QuantumCell<F>>,
        num_bits: usize,
    );

    /// Performs a range check that `a` has at most `ceil(b.bits() / lookup_bits) * lookup_bits` bits and then constrains that `a` is less than `b`.
    ///
    /// * a: [AssignedValue] value to check
    /// * b: upper bound expressed as a [u64] value
    ///
    /// ## Assumptions
    /// * `ceil(b.bits() / lookup_bits) * lookup_bits <= F::CAPACITY`
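    ///
    /// # Example
    ///
    /// A minimal usage sketch (assuming a chip `range`, a context `ctx`, and an assigned value `a`):
    /// ```ignore
    /// // range check `a` to a multiple of lookup_bits covering bit_length(1000) bits,
    /// // then constrain a < 1000
    /// range.check_less_than_safe(ctx, a, 1000);
    /// ```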
    fn check_less_than_safe(&self, ctx: &mut Context<F>, a: AssignedValue<F>, b: u64) {
        let range_bits = bit_length(b).div_ceil(self.lookup_bits()) * self.lookup_bits();

        self.range_check(ctx, a, range_bits);
        self.check_less_than(ctx, a, Constant(F::from(b)), range_bits)
    }

    /// Performs a range check that `a` has at most `ceil(b.bits() / lookup_bits) * lookup_bits` bits and then constrains that `a` is less than `b`.
    ///
    /// * a: [AssignedValue] value to check
    /// * b: upper bound expressed as a [BigUint] value
    ///
    /// ## Assumptions
    /// * `ceil(b.bits() / lookup_bits) * lookup_bits <= F::CAPACITY`
    fn check_big_less_than_safe(&self, ctx: &mut Context<F>, a: AssignedValue<F>, b: BigUint)
    where
        F: BigPrimeField,
    {
        let range_bits = (b.bits() as usize).div_ceil(self.lookup_bits()) * self.lookup_bits();

        self.range_check(ctx, a, range_bits);
        self.check_less_than(ctx, a, Constant(biguint_to_fe(&b)), range_bits)
    }

    /// Constrains whether `a` is in `[0, b)`, and returns 1 if `a` < `b`, otherwise 0.
    ///
    /// Assumes that `a` and `b` are known to have at most `num_bits` bits.
    /// * a: first [QuantumCell] to compare
    /// * b: second [QuantumCell] to compare
    /// * num_bits: number of bits to represent the values
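    ///
    /// # Example
    ///
    /// A minimal usage sketch (assuming a chip `range`, a context `ctx`, and assigned values `a`, `b` of at most 64 bits):
    /// ```ignore
    /// // lt = 1 if a < b, otherwise 0
    /// let lt = range.is_less_than(ctx, a, b, 64);
    /// ```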
    fn is_less_than(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<QuantumCell<F>>,
        b: impl Into<QuantumCell<F>>,
        num_bits: usize,
    ) -> AssignedValue<F>;

    /// Performs a range check that `a` has at most `ceil(bit_length(b) / lookup_bits) * lookup_bits` bits and then returns whether `a` is in `[0, b)`.
    ///
    /// Returns 1 if `a` < `b`, otherwise 0.
    ///
    /// * a: [AssignedValue] value to check
    /// * b: upper bound as [u64] value
    fn is_less_than_safe(
        &self,
        ctx: &mut Context<F>,
        a: AssignedValue<F>,
        b: u64,
    ) -> AssignedValue<F> {
        let range_bits = bit_length(b).div_ceil(self.lookup_bits()) * self.lookup_bits();

        self.range_check(ctx, a, range_bits);
        self.is_less_than(ctx, a, Constant(F::from(b)), range_bits)
    }

    /// Performs a range check that `a` has at most `ceil(b.bits() / lookup_bits) * lookup_bits` bits and then returns whether `a` is in `[0, b)`.
    ///
    /// Returns 1 if `a` < `b`, otherwise 0.
    ///
    /// * a: [AssignedValue] value to check
    /// * b: upper bound as [BigUint] value
    ///
    /// For the current implementation using `is_less_than`, we require `ceil(b.bits() / lookup_bits) + 1 < F::NUM_BITS / lookup_bits`.
    fn is_big_less_than_safe(
        &self,
        ctx: &mut Context<F>,
        a: AssignedValue<F>,
        b: BigUint,
    ) -> AssignedValue<F>
    where
        F: BigPrimeField,
    {
        let range_bits = (b.bits() as usize).div_ceil(self.lookup_bits()) * self.lookup_bits();

        self.range_check(ctx, a, range_bits);
        self.is_less_than(ctx, a, Constant(biguint_to_fe(&b)), range_bits)
    }

    /// Constrains and returns `(c, r)` such that `a = b * c + r`.
    ///
    /// * a: [QuantumCell] value to divide
    /// * b: [BigUint] value to divide by
    /// * a_num_bits: number of bits needed to represent the value of `a`
    ///
    /// ## Assumptions
    /// * `b != 0` and `a` has <= `a_num_bits` bits.
    /// * `a_num_bits <= F::CAPACITY = F::NUM_BITS - 1`
    /// * Unsafe behavior if `a_num_bits >= F::NUM_BITS`
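    ///
    /// # Example
    ///
    /// A minimal usage sketch (assuming a chip `range`, a context `ctx`, and an assigned value `a` of at most 32 bits):
    /// ```ignore
    /// // constrain and witness (q, r) such that a = 3 * q + r with 0 <= r < 3
    /// let (q, r) = range.div_mod(ctx, a, 3u64, 32);
    /// ```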
    fn div_mod(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<QuantumCell<F>>,
        b: impl Into<BigUint>,
        a_num_bits: usize,
    ) -> (AssignedValue<F>, AssignedValue<F>)
    where
        F: BigPrimeField,
    {
        let a = a.into();
        let b = b.into();
        let a_val = fe_to_biguint(a.value());
        let (div, rem) = a_val.div_mod_floor(&b);
        let [div, rem] = [div, rem].map(|v| biguint_to_fe(&v));
        // Gate: rem + b * div == a, laid out as | rem | b | div | a |
        ctx.assign_region([Witness(rem), Constant(biguint_to_fe(&b)), Witness(div), a], [0]);
        let rem = ctx.get(-4);
        let div = ctx.get(-2);
        // Constrain `div <= 2 ** a_num_bits / b`, i.e. `div < 2 ** a_num_bits / b + 1`.
        self.check_big_less_than_safe(
            ctx,
            div,
            BigUint::one().shl(a_num_bits as u32) / &b + BigUint::one(),
        );
        // Constrain that the remainder is less than the divisor (i.e. `r < b`).
        self.check_big_less_than_safe(ctx, rem, b);
        (div, rem)
    }

    /// Constrains and returns `(c, r)` such that `a = b * c + r`.
    ///
    /// Assumes:
    /// * `b != 0`
    /// * `a` has <= `a_num_bits` bits
    /// * `b` has <= `b_num_bits` bits
    ///
    /// Note:
    /// Let `X = 2 ** b_num_bits`.
    /// Write `a = a1 * X + a0` and `c = c1 * X + c0`.
    /// If we write `b * c0 + r = d1 * X + d0`, then
    /// `b * c + r = (b * c1 + d1) * X + d0`.
    /// * a: [QuantumCell] value to divide
    /// * b: [QuantumCell] value to divide by
    /// * a_num_bits: number of bits needed to represent the value of `a`
    /// * b_num_bits: number of bits needed to represent the value of `b`
    ///
    /// ## Assumptions
    /// * `a_num_bits <= F::CAPACITY = F::NUM_BITS - 1`
    /// * `b_num_bits <= F::CAPACITY = F::NUM_BITS - 1`
    /// * Unsafe behavior if `a_num_bits >= F::NUM_BITS` or `b_num_bits >= F::NUM_BITS`
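    ///
    /// # Example
    ///
    /// A minimal usage sketch (assuming a chip `range`, a context `ctx`, and assigned values `a`, `b` of at most 32 and 16 bits respectively):
    /// ```ignore
    /// // constrain and witness (q, r) such that a = b * q + r with 0 <= r < b
    /// let (q, r) = range.div_mod_var(ctx, a, b, 32, 16);
    /// ```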
    fn div_mod_var(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<QuantumCell<F>>,
        b: impl Into<QuantumCell<F>>,
        a_num_bits: usize,
        b_num_bits: usize,
    ) -> (AssignedValue<F>, AssignedValue<F>)
    where
        F: BigPrimeField,
    {
        let a = a.into();
        let b = b.into();
        let a_val = fe_to_biguint(a.value());
        let b_val = fe_to_biguint(b.value());
        let (div, rem) = a_val.div_mod_floor(&b_val);
        let x = BigUint::one().shl(b_num_bits as u32);
        let (div_hi, div_lo) = div.div_mod_floor(&x);

        let x_fe = self.gate().pow_of_two()[b_num_bits];
        let [div, div_hi, div_lo, rem] = [div, div_hi, div_lo, rem].map(|v| biguint_to_fe(&v));
        // Gate: div_lo + div_hi * X == div, laid out as | div_lo | div_hi | X | div | rem |
        ctx.assign_region(
            [Witness(div_lo), Witness(div_hi), Constant(x_fe), Witness(div), Witness(rem)],
            [0],
        );
        let [div_lo, div_hi, div, rem] = [-5, -4, -2, -1].map(|i| ctx.get(i));
        self.range_check(ctx, div_lo, b_num_bits);
        if a_num_bits <= b_num_bits {
            self.gate().assert_is_const(ctx, &div_hi, &F::ZERO);
        } else {
            self.range_check(ctx, div_hi, a_num_bits - b_num_bits);
        }

        // decompose b * div_lo + rem = bcr0_hi * X + bcr0_lo (the `d1 * X + d0` from the doc note)
        let (bcr0_hi, bcr0_lo) = {
            let bcr0 = self.gate().mul_add(ctx, b, Existing(div_lo), Existing(rem));
            self.div_mod(ctx, Existing(bcr0), x.clone(), a_num_bits)
        };
        let bcr_hi = self.gate().mul_add(ctx, b, Existing(div_hi), Existing(bcr0_hi));

        // decompose a = a_hi * X + a_lo and equate the two decompositions limb by limb
        let (a_hi, a_lo) = self.div_mod(ctx, a, x, a_num_bits);
        ctx.constrain_equal(&bcr_hi, &a_hi);
        ctx.constrain_equal(&bcr0_lo, &a_lo);

        self.range_check(ctx, rem, b_num_bits);
        self.check_less_than(ctx, Existing(rem), b, b_num_bits);
        (div, rem)
    }

    /// Constrains and returns the last (least significant) bit of the value of `a`.
    ///
    /// Assumes `a` has already been range checked to `limb_bits` bits.
    /// * a: [AssignedValue] value to get the last bit of
    /// * limb_bits: number of bits in a limb
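    ///
    /// # Example
    ///
    /// A minimal usage sketch (assuming a chip `range`, a context `ctx`, and an assigned value `a` already range checked to 16 bits):
    /// ```ignore
    /// // parity is 1 if a is odd, 0 if a is even
    /// let parity = range.get_last_bit(ctx, a, 16);
    /// ```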
    fn get_last_bit(
        &self,
        ctx: &mut Context<F>,
        a: AssignedValue<F>,
        limb_bits: usize,
    ) -> AssignedValue<F> {
        let a_big = fe_to_biguint(a.value());
        let bit_v = F::from(a_big.bit(0));
        let two = F::from(2u64);
        let h_v = F::from_bytes_le(&(a_big >> 1usize).to_bytes_le());

        // Gate: bit + half * 2 == a, laid out as | bit | half | 2 | a |
        ctx.assign_region([Witness(bit_v), Witness(h_v), Constant(two), Existing(a)], [0]);
        let half = ctx.get(-3);
        let bit = ctx.get(-4);

        self.range_check(ctx, half, limb_bits - 1);
        self.gate().assert_bit(ctx, bit);
        bit
    }
}

/// # RangeChip
/// This chip provides methods that rely on "range checking" that a field element `x` is within a range of bits.
/// Range checks are done using a lookup table with the numbers [0, 2<sup>lookup_bits</sup>).
#[derive(Clone, Debug, Getters)]
pub struct RangeChip<F: ScalarField> {
    /// Underlying [GateChip] for this chip.
    pub gate: GateChip<F>,
    /// Lookup manager for each phase, lazily initialized using the [`SharedCopyConstraintManager`](crate::virtual_region::copy_constraints::SharedCopyConstraintManager) from the [Context]
    /// that first calls it.
    ///
    /// The lookup manager is used to store the cells that need to be looked up in the range check lookup table.
    #[getset(get = "pub")]
    lookup_manager: [LookupAnyManager<F, 1>; MAX_PHASE],
    /// Defines the number of bits represented in the lookup table [0, 2<sup>lookup_bits</sup>).
    lookup_bits: usize,
    /// [Vec] of powers of `2 ** lookup_bits` represented as [QuantumCell::Constant].
    /// These are precomputed and cached as a performance optimization for later limb decompositions. We precompute up to the highest power that fits in `F`, which is `2 ** ((F::CAPACITY / lookup_bits) * lookup_bits)`.
    pub limb_bases: Vec<QuantumCell<F>>,
}

impl<F: ScalarField> RangeChip<F> {
    /// Creates a new [RangeChip] with the given `lookup_bits` and lookup managers.
    /// * `lookup_bits`: number of bits represented in the lookup table [0, 2<sup>lookup_bits</sup>)
    /// * `lookup_manager`: a [LookupAnyManager] for each phase.
    ///
    /// **IMPORTANT:** It is **critical** that all `LookupAnyManager`s use the same [`SharedCopyConstraintManager`](crate::virtual_region::copy_constraints::SharedCopyConstraintManager)
    /// as in your primary circuit builder.
    ///
    /// It is not advised to call this function directly. Instead you should call `BaseCircuitBuilder::range_chip`.
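    ///
    /// # Example
    ///
    /// The preferred construction path (sketch, assuming a `BaseCircuitBuilder` named `builder` with `lookup_bits` set):
    /// ```ignore
    /// // the builder supplies lookup managers sharing its copy constraint manager
    /// let range = builder.range_chip();
    /// ```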
    pub fn new(lookup_bits: usize, lookup_manager: [LookupAnyManager<F, 1>; MAX_PHASE]) -> Self {
        let limb_base = F::from(1u64 << lookup_bits);
        let mut running_base = limb_base;
        let num_bases = F::CAPACITY as usize / lookup_bits;
        let mut limb_bases = Vec::with_capacity(num_bases + 1);
        limb_bases.extend([Constant(F::ONE), Constant(running_base)]);
        for _ in 2..=num_bases {
            running_base *= &limb_base;
            limb_bases.push(Constant(running_base));
        }
        let gate = GateChip::new();

        Self { gate, lookup_bits, lookup_manager, limb_bases }
    }

    fn add_cell_to_lookup(&self, ctx: &Context<F>, a: AssignedValue<F>) {
        let phase = ctx.phase();
        let manager = &self.lookup_manager[phase];
        manager.add_lookup(ctx.tag(), [a]);
    }

    /// Checks and constrains that `a` lies in the range [0, 2<sup>range_bits</sup>).
    ///
    /// This is done by decomposing `a` into `num_limbs` limbs, where `num_limbs = ceil(range_bits / lookup_bits)`.
    /// Each limb is constrained to be within the range [0, 2<sup>lookup_bits</sup>).
    /// The limbs are then combined to form `a` again, with the last limb having `rem_bits` number of bits.
    ///
    /// Returns the last (highest) limb.
    ///
    /// Inputs:
    /// * `a`: [AssignedValue] value to be range checked
    /// * `range_bits`: number of bits in the range
    /// * `lookup_bits`: number of bits in the lookup table
    ///
    /// # Assumptions
    /// * `ceil(range_bits / lookup_bits) * lookup_bits <= F::CAPACITY`
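    ///
    /// # Example
    ///
    /// A worked instance (with `lookup_bits = 8`): for `range_bits = 20`, `a` is decomposed into
    /// `num_limbs = 3` limbs of 8, 8, and `rem_bits = 4` bits. The first two limbs are looked up
    /// directly, while the last limb is additionally multiplied by `2^(8 - 4)` and the product is
    /// looked up, which constrains the last limb to 4 bits.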
    fn _range_check(
        &self,
        ctx: &mut Context<F>,
        a: AssignedValue<F>,
        range_bits: usize,
    ) -> AssignedValue<F> {
        if range_bits == 0 {
            self.gate.assert_is_const(ctx, &a, &F::ZERO);
            return a;
        }
        // the number of limbs
        let num_limbs = range_bits.div_ceil(self.lookup_bits);
        let rem_bits = range_bits % self.lookup_bits;

        debug_assert!(self.limb_bases.len() >= num_limbs);

        let last_limb = if num_limbs == 1 {
            self.add_cell_to_lookup(ctx, a);
            a
        } else {
            let limbs = decompose_fe_to_u64_limbs(a.value(), num_limbs, self.lookup_bits)
                .into_iter()
                .map(|x| Witness(F::from(x)));
            let row_offset = ctx.advice.len() as isize;
            let acc = self.gate.inner_product(ctx, limbs, self.limb_bases[..num_limbs].to_vec());
            // the inner product above must equal `a`
            ctx.constrain_equal(&a, &acc);
            // we fetch the cells to look up by getting the indices where `limbs` were assigned in `inner_product`.
            // Because `limb_bases[0]` is 1, the progression of relative indices is 0, 1, 4, 7, ..., 1 + 3 * i
            self.add_cell_to_lookup(ctx, ctx.get(row_offset));
            for i in 0..num_limbs - 1 {
                self.add_cell_to_lookup(ctx, ctx.get(row_offset + 1 + 3 * i as isize));
            }
            ctx.get(row_offset + 1 + 3 * (num_limbs - 2) as isize)
        };

        // additional constraints for the last limb if rem_bits != 0
        match rem_bits.cmp(&1) {
            // we want to check that x := limbs[num_limbs - 1] is boolean,
            // so we constrain x * (x - 1) = 0 via the gate 0 + x * x - x == 0
            // | 0 | x | x | x |
            Ordering::Equal => {
                self.gate.assert_bit(ctx, last_limb);
            }
            Ordering::Greater => {
                // shift the last limb left so that looking up `last_limb * 2^(lookup_bits - rem_bits)`
                // in the `lookup_bits` table constrains `last_limb` to `rem_bits` bits
                let mult_val = self.gate.pow_of_two[self.lookup_bits - rem_bits];
                let check = self.gate.mul(ctx, last_limb, Constant(mult_val));
                self.add_cell_to_lookup(ctx, check);
            }
            _ => {}
        }
        last_limb
    }
}

impl<F: ScalarField> RangeInstructions<F> for RangeChip<F> {
    type Gate = GateChip<F>;

    /// Returns the [GateChip] used in this chip.
    fn gate(&self) -> &Self::Gate {
        &self.gate
    }

    /// Returns the number of bits represented in the lookup table [0, 2<sup>lookup_bits</sup>).
    fn lookup_bits(&self) -> usize {
        self.lookup_bits
    }

    /// Checks and constrains that `a` lies in the range [0, 2<sup>range_bits</sup>).
    ///
    /// This is done by decomposing `a` into `num_limbs` limbs, where `num_limbs = ceil(range_bits / lookup_bits)`.
    /// Each limb is constrained to be within the range [0, 2<sup>lookup_bits</sup>).
    /// The limbs are then combined to form `a` again, with the last limb having `rem_bits` number of bits.
    ///
    /// Inputs:
    /// * `a`: [AssignedValue] value to be range checked
    /// * `range_bits`: number of bits in the range
    ///
    /// # Assumptions
    /// * `ceil(range_bits / lookup_bits) * lookup_bits <= F::CAPACITY`
    fn range_check(&self, ctx: &mut Context<F>, a: AssignedValue<F>, range_bits: usize) {
        self._range_check(ctx, a, range_bits);
    }

    /// Constrains that `a` is less than `b`.
    ///
    /// Assumes that `a` and `b` are known to have at most `num_bits` bits.
    ///
    /// Note: This may fail silently if `a` or `b` have more than `num_bits` bits.
    /// * a: [QuantumCell] value to check
    /// * b: upper bound expressed as a [QuantumCell]
    /// * num_bits: number of bits to represent the values
    fn check_less_than(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<QuantumCell<F>>,
        b: impl Into<QuantumCell<F>>,
        num_bits: usize,
    ) {
        let a = a.into();
        let b = b.into();
        let pow_of_two = self.gate.pow_of_two[num_bits];
        let check_cell = {
            let shift_a_val = pow_of_two + a.value();
            // | a + 2^(num_bits) - b | b | 1 | a + 2^(num_bits) | -2^(num_bits) | 1 | a |
            let cells = [
                Witness(shift_a_val - b.value()),
                b,
                Constant(F::ONE),
                Witness(shift_a_val),
                Constant(-pow_of_two),
                Constant(F::ONE),
                a,
            ];
            ctx.assign_region(cells, [0, 3]);
            ctx.get(-7)
        };

        // `check_cell = a - b + 2^num_bits`; since `a, b < 2^num_bits`, the range check
        // `check_cell < 2^num_bits` holds iff `a < b`
        self.range_check(ctx, check_cell, num_bits);
    }

    /// Constrains whether `a` is in `[0, b)`, and returns 1 if `a` < `b`, otherwise 0.
    ///
    /// * a: first [QuantumCell] to compare
    /// * b: second [QuantumCell] to compare
    /// * num_bits: number of bits to represent the values
    ///
    /// # Assumptions
    /// * `a` and `b` are known to have `<= num_bits` bits.
    /// * `(ceil(num_bits / lookup_bits) + 1) * lookup_bits <= F::CAPACITY`
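    ///
    /// # Example
    ///
    /// A worked instance (with `lookup_bits = 8` and `num_bits = 10`, so `padded_bits = 16`):
    /// the gate witnesses `shifted = a - b + 2^16`. Since `a, b < 2^16`, we have
    /// `shifted < 2^17`, and `a < b` exactly when `shifted < 2^16`, i.e. when the limb of
    /// `shifted` of weight `2^16` is zero. That limb is returned by the range check below and
    /// tested with `is_zero`.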
    fn is_less_than(
        &self,
        ctx: &mut Context<F>,
        a: impl Into<QuantumCell<F>>,
        b: impl Into<QuantumCell<F>>,
        num_bits: usize,
    ) -> AssignedValue<F> {
        let a = a.into();
        let b = b.into();

        let k = num_bits.div_ceil(self.lookup_bits);
        let padded_bits = k * self.lookup_bits;
        debug_assert!(
            padded_bits + self.lookup_bits <= F::CAPACITY as usize,
            "num_bits is too large for this is_less_than implementation"
        );
        let pow_padded = self.gate.pow_of_two[padded_bits];

        let shift_a_val = pow_padded + a.value();
        let shifted_val = shift_a_val - b.value();
        let shifted_cell = {
            // | a - b + 2^padded_bits | b | 1 | a + 2^padded_bits | -2^padded_bits | 1 | a |
            ctx.assign_region(
                [
                    Witness(shifted_val),
                    b,
                    Constant(F::ONE),
                    Witness(shift_a_val),
                    Constant(-pow_padded),
                    Constant(F::ONE),
                    a,
                ],
                [0, 3],
            );
            ctx.get(-7)
        };

        // check whether a - b + 2^padded_bits < 2^padded_bits;
        // since we assume a, b < 2^padded_bits, we are guaranteed a - b + 2^padded_bits < 2^(padded_bits + 1)
        let last_limb = self._range_check(ctx, shifted_cell, padded_bits + self.lookup_bits);
        // `last_limb` is the limb of `a - b + 2^padded_bits` of weight `2^padded_bits`, which is zero iff `a < b`
        self.gate.is_zero(ctx, last_limb)
    }
}
651}