pub type FpChip<'range, F> = fp::FpChip<'range, F, Fq>;
Aliased Type
struct FpChip<'range, F> {
pub range: &'range RangeChip<F>,
pub limb_bits: usize,
pub num_limbs: usize,
pub num_limbs_bits: usize,
pub num_limbs_log2_ceil: usize,
pub limb_bases: Vec<F>,
pub limb_base_big: BigInt,
pub limb_mask: BigUint,
pub p: BigInt,
pub p_limbs: Vec<F>,
pub p_native: F,
pub native_modulus: BigUint,
/* private fields */
}
Fields
range: &'range RangeChip<F>
limb_bits: usize
num_limbs: usize
num_limbs_bits: usize
num_limbs_log2_ceil: usize
limb_bases: Vec<F>
limb_base_big: BigInt
limb_mask: BigUint
p: BigInt
p_limbs: Vec<F>
p_native: F
native_modulus: BigUint
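The derived fields are not documented here, but they follow the usual CRT-chip conventions. A minimal out-of-circuit sketch of the assumed relationships (limb_mask selects the low limb_bits bits; limb_bases are powers of 2^limb_bits):

```rust
use num_bigint::{BigInt, BigUint};
use num_traits::One;

// Sketch under assumed semantics: how the derived parameters relate to limb_bits.
fn derived_params(limb_bits: usize, num_limbs: usize) -> (BigUint, BigInt, Vec<BigInt>) {
    // limb_mask = 2^limb_bits - 1: masks off one limb of a big integer.
    let limb_mask = (BigUint::one() << limb_bits) - BigUint::one();
    // limb_base_big = 2^limb_bits: the radix of the limb decomposition.
    let limb_base_big = BigInt::one() << limb_bits;
    // limb_bases[i] = 2^(limb_bits * i); the chip stores these as native field elements.
    let limb_bases = (0..num_limbs).map(|i| BigInt::one() << (limb_bits * i)).collect();
    (limb_mask, limb_base_big, limb_bases)
}
```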
Implementations
impl<'range, F: BigPrimeField, Fp: BigPrimeField> FpChip<'range, F, Fp>
pub fn new(range: &'range RangeChip<F>, limb_bits: usize, num_limbs: usize) -> Self
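A quick out-of-circuit sanity check when picking limb_bits and num_limbs; the helper below and the 88-bit × 3-limb configuration are illustrative assumptions, not part of this API:

```rust
use num_bigint::BigUint;

// Hypothetical helper: k limbs of n bits can hold any value < 2^(n * k),
// so they cover a modulus p iff p has at most n * k bits.
fn params_cover_modulus(p: &BigUint, limb_bits: usize, num_limbs: usize) -> bool {
    p.bits() as usize <= limb_bits * num_limbs
}

fn main() {
    // BN254 base field modulus Fq (254 bits).
    let p = BigUint::parse_bytes(
        b"30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47",
        16,
    )
    .unwrap();
    assert!(params_cover_modulus(&p, 88, 3)); // 88 * 3 = 264 >= 254
}
```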
pub fn enforce_less_than_p(&self, ctx: &mut Context<F>, a: ProperCrtUint<F>)
pub fn load_constant_uint(&self, ctx: &mut Context<F>, a: BigUint) -> ProperCrtUint<F>
Trait Implementations
impl<'range, F: Clone + BigPrimeField, Fp: Clone + BigPrimeField> Clone for FpChip<'range, F, Fp>
impl<'range, F: Debug + BigPrimeField, Fp: Debug + BigPrimeField> Debug for FpChip<'range, F, Fp>
impl<'range, F: BigPrimeField, Fp: BigPrimeField> FieldChip<F> for FpChip<'range, F, Fp>
fn range_check(&self, ctx: &mut Context<F>, a: impl Into<ProperCrtUint<F>>, max_bits: usize)
Assumptions
- max_bits <= n * k, where n = self.limb_bits and k = self.num_limbs
- a.truncation.limbs.len() = self.num_limbs
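For intuition, the decomposition these assumptions describe can be sketched out of circuit; decompose is a hypothetical helper, not part of the chip:

```rust
use num_bigint::BigUint;
use num_traits::One;

// Split x into k little-endian limbs of n bits each.
// This is lossless exactly when x fits in n * k bits, mirroring max_bits <= n * k.
fn decompose(x: &BigUint, n: usize, k: usize) -> Vec<BigUint> {
    let mask = (BigUint::one() << n) - BigUint::one(); // = limb_mask
    (0..k).map(|i| (x >> (i * n)) & &mask).collect()
}
```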
fn is_soft_zero(&self, ctx: &mut Context<F>, a: impl Into<ProperCrtUint<F>>) -> AssignedValue<F>
Returns 1 iff a is 0 as a BigUint. This means that even if a is 0 modulo p, this may return 0.
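A concrete illustration of the “soft” caveat with plain integer arithmetic (toy modulus, illustrative only):

```rust
use num_bigint::BigUint;

fn main() {
    // The witness value p itself is 0 modulo p but nonzero as a BigUint,
    // so a soft zero check on it would return 0.
    let p = BigUint::from(7u32);
    let a = p.clone();
    assert!(a != BigUint::from(0u32)); // nonzero as a BigUint
    assert!(&a % &p == BigUint::from(0u32)); // but zero as a field element
}
```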
fn is_soft_nonzero(&self, ctx: &mut Context<F>, a: impl Into<ProperCrtUint<F>>) -> AssignedValue<F>
Given a proper CRT integer a, returns 1 iff a < modulus::<F>() and a != 0 as integers.
Assumptions
- a is a proper representation of a BigUint
const PRIME_FIELD_NUM_BITS: u32 = Fp::NUM_BITS
type UnsafeFieldPoint = CRTInteger<F>
A representation of a field element that is used for intermediate computations.
The representation can have “overflows” (e.g., overflow limbs or negative limbs).
type FieldPoint = ProperCrtUint<F>
The “proper” representation of a field element. Allowed to be a non-unique representation of a field element (e.g., can be greater than modulus)
type ReducedFieldPoint = Reduced<ProperCrtUint<F>, Fp>
A proper representation of field elements that guarantees a unique representation of each field element. Typically this means Uints that are less than the modulus.
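The proper/reduced distinction can be sketched with integers: a and a + p are both acceptable “proper” encodings of the same field element, but only the value in [0, p) is reduced (same_field_element is a hypothetical helper):

```rust
use num_bigint::BigUint;

// Two proper encodings represent the same field element iff they agree mod p;
// the reduced representative is the unique value a % p in [0, p).
fn same_field_element(a: &BigUint, b: &BigUint, p: &BigUint) -> bool {
    a % p == b % p
}
```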
type FieldType = Fp
A type implementing the Field trait to help with witness generation (for example with inverse).
type RangeChip = RangeChip<F>
fn native_modulus(&self) -> &BigUint
fn range(&self) -> &'range Self::RangeChip
fn limb_bits(&self) -> usize
fn get_assigned_value(&self, x: &CRTInteger<F>) -> Fp
fn load_private(&self, ctx: &mut Context<F>, a: Fp) -> ProperCrtUint<F>
Assigns fe as a private witness. Note that the witness may not be constrained to be a unique representation of the field element fe.
fn load_constant(&self, ctx: &mut Context<F>, a: Fp) -> ProperCrtUint<F>
Assigns fe as a constant.
fn add_no_carry(&self, ctx: &mut Context<F>, a: impl Into<CRTInteger<F>>, b: impl Into<CRTInteger<F>>) -> CRTInteger<F>
fn add_constant_no_carry(&self, ctx: &mut Context<F>, a: impl Into<CRTInteger<F>>, c: Fp) -> CRTInteger<F>
Output: a + c
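The no_carry family operates limb-wise, so result limbs may temporarily exceed limb_bits bits (the “overflow” representation of UnsafeFieldPoint). A minimal value-level sketch with plain i64 limbs, illustrative only:

```rust
// Limbs are added independently with no carry propagation; a later
// carry_mod call is what restores a proper, range-checked form.
fn add_no_carry_limbs(a: &[i64], b: &[i64]) -> Vec<i64> {
    a.iter().zip(b).map(|(x, y)| x + y).collect()
}
```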
fn sub_no_carry(&self, ctx: &mut Context<F>, a: impl Into<CRTInteger<F>>, b: impl Into<CRTInteger<F>>) -> CRTInteger<F>
fn negate(&self, ctx: &mut Context<F>, a: ProperCrtUint<F>) -> ProperCrtUint<F>
fn scalar_mul_no_carry(&self, ctx: &mut Context<F>, a: impl Into<CRTInteger<F>>, c: i64) -> CRTInteger<F>
Output: a * c
fn scalar_mul_and_add_no_carry(&self, ctx: &mut Context<F>, a: impl Into<CRTInteger<F>>, b: impl Into<CRTInteger<F>>, c: i64) -> CRTInteger<F>
Output: a * c + b
fn mul_no_carry(&self, ctx: &mut Context<F>, a: impl Into<CRTInteger<F>>, b: impl Into<CRTInteger<F>>) -> CRTInteger<F>
fn check_carry_mod_to_zero(&self, ctx: &mut Context<F>, a: CRTInteger<F>)
fn carry_mod(&self, ctx: &mut Context<F>, a: CRTInteger<F>) -> ProperCrtUint<F>
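carry_mod is what collapses an overflowed CRTInteger back to a proper representation. Its value-level effect can be sketched out of circuit; carry_mod_value is a hypothetical helper (the real method also emits the corresponding constraints):

```rust
use num_bigint::{BigInt, BigUint};

// Evaluate little-endian limbs at the radix 2^n (Horner's rule),
// then reduce into [0, p).
fn carry_mod_value(limbs: &[i64], n: usize, p: &BigUint) -> BigUint {
    let mut acc = BigInt::from(0);
    for &l in limbs.iter().rev() {
        acc = (acc << n) + BigInt::from(l);
    }
    let p = BigInt::from(p.clone());
    // Normalize: limbs may be negative, so shift into the canonical range.
    (((acc % &p) + &p) % &p).to_biguint().unwrap()
}
```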
fn enforce_less_than(&self, ctx: &mut Context<F>, a: ProperCrtUint<F>) -> Reduced<ProperCrtUint<F>, Fp>
Constrains that a is a reduced representation and returns the wrapped a.
fn is_zero(&self, ctx: &mut Context<F>, a: impl Into<ProperCrtUint<F>>) -> AssignedValue<F>
fn is_equal_unenforced(&self, ctx: &mut Context<F>, a: Reduced<ProperCrtUint<F>, Fp>, b: Reduced<ProperCrtUint<F>, Fp>) -> AssignedValue<F>
fn assert_equal(&self, ctx: &mut Context<F>, a: impl Into<ProperCrtUint<F>>, b: impl Into<ProperCrtUint<F>>)
fn gate(&self) -> &<Self::RangeChip as RangeInstructions<F>>::Gate
fn load_private_reduced(&self, ctx: &mut Context<F>, fe: Self::FieldType) -> Self::ReducedFieldPoint
Assigns fe as a private witness and constrains the witness to be in reduced form.
fn is_equal(&self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, b: impl Into<Self::FieldPoint>) -> AssignedValue<F>
fn mul(&self, ctx: &mut Context<F>, a: impl Into<Self::UnsafeFieldPoint>, b: impl Into<Self::UnsafeFieldPoint>) -> Self::FieldPoint
If using UnsafeFieldPoint, make sure multiplication does not cause overflow.
fn divide(&self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, b: impl Into<Self::FieldPoint>) -> Self::FieldPoint
Constrains that b is nonzero as a field element and then returns a / b.
fn divide_unsafe(&self, ctx: &mut Context<F>, a: impl Into<Self::UnsafeFieldPoint>, b: impl Into<Self::UnsafeFieldPoint>) -> Self::FieldPoint
Like divide, but b is not constrained to be nonzero.
fn neg_divide(&self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, b: impl Into<Self::FieldPoint>) -> Self::FieldPoint
Constrains that b is nonzero as a field element and then returns -a / b.
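For witness generation, the divide-style methods compute the quotient out of circuit. A sketch using Fermat's little theorem (div_mod_p is a hypothetical helper; it assumes p is prime and b is nonzero mod p):

```rust
use num_bigint::BigUint;

// b^(p-2) = b^(-1) mod p for prime p and b not divisible by p.
fn div_mod_p(a: &BigUint, b: &BigUint, p: &BigUint) -> BigUint {
    let b_inv = b.modpow(&(p - 2u32), p);
    (a * b_inv) % p
}
```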