pub type Fp12Chip<'chip, F> = Fp12Chip<'chip, F, FpChip<'chip, F>, Fq12, 9>;
Aliased Type§
struct Fp12Chip<'chip, F>(pub FieldVectorChip<'chip, F, FpChip<'chip, F, Fq>>, _);
Fields§
§0: FieldVectorChip<'chip, F, FpChip<'chip, F, Fq>>
Implementations§
Source§impl<F: BigPrimeField> Fp12Chip<'_, F>
impl<F: BigPrimeField> Fp12Chip<'_, F>
pub fn frobenius_map( &self, ctx: &mut Context<F>, a: &<Self as FieldChip<F>>::FieldPoint, power: usize, ) -> <Self as FieldChip<F>>::FieldPoint
Sourcepub fn pow(
&self,
ctx: &mut Context<F>,
a: &<Self as FieldChip<F>>::FieldPoint,
exp: Vec<u64>,
) -> <Self as FieldChip<F>>::FieldPoint
pub fn pow( &self, ctx: &mut Context<F>, a: &<Self as FieldChip<F>>::FieldPoint, exp: Vec<u64>, ) -> <Self as FieldChip<F>>::FieldPoint
§Assumptions
a
is a nonzero field point
Sourcepub fn cyclotomic_compress(&self, a: &FqPoint<F>) -> Vec<FqPoint<F>> ⓘ
pub fn cyclotomic_compress(&self, a: &FqPoint<F>) -> Vec<FqPoint<F>> ⓘ
in = g0 + g2 w + g4 w^2 + g1 w^3 + g3 w^4 + g5 w^5 where g_i = g_i0 + g_i1 * u are elements of Fp2 out = Compress(in) = [ g2, g3, g4, g5 ]
Sourcepub fn cyclotomic_decompress(
&self,
ctx: &mut Context<F>,
compression: Vec<FqPoint<F>>,
) -> FqPoint<F>
pub fn cyclotomic_decompress( &self, ctx: &mut Context<F>, compression: Vec<FqPoint<F>>, ) -> FqPoint<F>
Input:
compression = [g2, g3, g4, g5]
where g_i are proper elements of Fp2
Output:
Decompress(compression) = g0 + g2 w + g4 w^2 + g1 w^3 + g3 w^4 + g5 w^5
where:
- All elements of the output are proper elements of Fp2, and with c = XI_0 + u:
  - if g2 != 0: g1 = (g5^2 * c + 3 g4^2 - 2 g3) / (4 g2), g0 = (2 g1^2 + g2 * g5 - 3 g3 * g4) * c + 1
  - if g2 == 0: g1 = (2 g4 * g5) / g3, g0 = (2 g1^2 - 3 g3 * g4) * c + 1
pub fn cyclotomic_square( &self, ctx: &mut Context<F>, compression: &[FqPoint<F>], ) -> Vec<FqPoint<F>> ⓘ
Sourcepub fn cyclotomic_pow(
&self,
ctx: &mut Context<F>,
a: FqPoint<F>,
exp: Vec<u64>,
) -> FqPoint<F>
pub fn cyclotomic_pow( &self, ctx: &mut Context<F>, a: FqPoint<F>, exp: Vec<u64>, ) -> FqPoint<F>
§Assumptions
a
is a nonzero element in the cyclotomic subgroup
pub fn hard_part_BN( &self, ctx: &mut Context<F>, m: <Self as FieldChip<F>>::FieldPoint, ) -> <Self as FieldChip<F>>::FieldPoint
Sourcepub fn easy_part(
&self,
ctx: &mut Context<F>,
a: <Self as FieldChip<F>>::FieldPoint,
) -> <Self as FieldChip<F>>::FieldPoint
pub fn easy_part( &self, ctx: &mut Context<F>, a: <Self as FieldChip<F>>::FieldPoint, ) -> <Self as FieldChip<F>>::FieldPoint
§Assumptions
a
is a nonzero field point
pub fn final_exp( &self, ctx: &mut Context<F>, a: <Self as FieldChip<F>>::FieldPoint, ) -> <Self as FieldChip<F>>::FieldPoint
Source§impl<'a, F, FpChip, Fp12, const XI_0: i64> Fp12Chip<'a, F, FpChip, Fp12, XI_0>
impl<'a, F, FpChip, Fp12, const XI_0: i64> Fp12Chip<'a, F, FpChip, Fp12, XI_0>
Sourcepub fn new(fp_chip: &'a FpChip) -> Self
pub fn new(fp_chip: &'a FpChip) -> Self
User must construct an FpChip
first using a config. This is intended so everything shares a single FlexGateChip
, which is needed for the column allocation to work.
pub fn fp_chip(&self) -> &FpChip
pub fn fp2_mul_no_carry( &self, ctx: &mut Context<F>, fp12_pt: FieldVector<FpChip::UnsafeFieldPoint>, fp2_pt: FieldVector<FpChip::UnsafeFieldPoint>, ) -> FieldVector<FpChip::UnsafeFieldPoint>
pub fn conjugate( &self, ctx: &mut Context<F>, a: FieldVector<FpChip::FieldPoint>, ) -> FieldVector<FpChip::FieldPoint>
Trait Implementations
Source§impl<'a, F: Clone + BigPrimeField, FpChip: Clone + FieldChip<F>, Fp12: Clone, const XI_0: i64> Clone for Fp12Chip<'a, F, FpChip, Fp12, XI_0>
impl<'a, F: Clone + BigPrimeField, FpChip: Clone + FieldChip<F>, Fp12: Clone, const XI_0: i64> Clone for Fp12Chip<'a, F, FpChip, Fp12, XI_0>
Source§impl<'a, F: Debug + BigPrimeField, FpChip: Debug + FieldChip<F>, Fp12: Debug, const XI_0: i64> Debug for Fp12Chip<'a, F, FpChip, Fp12, XI_0>
impl<'a, F: Debug + BigPrimeField, FpChip: Debug + FieldChip<F>, Fp12: Debug, const XI_0: i64> Debug for Fp12Chip<'a, F, FpChip, Fp12, XI_0>
Source§impl<F, FpChip, Fp12, const XI_0: i64> FieldChip<F> for Fp12Chip<'_, F, FpChip, Fp12, XI_0>where
F: BigPrimeField,
FpChip: PrimeFieldChip<F>,
FpChip::FieldType: BigPrimeField,
Fp12: Field + FieldExtConstructor<FpChip::FieldType, 12>,
FieldVector<FpChip::UnsafeFieldPoint>: From<FieldVector<FpChip::FieldPoint>>,
FieldVector<FpChip::FieldPoint>: From<FieldVector<FpChip::ReducedFieldPoint>>,
impl<F, FpChip, Fp12, const XI_0: i64> FieldChip<F> for Fp12Chip<'_, F, FpChip, Fp12, XI_0>where
F: BigPrimeField,
FpChip: PrimeFieldChip<F>,
FpChip::FieldType: BigPrimeField,
Fp12: Field + FieldExtConstructor<FpChip::FieldType, 12>,
FieldVector<FpChip::UnsafeFieldPoint>: From<FieldVector<FpChip::FieldPoint>>,
FieldVector<FpChip::FieldPoint>: From<FieldVector<FpChip::ReducedFieldPoint>>,
Source§fn range_check(
&self,
ctx: &mut Context<F>,
a: impl Into<Self::FieldPoint>,
max_bits: usize,
)
fn range_check( &self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, max_bits: usize, )
§Assumptions
max_bits <= n * k
where n = self.fp_chip.limb_bits
and k = self.fp_chip.num_limbs
a[i].truncation.limbs.len() = self.fp_chip.num_limbs
for all i = 0..a.len()
const PRIME_FIELD_NUM_BITS: u32 = <FpChip::FieldType>::NUM_BITS
Source§type UnsafeFieldPoint = FieldVector<<FpChip as FieldChip<F>>::UnsafeFieldPoint>
type UnsafeFieldPoint = FieldVector<<FpChip as FieldChip<F>>::UnsafeFieldPoint>
A representation of a field element that is used for intermediate computations.
The representation can have “overflows” (e.g., overflow limbs or negative limbs).
Source§type FieldPoint = FieldVector<<FpChip as FieldChip<F>>::FieldPoint>
type FieldPoint = FieldVector<<FpChip as FieldChip<F>>::FieldPoint>
The “proper” representation of a field element. Allowed to be a non-unique representation of a field element (e.g., can be greater than modulus)
Source§type ReducedFieldPoint = FieldVector<<FpChip as FieldChip<F>>::ReducedFieldPoint>
type ReducedFieldPoint = FieldVector<<FpChip as FieldChip<F>>::ReducedFieldPoint>
A proper representation of field elements that guarantees a unique representation of each field element. Typically this means Uints that are less than the modulus.
Source§type FieldType = Fp12
type FieldType = Fp12
A type implementing
Field
trait to help with witness generation (for example with inverse).
type RangeChip = <FpChip as FieldChip<F>>::RangeChip
fn get_assigned_value(&self, x: &Self::UnsafeFieldPoint) -> Fp12
fn mul_no_carry( &self, ctx: &mut Context<F>, a: impl Into<Self::UnsafeFieldPoint>, b: impl Into<Self::UnsafeFieldPoint>, ) -> Self::UnsafeFieldPoint
fn native_modulus(&self) -> &BigUint
fn range(&self) -> &Self::RangeChip
fn limb_bits(&self) -> usize
Source§fn load_private(
&self,
ctx: &mut Context<F>,
fe: Self::FieldType,
) -> Self::FieldPoint
fn load_private( &self, ctx: &mut Context<F>, fe: Self::FieldType, ) -> Self::FieldPoint
Assigns
fe
as private witness. Note that the witness may not be constrained to be a unique representation of the field element fe
.
Source§fn load_constant(
&self,
ctx: &mut Context<F>,
fe: Self::FieldType,
) -> Self::FieldPoint
fn load_constant( &self, ctx: &mut Context<F>, fe: Self::FieldType, ) -> Self::FieldPoint
Assigns
fe
as constant.
fn add_no_carry( &self, ctx: &mut Context<F>, a: impl Into<Self::UnsafeFieldPoint>, b: impl Into<Self::UnsafeFieldPoint>, ) -> Self::UnsafeFieldPoint
Source§fn add_constant_no_carry(
&self,
ctx: &mut Context<F>,
a: impl Into<Self::UnsafeFieldPoint>,
c: Self::FieldType,
) -> Self::UnsafeFieldPoint
fn add_constant_no_carry( &self, ctx: &mut Context<F>, a: impl Into<Self::UnsafeFieldPoint>, c: Self::FieldType, ) -> Self::UnsafeFieldPoint
output:
a + c
fn sub_no_carry( &self, ctx: &mut Context<F>, a: impl Into<Self::UnsafeFieldPoint>, b: impl Into<Self::UnsafeFieldPoint>, ) -> Self::UnsafeFieldPoint
fn negate(&self, ctx: &mut Context<F>, a: Self::FieldPoint) -> Self::FieldPoint
Source§fn scalar_mul_no_carry(
&self,
ctx: &mut Context<F>,
a: impl Into<Self::UnsafeFieldPoint>,
c: i64,
) -> Self::UnsafeFieldPoint
fn scalar_mul_no_carry( &self, ctx: &mut Context<F>, a: impl Into<Self::UnsafeFieldPoint>, c: i64, ) -> Self::UnsafeFieldPoint
a * c
Source§fn scalar_mul_and_add_no_carry(
&self,
ctx: &mut Context<F>,
a: impl Into<Self::UnsafeFieldPoint>,
b: impl Into<Self::UnsafeFieldPoint>,
c: i64,
) -> Self::UnsafeFieldPoint
fn scalar_mul_and_add_no_carry( &self, ctx: &mut Context<F>, a: impl Into<Self::UnsafeFieldPoint>, b: impl Into<Self::UnsafeFieldPoint>, c: i64, ) -> Self::UnsafeFieldPoint
a * c + b
fn check_carry_mod_to_zero( &self, ctx: &mut Context<F>, a: Self::UnsafeFieldPoint, )
fn carry_mod( &self, ctx: &mut Context<F>, a: Self::UnsafeFieldPoint, ) -> Self::FieldPoint
Source§fn enforce_less_than(
&self,
ctx: &mut Context<F>,
a: Self::FieldPoint,
) -> Self::ReducedFieldPoint
fn enforce_less_than( &self, ctx: &mut Context<F>, a: Self::FieldPoint, ) -> Self::ReducedFieldPoint
Constrains that
a
is a reduced representation and returns the wrapped a
.
fn is_soft_zero( &self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, ) -> AssignedValue<F>
fn is_soft_nonzero( &self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, ) -> AssignedValue<F>
fn is_zero( &self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, ) -> AssignedValue<F>
fn is_equal_unenforced( &self, ctx: &mut Context<F>, a: Self::ReducedFieldPoint, b: Self::ReducedFieldPoint, ) -> AssignedValue<F>
fn assert_equal( &self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, b: impl Into<Self::FieldPoint>, )
fn gate(&self) -> &<Self::RangeChip as RangeInstructions<F>>::Gate
Source§fn load_private_reduced(
&self,
ctx: &mut Context<F>,
fe: Self::FieldType,
) -> Self::ReducedFieldPoint
fn load_private_reduced( &self, ctx: &mut Context<F>, fe: Self::FieldType, ) -> Self::ReducedFieldPoint
Assigns
fe
as private witness and constrains the witness to be in reduced form.
fn is_equal( &self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, b: impl Into<Self::FieldPoint>, ) -> AssignedValue<F>
Source§fn mul(
&self,
ctx: &mut Context<F>,
a: impl Into<Self::UnsafeFieldPoint>,
b: impl Into<Self::UnsafeFieldPoint>,
) -> Self::FieldPoint
fn mul( &self, ctx: &mut Context<F>, a: impl Into<Self::UnsafeFieldPoint>, b: impl Into<Self::UnsafeFieldPoint>, ) -> Self::FieldPoint
If using
UnsafeFieldPoint
, make sure multiplication does not cause overflow.
Source§fn divide(
&self,
ctx: &mut Context<F>,
a: impl Into<Self::FieldPoint>,
b: impl Into<Self::FieldPoint>,
) -> Self::FieldPoint
fn divide( &self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, b: impl Into<Self::FieldPoint>, ) -> Self::FieldPoint
Constrains that
b
is nonzero as a field element and then returns a / b
.
Source§fn divide_unsafe(
&self,
ctx: &mut Context<F>,
a: impl Into<Self::UnsafeFieldPoint>,
b: impl Into<Self::UnsafeFieldPoint>,
) -> Self::FieldPoint
fn divide_unsafe( &self, ctx: &mut Context<F>, a: impl Into<Self::UnsafeFieldPoint>, b: impl Into<Self::UnsafeFieldPoint>, ) -> Self::FieldPoint
Source§fn neg_divide(
&self,
ctx: &mut Context<F>,
a: impl Into<Self::FieldPoint>,
b: impl Into<Self::FieldPoint>,
) -> Self::FieldPoint
fn neg_divide( &self, ctx: &mut Context<F>, a: impl Into<Self::FieldPoint>, b: impl Into<Self::FieldPoint>, ) -> Self::FieldPoint
Constrains that
b
is nonzero as a field element and then returns -a / b
.