use std::{array::from_fn, borrow::Borrow, cell::RefCell, marker::PhantomData, sync::Arc};
use openvm_circuit_primitives::utils::next_power_of_two_or_zero;
use openvm_circuit_primitives_derive::AlignedBorrow;
use openvm_instructions::instruction::Instruction;
use openvm_stark_backend::{
air_builders::{
debug::DebugConstraintBuilder, prover::ProverConstraintFolder, symbolic::SymbolicRapBuilder,
},
config::{StarkGenericConfig, Val},
p3_air::{Air, AirBuilder, BaseAir},
p3_field::{AbstractField, PrimeField32},
p3_matrix::{dense::RowMajorMatrix, Matrix},
p3_maybe_rayon::prelude::*,
prover::types::AirProofInput,
rap::{get_air_name, AnyRap, BaseAirWithPublicValues, PartitionedBaseAir},
Chip, ChipUsageGetter,
};
use super::{ExecutionState, InstructionExecutor, Result};
use crate::system::memory::{MemoryAuxColsFactory, MemoryController, MemoryControllerRef};
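/// The interface between an adapter and a core: the shapes of the data the two halves of a
/// chip exchange. `Reads` is the memory read data exposed to the core, `Writes` is the memory
/// write data the core must produce, and `ProcessedInstruction` is the part of the decoded
/// instruction (typically `is_valid` and the opcode) that the core hands back to the adapter.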
pub trait VmAdapterInterface<T> {
type Reads;
type Writes;
type ProcessedInstruction;
}
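/// The runtime half of an adapter. The adapter owns all interaction with the memory
/// controller: `preprocess` performs the memory reads for an instruction and returns the read
/// data together with a read record, `postprocess` performs the writes requested by the core
/// and returns the next execution state together with a write record, and `generate_trace_row`
/// fills one row of the adapter's trace columns from those records.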
pub trait VmAdapterChip<F> {
type ReadRecord: Send;
type WriteRecord: Send;
type Air: BaseAir<F> + Clone;
type Interface: VmAdapterInterface<F>;
#[allow(clippy::type_complexity)]
fn preprocess(
&mut self,
memory: &mut MemoryController<F>,
instruction: &Instruction<F>,
) -> Result<(
<Self::Interface as VmAdapterInterface<F>>::Reads,
Self::ReadRecord,
)>;
fn postprocess(
&mut self,
memory: &mut MemoryController<F>,
instruction: &Instruction<F>,
from_state: ExecutionState<u32>,
output: AdapterRuntimeContext<F, Self::Interface>,
read_record: &Self::ReadRecord,
) -> Result<(ExecutionState<u32>, Self::WriteRecord)>;
fn generate_trace_row(
&self,
row_slice: &mut [F],
read_record: Self::ReadRecord,
write_record: Self::WriteRecord,
aux_cols_factory: &MemoryAuxColsFactory<F>,
);
fn air(&self) -> &Self::Air;
}
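/// The constraint (AIR) half of an adapter. `eval` receives the `AdapterAirContext` produced
/// by the core AIR and is responsible for constraining the corresponding memory accesses and
/// instruction handling; `get_from_pc` exposes the column holding the pc of the instruction
/// being executed.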
pub trait VmAdapterAir<AB: AirBuilder>: BaseAir<AB::F> {
type Interface: VmAdapterInterface<AB::Expr>;
fn eval(
&self,
builder: &mut AB,
local: &[AB::Var],
interface: AdapterAirContext<AB::Expr, Self::Interface>,
);
fn get_from_pc(&self, local: &[AB::Var]) -> AB::Var;
}
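/// The runtime half of a core: the opcode-specific logic of a chip. `execute_instruction`
/// consumes the adapter's reads and produces the data to write back (plus an optional explicit
/// next pc), and `generate_trace_row` fills one row of the core's trace columns from the
/// resulting record.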
pub trait VmCoreChip<F, I: VmAdapterInterface<F>> {
type Record: Send;
type Air: BaseAirWithPublicValues<F> + Clone;
#[allow(clippy::type_complexity)]
fn execute_instruction(
&self,
instruction: &Instruction<F>,
from_pc: u32,
reads: I::Reads,
) -> Result<(AdapterRuntimeContext<F, I>, Self::Record)>;
fn get_opcode_name(&self, opcode: usize) -> String;
fn generate_trace_row(&self, row_slice: &mut [F], record: Self::Record);
fn generate_public_values(&self) -> Vec<F> {
vec![]
}
fn air(&self) -> &Self::Air;
/// Optional post-processing of the fully generated trace matrix, for example to adjust
/// padding rows. The default implementation is a no-op.
fn finalize(&self, _trace: &mut RowMajorMatrix<F>, _num_records: usize) {}
}
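/// The constraint half of a core. `eval` constrains the core columns and returns the
/// `AdapterAirContext` that the adapter AIR consumes.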
pub trait VmCoreAir<AB, I>: BaseAirWithPublicValues<AB::F>
where
AB: AirBuilder,
I: VmAdapterInterface<AB::Expr>,
{
fn eval(
&self,
builder: &mut AB,
local_core: &[AB::Var],
from_pc: AB::Var,
) -> AdapterAirContext<AB::Expr, I>;
}
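/// The output a core chip hands back to its adapter after executing one instruction: the
/// values to write and, optionally, an explicit next pc. A `to_pc` of `None` leaves the pc
/// update to the adapter's default behavior.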
pub struct AdapterRuntimeContext<T, I: VmAdapterInterface<T>> {
pub to_pc: Option<u32>,
pub writes: I::Writes,
}
impl<T, I: VmAdapterInterface<T>> AdapterRuntimeContext<T, I> {
pub fn without_pc(writes: impl Into<I::Writes>) -> Self {
Self {
to_pc: None,
writes: writes.into(),
}
}
}
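/// The symbolic counterpart of `AdapterRuntimeContext`, produced by the core AIR during
/// constraint evaluation and consumed by the adapter AIR: read and write expressions, the
/// processed instruction, and an optional next-pc expression.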
pub struct AdapterAirContext<T, I: VmAdapterInterface<T>> {
pub to_pc: Option<T>,
pub reads: I::Reads,
pub writes: I::Writes,
pub instruction: I::ProcessedInstruction,
}
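/// Combines an adapter chip and a core chip into a single VM chip implementing
/// `InstructionExecutor`, `Chip`, and `ChipUsageGetter`. Execution records are buffered in
/// `records`; when the proof input is generated, each record becomes one trace row laid out as
/// adapter columns followed by core columns, padded with zero rows to a power-of-two height.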
pub struct VmChipWrapper<F, A: VmAdapterChip<F>, C: VmCoreChip<F, A::Interface>> {
pub adapter: A,
pub core: C,
pub records: Vec<(A::ReadRecord, A::WriteRecord, C::Record)>,
memory: MemoryControllerRef<F>,
}
impl<F, A, C> VmChipWrapper<F, A, C>
where
A: VmAdapterChip<F>,
C: VmCoreChip<F, A::Interface>,
{
pub fn new(adapter: A, core: C, memory: MemoryControllerRef<F>) -> Self {
Self {
adapter,
core,
records: vec![],
memory,
}
}
}
impl<F, A, M> InstructionExecutor<F> for VmChipWrapper<F, A, M>
where
F: PrimeField32,
A: VmAdapterChip<F> + Send + Sync,
M: VmCoreChip<F, A::Interface> + Send + Sync,
{
fn execute(
&mut self,
instruction: Instruction<F>,
from_state: ExecutionState<u32>,
) -> Result<ExecutionState<u32>> {
let mut memory = self.memory.borrow_mut();
let (reads, read_record) = self.adapter.preprocess(&mut memory, &instruction)?;
let (output, core_record) =
self.core
.execute_instruction(&instruction, from_state.pc, reads)?;
let (to_state, write_record) = self.adapter.postprocess(
&mut memory,
&instruction,
from_state,
output,
&read_record,
)?;
self.records.push((read_record, write_record, core_record));
Ok(to_state)
}
fn get_opcode_name(&self, opcode: usize) -> String {
self.core.get_opcode_name(opcode)
}
}
impl<SC, A, C> Chip<SC> for VmChipWrapper<Val<SC>, A, C>
where
SC: StarkGenericConfig,
Val<SC>: PrimeField32,
A: VmAdapterChip<Val<SC>> + Send + Sync,
C: VmCoreChip<Val<SC>, A::Interface> + Send + Sync,
A::Air: Send + Sync + 'static,
A::Air: VmAdapterAir<SymbolicRapBuilder<Val<SC>>>,
A::Air: for<'a> VmAdapterAir<ProverConstraintFolder<'a, SC>>,
A::Air: for<'a> VmAdapterAir<DebugConstraintBuilder<'a, SC>>,
C::Air: Send + Sync + 'static,
C::Air: VmCoreAir<
SymbolicRapBuilder<Val<SC>>,
<A::Air as VmAdapterAir<SymbolicRapBuilder<Val<SC>>>>::Interface,
>,
C::Air: for<'a> VmCoreAir<
ProverConstraintFolder<'a, SC>,
<A::Air as VmAdapterAir<ProverConstraintFolder<'a, SC>>>::Interface,
>,
C::Air: for<'a> VmCoreAir<
DebugConstraintBuilder<'a, SC>,
<A::Air as VmAdapterAir<DebugConstraintBuilder<'a, SC>>>::Interface,
>,
{
fn air(&self) -> Arc<dyn AnyRap<SC>> {
let air: VmAirWrapper<A::Air, C::Air> = VmAirWrapper {
adapter: self.adapter.air().clone(),
core: self.core.air().clone(),
};
Arc::new(air)
}
fn generate_air_proof_input(self) -> AirProofInput<SC> {
let air = self.air();
let num_records = self.records.len();
let height = next_power_of_two_or_zero(num_records);
let core_width = self.core.air().width();
let adapter_width = self.adapter.air().width();
let width = core_width + adapter_width;
let mut values = Val::<SC>::zero_vec(height * width);
let memory_aux_cols_factory = RefCell::borrow(&self.memory).aux_cols_factory();
values
.par_chunks_mut(width)
.zip(self.records.into_par_iter())
.for_each(|(row_slice, record)| {
let (adapter_row, core_row) = row_slice.split_at_mut(adapter_width);
self.adapter.generate_trace_row(
adapter_row,
record.0,
record.1,
&memory_aux_cols_factory,
);
self.core.generate_trace_row(core_row, record.2);
});
let mut trace = RowMajorMatrix::new(values, width);
self.core.finalize(&mut trace, num_records);
AirProofInput::simple(air, trace, self.core.generate_public_values())
}
}
impl<F, A, M> ChipUsageGetter for VmChipWrapper<F, A, M>
where
A: VmAdapterChip<F> + Sync,
M: VmCoreChip<F, A::Interface> + Sync,
{
fn air_name(&self) -> String {
format!(
"<{},{}>",
get_air_name(self.adapter.air()),
get_air_name(self.core.air())
)
}
fn current_trace_height(&self) -> usize {
self.records.len()
}
fn trace_width(&self) -> usize {
self.adapter.air().width() + self.core.air().width()
}
}
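/// The AIR corresponding to `VmChipWrapper`: adapter columns followed by core columns. During
/// evaluation, the core AIR runs first and its `AdapterAirContext` is passed to the adapter
/// AIR.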
pub struct VmAirWrapper<A, C> {
pub adapter: A,
pub core: C,
}
impl<F, A, C> BaseAir<F> for VmAirWrapper<A, C>
where
A: BaseAir<F>,
C: BaseAir<F>,
{
fn width(&self) -> usize {
self.adapter.width() + self.core.width()
}
}
impl<F, A, M> BaseAirWithPublicValues<F> for VmAirWrapper<A, M>
where
A: BaseAir<F>,
M: BaseAirWithPublicValues<F>,
{
fn num_public_values(&self) -> usize {
self.core.num_public_values()
}
}
impl<F, A, M> PartitionedBaseAir<F> for VmAirWrapper<A, M>
where
A: BaseAir<F>,
M: BaseAir<F>,
{
}
impl<AB, A, M> Air<AB> for VmAirWrapper<A, M>
where
AB: AirBuilder,
A: VmAdapterAir<AB>,
M: VmCoreAir<AB, A::Interface>,
{
fn eval(&self, builder: &mut AB) {
let main = builder.main();
let local = main.row_slice(0);
let local: &[AB::Var] = (*local).borrow();
let (local_adapter, local_core) = local.split_at(self.adapter.width());
let ctx = self
.core
.eval(builder, local_core, self.adapter.get_from_pc(local_adapter));
self.adapter.eval(builder, local_adapter, ctx);
}
}
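/// An interface with `NUM_READS` reads of `READ_SIZE` cells each, `NUM_WRITES` writes of
/// `WRITE_SIZE` cells each, and a configurable processed-instruction type `PI`.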
pub struct BasicAdapterInterface<
T,
PI,
const NUM_READS: usize,
const NUM_WRITES: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
>(PhantomData<T>, PhantomData<PI>);
impl<
T,
PI,
const NUM_READS: usize,
const NUM_WRITES: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
> VmAdapterInterface<T>
for BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>
{
type Reads = [[T; READ_SIZE]; NUM_READS];
type Writes = [[T; WRITE_SIZE]; NUM_WRITES];
type ProcessedInstruction = PI;
}
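/// An interface with `NUM_READS` batched reads, each consisting of `BLOCKS_PER_READ` blocks of
/// `READ_SIZE` cells, and a single batched write of `BLOCKS_PER_WRITE` blocks of `WRITE_SIZE`
/// cells. The processed instruction is a `MinimalInstruction`.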
pub struct VecHeapAdapterInterface<
T,
const NUM_READS: usize,
const BLOCKS_PER_READ: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
>(PhantomData<T>);
impl<
T,
const NUM_READS: usize,
const BLOCKS_PER_READ: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
> VmAdapterInterface<T>
for VecHeapAdapterInterface<
T,
NUM_READS,
BLOCKS_PER_READ,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>
{
type Reads = [[[T; READ_SIZE]; BLOCKS_PER_READ]; NUM_READS];
type Writes = [[T; WRITE_SIZE]; BLOCKS_PER_WRITE];
type ProcessedInstruction = MinimalInstruction<T>;
}
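/// Like `VecHeapAdapterInterface` with two reads, except the two reads may consist of
/// different numbers of blocks (`BLOCKS_PER_READ1` and `BLOCKS_PER_READ2`).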
pub struct VecHeapTwoReadsAdapterInterface<
T,
const BLOCKS_PER_READ1: usize,
const BLOCKS_PER_READ2: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
>(PhantomData<T>);
impl<
T,
const BLOCKS_PER_READ1: usize,
const BLOCKS_PER_READ2: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
> VmAdapterInterface<T>
for VecHeapTwoReadsAdapterInterface<
T,
BLOCKS_PER_READ1,
BLOCKS_PER_READ2,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>
{
type Reads = (
[[T; READ_SIZE]; BLOCKS_PER_READ1],
[[T; READ_SIZE]; BLOCKS_PER_READ2],
);
type Writes = [[T; WRITE_SIZE]; BLOCKS_PER_WRITE];
type ProcessedInstruction = MinimalInstruction<T>;
}
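/// An interface whose reads and writes are flattened into single arrays of `READ_CELLS` and
/// `WRITE_CELLS` cells. The conversions to and from `BasicAdapterInterface` below require
/// `READ_CELLS == NUM_READS * READ_SIZE` and `WRITE_CELLS == NUM_WRITES * WRITE_SIZE`.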
pub struct FlatInterface<T, PI, const READ_CELLS: usize, const WRITE_CELLS: usize>(
PhantomData<T>,
PhantomData<PI>,
);
impl<T, PI, const READ_CELLS: usize, const WRITE_CELLS: usize> VmAdapterInterface<T>
for FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>
{
type Reads = [T; READ_CELLS];
type Writes = [T; WRITE_CELLS];
type ProcessedInstruction = PI;
}
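/// An interface whose shapes are only known at runtime: reads, writes, and the processed
/// instruction are all carried as `DynArray`s.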
pub struct DynAdapterInterface<T>(PhantomData<T>);
impl<T> VmAdapterInterface<T> for DynAdapterInterface<T> {
type Reads = DynArray<T>;
type Writes = DynArray<T>;
type ProcessedInstruction = DynArray<T>;
}
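/// A newtype around `Vec<T>` used as the dynamically sized payload of `DynAdapterInterface`.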
#[derive(Clone, Debug, Default)]
pub struct DynArray<T>(pub Vec<T>);
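/// The smallest processed-instruction format: a validity flag and the opcode.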
#[repr(C)]
#[derive(AlignedBorrow)]
pub struct MinimalInstruction<T> {
pub is_valid: T,
pub opcode: T,
}
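/// A processed instruction that also carries an immediate operand.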
#[repr(C)]
#[derive(AlignedBorrow)]
pub struct ImmInstruction<T> {
pub is_valid: T,
pub opcode: T,
pub immediate: T,
}
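/// `From` conversions between the runtime and AIR contexts of different interface types, so
/// adapters and cores written against compatible shapes can be paired with each other.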
mod conversions {
use super::*;
impl<
T,
const NUM_READS: usize,
const BLOCKS_PER_READ: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
>
From<
AdapterAirContext<
T,
VecHeapAdapterInterface<
T,
NUM_READS,
BLOCKS_PER_READ,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>,
>,
> for AdapterAirContext<T, DynAdapterInterface<T>>
{
fn from(
ctx: AdapterAirContext<
T,
VecHeapAdapterInterface<
T,
NUM_READS,
BLOCKS_PER_READ,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>,
>,
) -> Self {
AdapterAirContext {
to_pc: ctx.to_pc,
reads: ctx.reads.into(),
writes: ctx.writes.into(),
instruction: ctx.instruction.into(),
}
}
}
impl<
T,
const NUM_READS: usize,
const BLOCKS_PER_READ: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
>
From<
AdapterRuntimeContext<
T,
VecHeapAdapterInterface<
T,
NUM_READS,
BLOCKS_PER_READ,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>,
>,
> for AdapterRuntimeContext<T, DynAdapterInterface<T>>
{
fn from(
ctx: AdapterRuntimeContext<
T,
VecHeapAdapterInterface<
T,
NUM_READS,
BLOCKS_PER_READ,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>,
>,
) -> Self {
AdapterRuntimeContext {
to_pc: ctx.to_pc,
writes: ctx.writes.into(),
}
}
}
impl<
T,
const NUM_READS: usize,
const BLOCKS_PER_READ: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
> From<AdapterAirContext<T, DynAdapterInterface<T>>>
for AdapterAirContext<
T,
VecHeapAdapterInterface<
T,
NUM_READS,
BLOCKS_PER_READ,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>,
>
{
fn from(ctx: AdapterAirContext<T, DynAdapterInterface<T>>) -> Self {
AdapterAirContext {
to_pc: ctx.to_pc,
reads: ctx.reads.into(),
writes: ctx.writes.into(),
instruction: ctx.instruction.into(),
}
}
}
impl<
T,
const NUM_READS: usize,
const BLOCKS_PER_READ: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
> From<AdapterRuntimeContext<T, DynAdapterInterface<T>>>
for AdapterRuntimeContext<
T,
VecHeapAdapterInterface<
T,
NUM_READS,
BLOCKS_PER_READ,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>,
>
{
fn from(ctx: AdapterRuntimeContext<T, DynAdapterInterface<T>>) -> Self {
AdapterRuntimeContext {
to_pc: ctx.to_pc,
writes: ctx.writes.into(),
}
}
}
impl<
T: Clone,
const BLOCKS_PER_READ1: usize,
const BLOCKS_PER_READ2: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
> From<AdapterAirContext<T, DynAdapterInterface<T>>>
for AdapterAirContext<
T,
VecHeapTwoReadsAdapterInterface<
T,
BLOCKS_PER_READ1,
BLOCKS_PER_READ2,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>,
>
{
fn from(ctx: AdapterAirContext<T, DynAdapterInterface<T>>) -> Self {
AdapterAirContext {
to_pc: ctx.to_pc,
reads: ctx.reads.into(),
writes: ctx.writes.into(),
instruction: ctx.instruction.into(),
}
}
}
impl<
T,
const BLOCKS_PER_READ1: usize,
const BLOCKS_PER_READ2: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
> From<AdapterRuntimeContext<T, DynAdapterInterface<T>>>
for AdapterRuntimeContext<
T,
VecHeapTwoReadsAdapterInterface<
T,
BLOCKS_PER_READ1,
BLOCKS_PER_READ2,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>,
>
{
fn from(ctx: AdapterRuntimeContext<T, DynAdapterInterface<T>>) -> Self {
AdapterRuntimeContext {
to_pc: ctx.to_pc,
writes: ctx.writes.into(),
}
}
}
impl<
T,
PI,
const BASIC_NUM_READS: usize,
const BASIC_NUM_WRITES: usize,
const NUM_READS: usize,
const BLOCKS_PER_READ: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
>
From<
AdapterRuntimeContext<
T,
BasicAdapterInterface<
T,
PI,
BASIC_NUM_READS,
BASIC_NUM_WRITES,
READ_SIZE,
WRITE_SIZE,
>,
>,
>
for AdapterRuntimeContext<
T,
VecHeapAdapterInterface<
T,
NUM_READS,
BLOCKS_PER_READ,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>,
>
{
fn from(
ctx: AdapterRuntimeContext<
T,
BasicAdapterInterface<
T,
PI,
BASIC_NUM_READS,
BASIC_NUM_WRITES,
READ_SIZE,
WRITE_SIZE,
>,
>,
) -> Self {
assert_eq!(BASIC_NUM_WRITES, BLOCKS_PER_WRITE);
let mut writes_it = ctx.writes.into_iter();
let writes = from_fn(|_| writes_it.next().unwrap());
AdapterRuntimeContext {
to_pc: ctx.to_pc,
writes,
}
}
}
impl<
T,
PI: Into<MinimalInstruction<T>>,
const BASIC_NUM_READS: usize,
const BASIC_NUM_WRITES: usize,
const NUM_READS: usize,
const BLOCKS_PER_READ: usize,
const BLOCKS_PER_WRITE: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
>
From<
AdapterAirContext<
T,
BasicAdapterInterface<
T,
PI,
BASIC_NUM_READS,
BASIC_NUM_WRITES,
READ_SIZE,
WRITE_SIZE,
>,
>,
>
for AdapterAirContext<
T,
VecHeapAdapterInterface<
T,
NUM_READS,
BLOCKS_PER_READ,
BLOCKS_PER_WRITE,
READ_SIZE,
WRITE_SIZE,
>,
>
{
fn from(
ctx: AdapterAirContext<
T,
BasicAdapterInterface<
T,
PI,
BASIC_NUM_READS,
BASIC_NUM_WRITES,
READ_SIZE,
WRITE_SIZE,
>,
>,
) -> Self {
assert_eq!(BASIC_NUM_READS, NUM_READS * BLOCKS_PER_READ);
let mut reads_it = ctx.reads.into_iter();
let reads = from_fn(|_| from_fn(|_| reads_it.next().unwrap()));
assert_eq!(BASIC_NUM_WRITES, BLOCKS_PER_WRITE);
let mut writes_it = ctx.writes.into_iter();
let writes = from_fn(|_| writes_it.next().unwrap());
AdapterAirContext {
to_pc: ctx.to_pc,
reads,
writes,
instruction: ctx.instruction.into(),
}
}
}
impl<
T,
PI,
const NUM_READS: usize,
const NUM_WRITES: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
const READ_CELLS: usize,
const WRITE_CELLS: usize,
>
From<
AdapterAirContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>,
> for AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>
{
fn from(
ctx: AdapterAirContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>,
) -> AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>> {
assert_eq!(READ_CELLS, NUM_READS * READ_SIZE);
assert_eq!(WRITE_CELLS, NUM_WRITES * WRITE_SIZE);
let mut reads_it = ctx.reads.into_iter().flatten();
let reads = from_fn(|_| reads_it.next().unwrap());
let mut writes_it = ctx.writes.into_iter().flatten();
let writes = from_fn(|_| writes_it.next().unwrap());
AdapterAirContext {
to_pc: ctx.to_pc,
reads,
writes,
instruction: ctx.instruction,
}
}
}
impl<
T,
PI,
const NUM_READS: usize,
const NUM_WRITES: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
const READ_CELLS: usize,
const WRITE_CELLS: usize,
> From<AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>>
for AdapterAirContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>
{
fn from(
AdapterAirContext {
to_pc,
reads,
writes,
instruction,
}: AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>,
) -> AdapterAirContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
> {
assert_eq!(READ_CELLS, NUM_READS * READ_SIZE);
assert_eq!(WRITE_CELLS, NUM_WRITES * WRITE_SIZE);
let mut reads_it = reads.into_iter();
let reads: [[T; READ_SIZE]; NUM_READS] =
from_fn(|_| from_fn(|_| reads_it.next().unwrap()));
let mut writes_it = writes.into_iter();
let writes: [[T; WRITE_SIZE]; NUM_WRITES] =
from_fn(|_| from_fn(|_| writes_it.next().unwrap()));
AdapterAirContext {
to_pc,
reads,
writes,
instruction,
}
}
}
impl<
T,
PI,
const NUM_READS: usize,
const NUM_WRITES: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
const READ_CELLS: usize,
const WRITE_CELLS: usize,
>
From<
AdapterRuntimeContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>,
> for AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>
{
fn from(
ctx: AdapterRuntimeContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>,
) -> AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>> {
assert_eq!(WRITE_CELLS, NUM_WRITES * WRITE_SIZE);
let mut writes_it = ctx.writes.into_iter().flatten();
let writes = from_fn(|_| writes_it.next().unwrap());
AdapterRuntimeContext {
to_pc: ctx.to_pc,
writes,
}
}
}
impl<
T: AbstractField,
PI,
const NUM_READS: usize,
const NUM_WRITES: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
const READ_CELLS: usize,
const WRITE_CELLS: usize,
> From<AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>>
for AdapterRuntimeContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>
{
fn from(
ctx: AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>,
) -> AdapterRuntimeContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
> {
assert_eq!(WRITE_CELLS, NUM_WRITES * WRITE_SIZE);
let mut writes_it = ctx.writes.into_iter();
let writes: [[T; WRITE_SIZE]; NUM_WRITES] =
from_fn(|_| from_fn(|_| writes_it.next().unwrap()));
AdapterRuntimeContext {
to_pc: ctx.to_pc,
writes,
}
}
}
impl<T> From<Vec<T>> for DynArray<T> {
fn from(v: Vec<T>) -> Self {
Self(v)
}
}
impl<T> From<DynArray<T>> for Vec<T> {
fn from(v: DynArray<T>) -> Vec<T> {
v.0
}
}
impl<T, const N: usize, const M: usize> From<[[T; N]; M]> for DynArray<T> {
fn from(v: [[T; N]; M]) -> Self {
Self(v.into_iter().flatten().collect())
}
}
impl<T, const N: usize, const M: usize> From<DynArray<T>> for [[T; N]; M] {
fn from(v: DynArray<T>) -> Self {
assert_eq!(v.0.len(), N * M, "Incorrect vector length {}", v.0.len());
let mut it = v.0.into_iter();
from_fn(|_| from_fn(|_| it.next().unwrap()))
}
}
impl<T, const N: usize, const M: usize, const R: usize> From<[[[T; N]; M]; R]> for DynArray<T> {
fn from(v: [[[T; N]; M]; R]) -> Self {
Self(
v.into_iter()
.flat_map(|x| x.into_iter().flatten())
.collect(),
)
}
}
impl<T, const N: usize, const M: usize, const R: usize> From<DynArray<T>> for [[[T; N]; M]; R] {
fn from(v: DynArray<T>) -> Self {
assert_eq!(
v.0.len(),
N * M * R,
"Incorrect vector length {}",
v.0.len()
);
let mut it = v.0.into_iter();
from_fn(|_| from_fn(|_| from_fn(|_| it.next().unwrap())))
}
}
impl<T, const N: usize, const M1: usize, const M2: usize> From<([[T; N]; M1], [[T; N]; M2])>
for DynArray<T>
{
fn from(v: ([[T; N]; M1], [[T; N]; M2])) -> Self {
let vec =
v.0.into_iter()
.flatten()
.chain(v.1.into_iter().flatten())
.collect();
Self(vec)
}
}
impl<T, const N: usize, const M1: usize, const M2: usize> From<DynArray<T>>
for ([[T; N]; M1], [[T; N]; M2])
{
fn from(v: DynArray<T>) -> Self {
assert_eq!(
v.0.len(),
N * (M1 + M2),
"Incorrect vector length {}",
v.0.len()
);
let mut it = v.0.into_iter();
(
from_fn(|_| from_fn(|_| it.next().unwrap())),
from_fn(|_| from_fn(|_| it.next().unwrap())),
)
}
}
impl<
T,
PI: Into<DynArray<T>>,
const NUM_READS: usize,
const NUM_WRITES: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
>
From<
AdapterAirContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>,
> for AdapterAirContext<T, DynAdapterInterface<T>>
{
fn from(
ctx: AdapterAirContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>,
) -> Self {
AdapterAirContext {
to_pc: ctx.to_pc,
reads: ctx.reads.into(),
writes: ctx.writes.into(),
instruction: ctx.instruction.into(),
}
}
}
impl<
T,
PI,
const NUM_READS: usize,
const NUM_WRITES: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
>
From<
AdapterRuntimeContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>,
> for AdapterRuntimeContext<T, DynAdapterInterface<T>>
{
fn from(
ctx: AdapterRuntimeContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>,
) -> Self {
AdapterRuntimeContext {
to_pc: ctx.to_pc,
writes: ctx.writes.into(),
}
}
}
impl<
T,
PI,
const NUM_READS: usize,
const NUM_WRITES: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
> From<AdapterAirContext<T, DynAdapterInterface<T>>>
for AdapterAirContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>
where
PI: From<DynArray<T>>,
{
fn from(ctx: AdapterAirContext<T, DynAdapterInterface<T>>) -> Self {
AdapterAirContext {
to_pc: ctx.to_pc,
reads: ctx.reads.into(),
writes: ctx.writes.into(),
instruction: ctx.instruction.into(),
}
}
}
impl<
T,
PI,
const NUM_READS: usize,
const NUM_WRITES: usize,
const READ_SIZE: usize,
const WRITE_SIZE: usize,
> From<AdapterRuntimeContext<T, DynAdapterInterface<T>>>
for AdapterRuntimeContext<
T,
BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
>
{
fn from(ctx: AdapterRuntimeContext<T, DynAdapterInterface<T>>) -> Self {
AdapterRuntimeContext {
to_pc: ctx.to_pc,
writes: ctx.writes.into(),
}
}
}
impl<T: Clone, PI: Into<DynArray<T>>, const READ_CELLS: usize, const WRITE_CELLS: usize>
From<AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>>
for AdapterAirContext<T, DynAdapterInterface<T>>
{
fn from(ctx: AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>) -> Self {
AdapterAirContext {
to_pc: ctx.to_pc,
reads: ctx.reads.to_vec().into(),
writes: ctx.writes.to_vec().into(),
instruction: ctx.instruction.into(),
}
}
}
impl<T: Clone, PI, const READ_CELLS: usize, const WRITE_CELLS: usize>
From<AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>>
for AdapterRuntimeContext<T, DynAdapterInterface<T>>
{
fn from(
ctx: AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>,
) -> Self {
AdapterRuntimeContext {
to_pc: ctx.to_pc,
writes: ctx.writes.to_vec().into(),
}
}
}
impl<T> From<MinimalInstruction<T>> for DynArray<T> {
fn from(m: MinimalInstruction<T>) -> Self {
Self(vec![m.is_valid, m.opcode])
}
}
impl<T> From<DynArray<T>> for MinimalInstruction<T> {
fn from(m: DynArray<T>) -> Self {
let mut m = m.0.into_iter();
MinimalInstruction {
is_valid: m.next().unwrap(),
opcode: m.next().unwrap(),
}
}
}
impl<T> From<DynArray<T>> for ImmInstruction<T> {
fn from(m: DynArray<T>) -> Self {
let mut m = m.0.into_iter();
ImmInstruction {
is_valid: m.next().unwrap(),
opcode: m.next().unwrap(),
immediate: m.next().unwrap(),
}
}
}
impl<T> From<ImmInstruction<T>> for DynArray<T> {
fn from(instruction: ImmInstruction<T>) -> Self {
DynArray::from(vec![
instruction.is_valid,
instruction.opcode,
instruction.immediate,
])
}
}
}
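// Illustrative sketch, not part of the module's public API: small tests exercising the
// conversion impls above with arbitrary `u32` values, to show how fixed-size read/write shapes
// flatten through `FlatInterface` and `DynArray`. The concrete sizes used here (1 read of 4
// cells, 2 writes of 4 cells) are assumptions chosen purely for illustration.
#[cfg(test)]
mod conversion_examples {
    use super::{
        AdapterRuntimeContext, BasicAdapterInterface, DynArray, FlatInterface,
        MinimalInstruction,
    };

    #[test]
    fn basic_to_flat_runtime_context() {
        // `BasicAdapterInterface` with 1 read of 4 cells and 2 writes of 4 cells...
        type Basic = BasicAdapterInterface<u32, MinimalInstruction<u32>, 1, 2, 4, 4>;
        // ...flattens into a `FlatInterface` with 4 read cells and 8 write cells.
        type Flat = FlatInterface<u32, MinimalInstruction<u32>, 4, 8>;
        let writes: [[u32; 4]; 2] = [[1, 2, 3, 4], [5, 6, 7, 8]];
        let ctx: AdapterRuntimeContext<u32, Basic> = AdapterRuntimeContext::without_pc(writes);
        let flat: AdapterRuntimeContext<u32, Flat> = ctx.into();
        assert_eq!(flat.writes, [1, 2, 3, 4, 5, 6, 7, 8]);
        assert!(flat.to_pc.is_none());
    }

    #[test]
    fn dyn_array_round_trips() {
        // Nested fixed-size arrays flatten row-major into a `DynArray` and back.
        let reads: [[u32; 2]; 3] = [[1, 2], [3, 4], [5, 6]];
        let dyn_reads: DynArray<u32> = reads.into();
        assert_eq!(dyn_reads.0, vec![1, 2, 3, 4, 5, 6]);
        let back: [[u32; 2]; 3] = dyn_reads.into();
        assert_eq!(back, reads);

        // `MinimalInstruction` serializes as `[is_valid, opcode]`.
        let instr = MinimalInstruction { is_valid: 1u32, opcode: 7 };
        let dyn_instr: DynArray<u32> = instr.into();
        assert_eq!(dyn_instr.0, vec![1, 7]);
        let restored: MinimalInstruction<u32> = dyn_instr.into();
        assert_eq!(restored.is_valid, 1);
        assert_eq!(restored.opcode, 7);
    }
}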