use std::{
    array::from_fn,
    borrow::Borrow,
    marker::PhantomData,
    sync::{Arc, Mutex},
};

use openvm_circuit_primitives::utils::next_power_of_two_or_zero;
use openvm_circuit_primitives_derive::AlignedBorrow;
use openvm_instructions::{instruction::Instruction, LocalOpcode};
use openvm_stark_backend::{
    air_builders::{debug::DebugConstraintBuilder, symbolic::SymbolicRapBuilder},
    config::{StarkGenericConfig, Val},
    p3_air::{Air, AirBuilder, BaseAir},
    p3_field::{FieldAlgebra, PrimeField32},
    p3_matrix::{dense::RowMajorMatrix, Matrix},
    p3_maybe_rayon::prelude::*,
    prover::types::AirProofInput,
    rap::{get_air_name, BaseAirWithPublicValues, PartitionedBaseAir},
    AirRef, Chip, ChipUsageGetter,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};

use super::{ExecutionState, InstructionExecutor, Result};
use crate::system::memory::{MemoryController, OfflineMemory};

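/// The interface between the adapter AIR and the core AIR of a chip. It fixes the shapes of the
/// data exchanged between the two: the memory reads exposed to the core, the writes the core
/// produces, and the processed parts of the instruction the core shares with the adapter.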
pub trait VmAdapterInterface<T> {
    type Reads;
    type Writes;
    type ProcessedInstruction;
}

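/// Runtime counterpart of an adapter AIR: it owns all memory accesses of a chip. `preprocess`
/// performs the reads for an instruction, `postprocess` commits the writes produced by the core
/// chip and returns the next execution state, and `generate_trace_row` fills the adapter columns
/// of a trace row from the recorded reads and writes.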
pub trait VmAdapterChip<F> {
    type ReadRecord: Send + Serialize + DeserializeOwned;
    type WriteRecord: Send + Serialize + DeserializeOwned;
    type Air: BaseAir<F> + Clone;

    type Interface: VmAdapterInterface<F>;

    #[allow(clippy::type_complexity)]
    fn preprocess(
        &mut self,
        memory: &mut MemoryController<F>,
        instruction: &Instruction<F>,
    ) -> Result<(
        <Self::Interface as VmAdapterInterface<F>>::Reads,
        Self::ReadRecord,
    )>;

    fn postprocess(
        &mut self,
        memory: &mut MemoryController<F>,
        instruction: &Instruction<F>,
        from_state: ExecutionState<u32>,
        output: AdapterRuntimeContext<F, Self::Interface>,
        read_record: &Self::ReadRecord,
    ) -> Result<(ExecutionState<u32>, Self::WriteRecord)>;

    fn generate_trace_row(
        &self,
        row_slice: &mut [F],
        read_record: Self::ReadRecord,
        write_record: Self::WriteRecord,
        memory: &OfflineMemory<F>,
    );

    fn air(&self) -> &Self::Air;
}

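/// Constraint counterpart of [`VmAdapterChip`]: constrains the adapter columns of a row against
/// the [`AdapterAirContext`] produced by the core AIR, and exposes the `from_pc` column to it.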
pub trait VmAdapterAir<AB: AirBuilder>: BaseAir<AB::F> {
    type Interface: VmAdapterInterface<AB::Expr>;

    fn eval(
        &self,
        builder: &mut AB,
        local: &[AB::Var],
        interface: AdapterAirContext<AB::Expr, Self::Interface>,
    );

    fn get_from_pc(&self, local: &[AB::Var]) -> AB::Var;
}

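/// Runtime logic of a chip that is independent of how memory is accessed: given the reads
/// provided by the adapter, `execute_instruction` computes the writes (and optionally the next
/// pc) together with a record from which `generate_trace_row` later fills the core columns.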
pub trait VmCoreChip<F, I: VmAdapterInterface<F>> {
    type Record: Send + Serialize + DeserializeOwned;
    type Air: BaseAirWithPublicValues<F> + Clone;

    #[allow(clippy::type_complexity)]
    fn execute_instruction(
        &self,
        instruction: &Instruction<F>,
        from_pc: u32,
        reads: I::Reads,
    ) -> Result<(AdapterRuntimeContext<F, I>, Self::Record)>;

    fn get_opcode_name(&self, opcode: usize) -> String;

    fn generate_trace_row(&self, row_slice: &mut [F], record: Self::Record);

    fn generate_public_values(&self) -> Vec<F> {
        vec![]
    }

    fn air(&self) -> &Self::Air;

    fn finalize(&self, _trace: &mut RowMajorMatrix<F>, _num_records: usize) {}
}

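/// Constraint counterpart of [`VmCoreChip`]: constrains the core columns and returns the
/// [`AdapterAirContext`] that the adapter AIR then ties to memory and execution. `start_offset`
/// is the global offset of this chip's opcode class, used by the helper methods to translate
/// local opcodes into global opcode expressions.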
pub trait VmCoreAir<AB, I>: BaseAirWithPublicValues<AB::F>
where
    AB: AirBuilder,
    I: VmAdapterInterface<AB::Expr>,
{
    fn eval(
        &self,
        builder: &mut AB,
        local_core: &[AB::Var],
        from_pc: AB::Var,
    ) -> AdapterAirContext<AB::Expr, I>;

    fn start_offset(&self) -> usize;

    fn start_offset_expr(&self) -> AB::Expr {
        AB::Expr::from_canonical_usize(self.start_offset())
    }

    fn expr_to_global_expr(&self, local_expr: impl Into<AB::Expr>) -> AB::Expr {
        self.start_offset_expr() + local_expr.into()
    }

    fn opcode_to_global_expr(&self, local_opcode: impl LocalOpcode) -> AB::Expr {
        self.expr_to_global_expr(AB::Expr::from_canonical_usize(local_opcode.local_usize()))
    }
}

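/// Output of the core chip at runtime, handed back to the adapter: the writes to perform and an
/// optional next program counter (`None` lets the adapter pick the default).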
pub struct AdapterRuntimeContext<T, I: VmAdapterInterface<T>> {
    pub to_pc: Option<u32>,
    pub writes: I::Writes,
}

impl<T, I: VmAdapterInterface<T>> AdapterRuntimeContext<T, I> {
    pub fn without_pc(writes: impl Into<I::Writes>) -> Self {
        Self {
            to_pc: None,
            writes: writes.into(),
        }
    }
}

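/// Symbolic analogue of [`AdapterRuntimeContext`], produced by the core AIR: expressions for the
/// optional next pc, the reads, the writes, and the processed instruction, all constrained by the
/// adapter AIR.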
pub struct AdapterAirContext<T, I: VmAdapterInterface<T>> {
    pub to_pc: Option<T>,
    pub reads: I::Reads,
    pub writes: I::Writes,
    pub instruction: I::ProcessedInstruction,
}

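/// A complete VM chip assembled from an adapter chip and a core chip. Each executed instruction
/// pushes a record into `records`; trace generation later turns the records into rows with the
/// adapter columns on the left and the core columns on the right.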
pub struct VmChipWrapper<F, A: VmAdapterChip<F>, C: VmCoreChip<F, A::Interface>> {
    pub adapter: A,
    pub core: C,
    pub records: Vec<(A::ReadRecord, A::WriteRecord, C::Record)>,
    offline_memory: Arc<Mutex<OfflineMemory<F>>>,
}

const DEFAULT_RECORDS_CAPACITY: usize = 1 << 20;

impl<F, A, C> VmChipWrapper<F, A, C>
where
    A: VmAdapterChip<F>,
    C: VmCoreChip<F, A::Interface>,
{
    pub fn new(adapter: A, core: C, offline_memory: Arc<Mutex<OfflineMemory<F>>>) -> Self {
        Self {
            adapter,
            core,
            records: Vec::with_capacity(DEFAULT_RECORDS_CAPACITY),
            offline_memory,
        }
    }
}

impl<F, A, M> InstructionExecutor<F> for VmChipWrapper<F, A, M>
where
    F: PrimeField32,
    A: VmAdapterChip<F> + Send + Sync,
    M: VmCoreChip<F, A::Interface> + Send + Sync,
{
    fn execute(
        &mut self,
        memory: &mut MemoryController<F>,
        instruction: &Instruction<F>,
        from_state: ExecutionState<u32>,
    ) -> Result<ExecutionState<u32>> {
        let (reads, read_record) = self.adapter.preprocess(memory, instruction)?;
        let (output, core_record) =
            self.core
                .execute_instruction(instruction, from_state.pc, reads)?;
        let (to_state, write_record) =
            self.adapter
                .postprocess(memory, instruction, from_state, output, &read_record)?;
        self.records.push((read_record, write_record, core_record));
        Ok(to_state)
    }

    fn get_opcode_name(&self, opcode: usize) -> String {
        self.core.get_opcode_name(opcode)
    }
}

impl<SC, A, C> Chip<SC> for VmChipWrapper<Val<SC>, A, C>
where
    SC: StarkGenericConfig,
    Val<SC>: PrimeField32,
    A: VmAdapterChip<Val<SC>> + Send + Sync,
    C: VmCoreChip<Val<SC>, A::Interface> + Send + Sync,
    A::Air: Send + Sync + 'static,
    A::Air: VmAdapterAir<SymbolicRapBuilder<Val<SC>>>,
    A::Air: for<'a> VmAdapterAir<DebugConstraintBuilder<'a, SC>>,
    C::Air: Send + Sync + 'static,
    C::Air: VmCoreAir<
        SymbolicRapBuilder<Val<SC>>,
        <A::Air as VmAdapterAir<SymbolicRapBuilder<Val<SC>>>>::Interface,
    >,
    C::Air: for<'a> VmCoreAir<
        DebugConstraintBuilder<'a, SC>,
        <A::Air as VmAdapterAir<DebugConstraintBuilder<'a, SC>>>::Interface,
    >,
{
    fn air(&self) -> AirRef<SC> {
        let air: VmAirWrapper<A::Air, C::Air> = VmAirWrapper {
            adapter: self.adapter.air().clone(),
            core: self.core.air().clone(),
        };
        Arc::new(air)
    }

    fn generate_air_proof_input(self) -> AirProofInput<SC> {
        let num_records = self.records.len();
        let height = next_power_of_two_or_zero(num_records);
        let core_width = self.core.air().width();
        let adapter_width = self.adapter.air().width();
        let width = core_width + adapter_width;
        let mut values = Val::<SC>::zero_vec(height * width);

        let memory = self.offline_memory.lock().unwrap();

        values
            .par_chunks_mut(width)
            .zip(self.records.into_par_iter())
            .for_each(|(row_slice, record)| {
                let (adapter_row, core_row) = row_slice.split_at_mut(adapter_width);
                self.adapter
                    .generate_trace_row(adapter_row, record.0, record.1, &memory);
                self.core.generate_trace_row(core_row, record.2);
            });

        let mut trace = RowMajorMatrix::new(values, width);
        self.core.finalize(&mut trace, num_records);

        AirProofInput::simple(trace, self.core.generate_public_values())
    }
}

impl<F, A, M> ChipUsageGetter for VmChipWrapper<F, A, M>
where
    A: VmAdapterChip<F> + Sync,
    M: VmCoreChip<F, A::Interface> + Sync,
{
    fn air_name(&self) -> String {
        format!(
            "<{},{}>",
            get_air_name(self.adapter.air()),
            get_air_name(self.core.air())
        )
    }
    fn current_trace_height(&self) -> usize {
        self.records.len()
    }
    fn trace_width(&self) -> usize {
        self.adapter.air().width() + self.core.air().width()
    }
}

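/// The AIR of a [`VmChipWrapper`]: the adapter AIR and the core AIR evaluated side by side over a
/// shared row, with the adapter columns first.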
pub struct VmAirWrapper<A, C> {
    pub adapter: A,
    pub core: C,
}

impl<F, A, C> BaseAir<F> for VmAirWrapper<A, C>
where
    A: BaseAir<F>,
    C: BaseAir<F>,
{
    fn width(&self) -> usize {
        self.adapter.width() + self.core.width()
    }
}

impl<F, A, M> BaseAirWithPublicValues<F> for VmAirWrapper<A, M>
where
    A: BaseAir<F>,
    M: BaseAirWithPublicValues<F>,
{
    fn num_public_values(&self) -> usize {
        self.core.num_public_values()
    }
}

impl<F, A, M> PartitionedBaseAir<F> for VmAirWrapper<A, M>
where
    A: BaseAir<F>,
    M: BaseAir<F>,
{
}

impl<AB, A, M> Air<AB> for VmAirWrapper<A, M>
where
    AB: AirBuilder,
    A: VmAdapterAir<AB>,
    M: VmCoreAir<AB, A::Interface>,
{
    fn eval(&self, builder: &mut AB) {
        let main = builder.main();
        let local = main.row_slice(0);
        let local: &[AB::Var] = (*local).borrow();
        let (local_adapter, local_core) = local.split_at(self.adapter.width());

        let ctx = self
            .core
            .eval(builder, local_core, self.adapter.get_from_pc(local_adapter));
        self.adapter.eval(builder, local_adapter, ctx);
    }
}

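/// An adapter interface with a fixed number of fixed-size reads and writes: `NUM_READS` reads of
/// `READ_SIZE` cells each and `NUM_WRITES` writes of `WRITE_SIZE` cells each.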
pub struct BasicAdapterInterface<
    T,
    PI,
    const NUM_READS: usize,
    const NUM_WRITES: usize,
    const READ_SIZE: usize,
    const WRITE_SIZE: usize,
>(PhantomData<T>, PhantomData<PI>);

impl<
        T,
        PI,
        const NUM_READS: usize,
        const NUM_WRITES: usize,
        const READ_SIZE: usize,
        const WRITE_SIZE: usize,
    > VmAdapterInterface<T>
    for BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>
{
    type Reads = [[T; READ_SIZE]; NUM_READS];
    type Writes = [[T; WRITE_SIZE]; NUM_WRITES];
    type ProcessedInstruction = PI;
}

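/// An adapter interface where each of the `NUM_READS` reads consists of `BLOCKS_PER_READ` blocks
/// of `READ_SIZE` cells, and the write consists of `BLOCKS_PER_WRITE` blocks of `WRITE_SIZE`
/// cells.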
pub struct VecHeapAdapterInterface<
    T,
    const NUM_READS: usize,
    const BLOCKS_PER_READ: usize,
    const BLOCKS_PER_WRITE: usize,
    const READ_SIZE: usize,
    const WRITE_SIZE: usize,
>(PhantomData<T>);

impl<
        T,
        const NUM_READS: usize,
        const BLOCKS_PER_READ: usize,
        const BLOCKS_PER_WRITE: usize,
        const READ_SIZE: usize,
        const WRITE_SIZE: usize,
    > VmAdapterInterface<T>
    for VecHeapAdapterInterface<
        T,
        NUM_READS,
        BLOCKS_PER_READ,
        BLOCKS_PER_WRITE,
        READ_SIZE,
        WRITE_SIZE,
    >
{
    type Reads = [[[T; READ_SIZE]; BLOCKS_PER_READ]; NUM_READS];
    type Writes = [[T; WRITE_SIZE]; BLOCKS_PER_WRITE];
    type ProcessedInstruction = MinimalInstruction<T>;
}

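/// Like [`VecHeapAdapterInterface`], but with exactly two reads that may have different block
/// counts (`BLOCKS_PER_READ1` and `BLOCKS_PER_READ2`).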
pub struct VecHeapTwoReadsAdapterInterface<
    T,
    const BLOCKS_PER_READ1: usize,
    const BLOCKS_PER_READ2: usize,
    const BLOCKS_PER_WRITE: usize,
    const READ_SIZE: usize,
    const WRITE_SIZE: usize,
>(PhantomData<T>);

impl<
        T,
        const BLOCKS_PER_READ1: usize,
        const BLOCKS_PER_READ2: usize,
        const BLOCKS_PER_WRITE: usize,
        const READ_SIZE: usize,
        const WRITE_SIZE: usize,
    > VmAdapterInterface<T>
    for VecHeapTwoReadsAdapterInterface<
        T,
        BLOCKS_PER_READ1,
        BLOCKS_PER_READ2,
        BLOCKS_PER_WRITE,
        READ_SIZE,
        WRITE_SIZE,
    >
{
    type Reads = (
        [[T; READ_SIZE]; BLOCKS_PER_READ1],
        [[T; READ_SIZE]; BLOCKS_PER_READ2],
    );
    type Writes = [[T; WRITE_SIZE]; BLOCKS_PER_WRITE];
    type ProcessedInstruction = MinimalInstruction<T>;
}

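/// An adapter interface that flattens all reads into one array of `READ_CELLS` cells and all
/// writes into one array of `WRITE_CELLS` cells.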
pub struct FlatInterface<T, PI, const READ_CELLS: usize, const WRITE_CELLS: usize>(
    PhantomData<T>,
    PhantomData<PI>,
);

impl<T, PI, const READ_CELLS: usize, const WRITE_CELLS: usize> VmAdapterInterface<T>
    for FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>
{
    type Reads = [T; READ_CELLS];
    type Writes = [T; WRITE_CELLS];
    type ProcessedInstruction = PI;
}

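/// An adapter interface whose reads, writes, and processed instruction are dynamically sized
/// vectors ([`DynArray`]), for cases where the shapes are not fixed at compile time.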
#[derive(Serialize, Deserialize)]
pub struct DynAdapterInterface<T>(PhantomData<T>);

impl<T> VmAdapterInterface<T> for DynAdapterInterface<T> {
    type Reads = DynArray<T>;
    type Writes = DynArray<T>;
    type ProcessedInstruction = DynArray<T>;
}

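/// Newtype over `Vec<T>` used by [`DynAdapterInterface`]; the conversions in the module below map
/// it to and from the fixed-size array shapes of the other interfaces.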
#[derive(Clone, Debug, Default)]
pub struct DynArray<T>(pub Vec<T>);

#[repr(C)]
#[derive(AlignedBorrow)]
pub struct MinimalInstruction<T> {
    pub is_valid: T,
    pub opcode: T,
}

#[repr(C)]
#[derive(AlignedBorrow)]
pub struct ImmInstruction<T> {
    pub is_valid: T,
    pub opcode: T,
    pub immediate: T,
}

#[repr(C)]
#[derive(AlignedBorrow)]
pub struct SignedImmInstruction<T> {
    pub is_valid: T,
    pub opcode: T,
    pub immediate: T,
    pub imm_sign: T,
}

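/// Conversions between the adapter interface shapes above, so adapters and cores written against
/// different but compatible interfaces can be combined. Shape mismatches panic via the
/// `assert_eq!` checks and `unwrap`s.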
mod conversions {
    use super::*;

    impl<
            T,
            const NUM_READS: usize,
            const BLOCKS_PER_READ: usize,
            const BLOCKS_PER_WRITE: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        >
        From<
            AdapterAirContext<
                T,
                VecHeapAdapterInterface<
                    T,
                    NUM_READS,
                    BLOCKS_PER_READ,
                    BLOCKS_PER_WRITE,
                    READ_SIZE,
                    WRITE_SIZE,
                >,
            >,
        > for AdapterAirContext<T, DynAdapterInterface<T>>
    {
        fn from(
            ctx: AdapterAirContext<
                T,
                VecHeapAdapterInterface<
                    T,
                    NUM_READS,
                    BLOCKS_PER_READ,
                    BLOCKS_PER_WRITE,
                    READ_SIZE,
                    WRITE_SIZE,
                >,
            >,
        ) -> Self {
            AdapterAirContext {
                to_pc: ctx.to_pc,
                reads: ctx.reads.into(),
                writes: ctx.writes.into(),
                instruction: ctx.instruction.into(),
            }
        }
    }

    impl<
            T,
            const NUM_READS: usize,
            const BLOCKS_PER_READ: usize,
            const BLOCKS_PER_WRITE: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        >
        From<
            AdapterRuntimeContext<
                T,
                VecHeapAdapterInterface<
                    T,
                    NUM_READS,
                    BLOCKS_PER_READ,
                    BLOCKS_PER_WRITE,
                    READ_SIZE,
                    WRITE_SIZE,
                >,
            >,
        > for AdapterRuntimeContext<T, DynAdapterInterface<T>>
    {
        fn from(
            ctx: AdapterRuntimeContext<
                T,
                VecHeapAdapterInterface<
                    T,
                    NUM_READS,
                    BLOCKS_PER_READ,
                    BLOCKS_PER_WRITE,
                    READ_SIZE,
                    WRITE_SIZE,
                >,
            >,
        ) -> Self {
            AdapterRuntimeContext {
                to_pc: ctx.to_pc,
                writes: ctx.writes.into(),
            }
        }
    }

    impl<
            T,
            const NUM_READS: usize,
            const BLOCKS_PER_READ: usize,
            const BLOCKS_PER_WRITE: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        > From<AdapterAirContext<T, DynAdapterInterface<T>>>
        for AdapterAirContext<
            T,
            VecHeapAdapterInterface<
                T,
                NUM_READS,
                BLOCKS_PER_READ,
                BLOCKS_PER_WRITE,
                READ_SIZE,
                WRITE_SIZE,
            >,
        >
    {
        fn from(ctx: AdapterAirContext<T, DynAdapterInterface<T>>) -> Self {
            AdapterAirContext {
                to_pc: ctx.to_pc,
                reads: ctx.reads.into(),
                writes: ctx.writes.into(),
                instruction: ctx.instruction.into(),
            }
        }
    }

    impl<
            T,
            const NUM_READS: usize,
            const BLOCKS_PER_READ: usize,
            const BLOCKS_PER_WRITE: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        > From<AdapterRuntimeContext<T, DynAdapterInterface<T>>>
        for AdapterRuntimeContext<
            T,
            VecHeapAdapterInterface<
                T,
                NUM_READS,
                BLOCKS_PER_READ,
                BLOCKS_PER_WRITE,
                READ_SIZE,
                WRITE_SIZE,
            >,
        >
    {
        fn from(ctx: AdapterRuntimeContext<T, DynAdapterInterface<T>>) -> Self {
            AdapterRuntimeContext {
                to_pc: ctx.to_pc,
                writes: ctx.writes.into(),
            }
        }
    }

    impl<
            T: Clone,
            const BLOCKS_PER_READ1: usize,
            const BLOCKS_PER_READ2: usize,
            const BLOCKS_PER_WRITE: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        > From<AdapterAirContext<T, DynAdapterInterface<T>>>
        for AdapterAirContext<
            T,
            VecHeapTwoReadsAdapterInterface<
                T,
                BLOCKS_PER_READ1,
                BLOCKS_PER_READ2,
                BLOCKS_PER_WRITE,
                READ_SIZE,
                WRITE_SIZE,
            >,
        >
    {
        fn from(ctx: AdapterAirContext<T, DynAdapterInterface<T>>) -> Self {
            AdapterAirContext {
                to_pc: ctx.to_pc,
                reads: ctx.reads.into(),
                writes: ctx.writes.into(),
                instruction: ctx.instruction.into(),
            }
        }
    }

    impl<
            T,
            const BLOCKS_PER_READ1: usize,
            const BLOCKS_PER_READ2: usize,
            const BLOCKS_PER_WRITE: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        > From<AdapterRuntimeContext<T, DynAdapterInterface<T>>>
        for AdapterRuntimeContext<
            T,
            VecHeapTwoReadsAdapterInterface<
                T,
                BLOCKS_PER_READ1,
                BLOCKS_PER_READ2,
                BLOCKS_PER_WRITE,
                READ_SIZE,
                WRITE_SIZE,
            >,
        >
    {
        fn from(ctx: AdapterRuntimeContext<T, DynAdapterInterface<T>>) -> Self {
            AdapterRuntimeContext {
                to_pc: ctx.to_pc,
                writes: ctx.writes.into(),
            }
        }
    }

    impl<
            T,
            PI,
            const BASIC_NUM_READS: usize,
            const BASIC_NUM_WRITES: usize,
            const NUM_READS: usize,
            const BLOCKS_PER_READ: usize,
            const BLOCKS_PER_WRITE: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        >
        From<
            AdapterRuntimeContext<
                T,
                BasicAdapterInterface<
                    T,
                    PI,
                    BASIC_NUM_READS,
                    BASIC_NUM_WRITES,
                    READ_SIZE,
                    WRITE_SIZE,
                >,
            >,
        >
        for AdapterRuntimeContext<
            T,
            VecHeapAdapterInterface<
                T,
                NUM_READS,
                BLOCKS_PER_READ,
                BLOCKS_PER_WRITE,
                READ_SIZE,
                WRITE_SIZE,
            >,
        >
    {
        fn from(
            ctx: AdapterRuntimeContext<
                T,
                BasicAdapterInterface<
                    T,
                    PI,
                    BASIC_NUM_READS,
                    BASIC_NUM_WRITES,
                    READ_SIZE,
                    WRITE_SIZE,
                >,
            >,
        ) -> Self {
            assert_eq!(BASIC_NUM_WRITES, BLOCKS_PER_WRITE);
            let mut writes_it = ctx.writes.into_iter();
            let writes = from_fn(|_| writes_it.next().unwrap());
            AdapterRuntimeContext {
                to_pc: ctx.to_pc,
                writes,
            }
        }
    }

    impl<
            T,
            PI: Into<MinimalInstruction<T>>,
            const BASIC_NUM_READS: usize,
            const BASIC_NUM_WRITES: usize,
            const NUM_READS: usize,
            const BLOCKS_PER_READ: usize,
            const BLOCKS_PER_WRITE: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        >
        From<
            AdapterAirContext<
                T,
                BasicAdapterInterface<
                    T,
                    PI,
                    BASIC_NUM_READS,
                    BASIC_NUM_WRITES,
                    READ_SIZE,
                    WRITE_SIZE,
                >,
            >,
        >
        for AdapterAirContext<
            T,
            VecHeapAdapterInterface<
                T,
                NUM_READS,
                BLOCKS_PER_READ,
                BLOCKS_PER_WRITE,
                READ_SIZE,
                WRITE_SIZE,
            >,
        >
    {
        fn from(
            ctx: AdapterAirContext<
                T,
                BasicAdapterInterface<
                    T,
                    PI,
                    BASIC_NUM_READS,
                    BASIC_NUM_WRITES,
                    READ_SIZE,
                    WRITE_SIZE,
                >,
            >,
        ) -> Self {
            assert_eq!(BASIC_NUM_READS, NUM_READS * BLOCKS_PER_READ);
            let mut reads_it = ctx.reads.into_iter();
            let reads = from_fn(|_| from_fn(|_| reads_it.next().unwrap()));
            assert_eq!(BASIC_NUM_WRITES, BLOCKS_PER_WRITE);
            let mut writes_it = ctx.writes.into_iter();
            let writes = from_fn(|_| writes_it.next().unwrap());
            AdapterAirContext {
                to_pc: ctx.to_pc,
                reads,
                writes,
                instruction: ctx.instruction.into(),
            }
        }
    }

    impl<
            T,
            PI,
            const NUM_READS: usize,
            const NUM_WRITES: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
            const READ_CELLS: usize,
            const WRITE_CELLS: usize,
        >
        From<
            AdapterAirContext<
                T,
                BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
            >,
        > for AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>
    {
        fn from(
            ctx: AdapterAirContext<
                T,
                BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
            >,
        ) -> AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>> {
            assert_eq!(READ_CELLS, NUM_READS * READ_SIZE);
            assert_eq!(WRITE_CELLS, NUM_WRITES * WRITE_SIZE);
            let mut reads_it = ctx.reads.into_iter().flatten();
            let reads = from_fn(|_| reads_it.next().unwrap());
            let mut writes_it = ctx.writes.into_iter().flatten();
            let writes = from_fn(|_| writes_it.next().unwrap());
            AdapterAirContext {
                to_pc: ctx.to_pc,
                reads,
                writes,
                instruction: ctx.instruction,
            }
        }
    }

    impl<
            T,
            PI,
            const NUM_READS: usize,
            const NUM_WRITES: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
            const READ_CELLS: usize,
            const WRITE_CELLS: usize,
        > From<AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>>
        for AdapterAirContext<
            T,
            BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
        >
    {
        fn from(
            AdapterAirContext {
                to_pc,
                reads,
                writes,
                instruction,
            }: AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>,
        ) -> AdapterAirContext<
            T,
            BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
        > {
            assert_eq!(READ_CELLS, NUM_READS * READ_SIZE);
            assert_eq!(WRITE_CELLS, NUM_WRITES * WRITE_SIZE);
            let mut reads_it = reads.into_iter();
            let reads: [[T; READ_SIZE]; NUM_READS] =
                from_fn(|_| from_fn(|_| reads_it.next().unwrap()));
            let mut writes_it = writes.into_iter();
            let writes: [[T; WRITE_SIZE]; NUM_WRITES] =
                from_fn(|_| from_fn(|_| writes_it.next().unwrap()));
            AdapterAirContext {
                to_pc,
                reads,
                writes,
                instruction,
            }
        }
    }

    impl<
            T,
            PI,
            const NUM_READS: usize,
            const NUM_WRITES: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
            const READ_CELLS: usize,
            const WRITE_CELLS: usize,
        >
        From<
            AdapterRuntimeContext<
                T,
                BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
            >,
        > for AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>
    {
        fn from(
            ctx: AdapterRuntimeContext<
                T,
                BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
            >,
        ) -> AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>> {
            assert_eq!(WRITE_CELLS, NUM_WRITES * WRITE_SIZE);
            let mut writes_it = ctx.writes.into_iter().flatten();
            let writes = from_fn(|_| writes_it.next().unwrap());
            AdapterRuntimeContext {
                to_pc: ctx.to_pc,
                writes,
            }
        }
    }

    impl<
            T: FieldAlgebra,
            PI,
            const NUM_READS: usize,
            const NUM_WRITES: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
            const READ_CELLS: usize,
            const WRITE_CELLS: usize,
        > From<AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>>
        for AdapterRuntimeContext<
            T,
            BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
        >
    {
        fn from(
            ctx: AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>,
        ) -> AdapterRuntimeContext<
            T,
            BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
        > {
            assert_eq!(WRITE_CELLS, NUM_WRITES * WRITE_SIZE);
            let mut writes_it = ctx.writes.into_iter();
            let writes: [[T; WRITE_SIZE]; NUM_WRITES] =
                from_fn(|_| from_fn(|_| writes_it.next().unwrap()));
            AdapterRuntimeContext {
                to_pc: ctx.to_pc,
                writes,
            }
        }
    }

    impl<T> From<Vec<T>> for DynArray<T> {
        fn from(v: Vec<T>) -> Self {
            Self(v)
        }
    }

    impl<T> From<DynArray<T>> for Vec<T> {
        fn from(v: DynArray<T>) -> Vec<T> {
            v.0
        }
    }

    impl<T, const N: usize, const M: usize> From<[[T; N]; M]> for DynArray<T> {
        fn from(v: [[T; N]; M]) -> Self {
            Self(v.into_iter().flatten().collect())
        }
    }

    impl<T, const N: usize, const M: usize> From<DynArray<T>> for [[T; N]; M] {
        fn from(v: DynArray<T>) -> Self {
            assert_eq!(v.0.len(), N * M, "Incorrect vector length {}", v.0.len());
            let mut it = v.0.into_iter();
            from_fn(|_| from_fn(|_| it.next().unwrap()))
        }
    }

    impl<T, const N: usize, const M: usize, const R: usize> From<[[[T; N]; M]; R]> for DynArray<T> {
        fn from(v: [[[T; N]; M]; R]) -> Self {
            Self(
                v.into_iter()
                    .flat_map(|x| x.into_iter().flatten())
                    .collect(),
            )
        }
    }

    impl<T, const N: usize, const M: usize, const R: usize> From<DynArray<T>> for [[[T; N]; M]; R] {
        fn from(v: DynArray<T>) -> Self {
            assert_eq!(
                v.0.len(),
                N * M * R,
                "Incorrect vector length {}",
                v.0.len()
            );
            let mut it = v.0.into_iter();
            from_fn(|_| from_fn(|_| from_fn(|_| it.next().unwrap())))
        }
    }

    impl<T, const N: usize, const M1: usize, const M2: usize> From<([[T; N]; M1], [[T; N]; M2])>
        for DynArray<T>
    {
        fn from(v: ([[T; N]; M1], [[T; N]; M2])) -> Self {
            let vec =
                v.0.into_iter()
                    .flatten()
                    .chain(v.1.into_iter().flatten())
                    .collect();
            Self(vec)
        }
    }

    impl<T, const N: usize, const M1: usize, const M2: usize> From<DynArray<T>>
        for ([[T; N]; M1], [[T; N]; M2])
    {
        fn from(v: DynArray<T>) -> Self {
            assert_eq!(
                v.0.len(),
                N * (M1 + M2),
                "Incorrect vector length {}",
                v.0.len()
            );
            let mut it = v.0.into_iter();
            (
                from_fn(|_| from_fn(|_| it.next().unwrap())),
                from_fn(|_| from_fn(|_| it.next().unwrap())),
            )
        }
    }

    impl<
            T,
            PI: Into<DynArray<T>>,
            const NUM_READS: usize,
            const NUM_WRITES: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        >
        From<
            AdapterAirContext<
                T,
                BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
            >,
        > for AdapterAirContext<T, DynAdapterInterface<T>>
    {
        fn from(
            ctx: AdapterAirContext<
                T,
                BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
            >,
        ) -> Self {
            AdapterAirContext {
                to_pc: ctx.to_pc,
                reads: ctx.reads.into(),
                writes: ctx.writes.into(),
                instruction: ctx.instruction.into(),
            }
        }
    }

    impl<
            T,
            PI,
            const NUM_READS: usize,
            const NUM_WRITES: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        >
        From<
            AdapterRuntimeContext<
                T,
                BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
            >,
        > for AdapterRuntimeContext<T, DynAdapterInterface<T>>
    {
        fn from(
            ctx: AdapterRuntimeContext<
                T,
                BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
            >,
        ) -> Self {
            AdapterRuntimeContext {
                to_pc: ctx.to_pc,
                writes: ctx.writes.into(),
            }
        }
    }

    impl<
            T,
            PI,
            const NUM_READS: usize,
            const NUM_WRITES: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        > From<AdapterAirContext<T, DynAdapterInterface<T>>>
        for AdapterAirContext<
            T,
            BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
        >
    where
        PI: From<DynArray<T>>,
    {
        fn from(ctx: AdapterAirContext<T, DynAdapterInterface<T>>) -> Self {
            AdapterAirContext {
                to_pc: ctx.to_pc,
                reads: ctx.reads.into(),
                writes: ctx.writes.into(),
                instruction: ctx.instruction.into(),
            }
        }
    }

    impl<
            T,
            PI,
            const NUM_READS: usize,
            const NUM_WRITES: usize,
            const READ_SIZE: usize,
            const WRITE_SIZE: usize,
        > From<AdapterRuntimeContext<T, DynAdapterInterface<T>>>
        for AdapterRuntimeContext<
            T,
            BasicAdapterInterface<T, PI, NUM_READS, NUM_WRITES, READ_SIZE, WRITE_SIZE>,
        >
    {
        fn from(ctx: AdapterRuntimeContext<T, DynAdapterInterface<T>>) -> Self {
            AdapterRuntimeContext {
                to_pc: ctx.to_pc,
                writes: ctx.writes.into(),
            }
        }
    }

    impl<T: Clone, PI: Into<DynArray<T>>, const READ_CELLS: usize, const WRITE_CELLS: usize>
        From<AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>>
        for AdapterAirContext<T, DynAdapterInterface<T>>
    {
        fn from(ctx: AdapterAirContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>) -> Self {
            AdapterAirContext {
                to_pc: ctx.to_pc,
                reads: ctx.reads.to_vec().into(),
                writes: ctx.writes.to_vec().into(),
                instruction: ctx.instruction.into(),
            }
        }
    }

    impl<T: Clone, PI, const READ_CELLS: usize, const WRITE_CELLS: usize>
        From<AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>>
        for AdapterRuntimeContext<T, DynAdapterInterface<T>>
    {
        fn from(
            ctx: AdapterRuntimeContext<T, FlatInterface<T, PI, READ_CELLS, WRITE_CELLS>>,
        ) -> Self {
            AdapterRuntimeContext {
                to_pc: ctx.to_pc,
                writes: ctx.writes.to_vec().into(),
            }
        }
    }

    impl<T> From<MinimalInstruction<T>> for DynArray<T> {
        fn from(m: MinimalInstruction<T>) -> Self {
            Self(vec![m.is_valid, m.opcode])
        }
    }

    impl<T> From<DynArray<T>> for MinimalInstruction<T> {
        fn from(m: DynArray<T>) -> Self {
            let mut m = m.0.into_iter();
            MinimalInstruction {
                is_valid: m.next().unwrap(),
                opcode: m.next().unwrap(),
            }
        }
    }

    impl<T> From<DynArray<T>> for ImmInstruction<T> {
        fn from(m: DynArray<T>) -> Self {
            let mut m = m.0.into_iter();
            ImmInstruction {
                is_valid: m.next().unwrap(),
                opcode: m.next().unwrap(),
                immediate: m.next().unwrap(),
            }
        }
    }

    impl<T> From<ImmInstruction<T>> for DynArray<T> {
        fn from(instruction: ImmInstruction<T>) -> Self {
            DynArray::from(vec![
                instruction.is_valid,
                instruction.opcode,
                instruction.immediate,
            ])
        }
    }
}