openvm_circuit/arch/interpreter.rs

#[cfg(feature = "tco")]
use std::marker::PhantomData;
use std::{
    alloc::{alloc, dealloc, handle_alloc_error, Layout},
    borrow::{Borrow, BorrowMut},
    iter::repeat_n,
    ptr::NonNull,
};

use itertools::Itertools;
use openvm_circuit_primitives_derive::AlignedBytesBorrow;
use openvm_instructions::{
    exe::{SparseMemoryImage, VmExe},
    instruction::Instruction,
    program::{Program, DEFAULT_PC_STEP},
    LocalOpcode, SystemOpcode,
};
use openvm_stark_backend::p3_field::PrimeField32;
use tracing::info_span;

#[cfg(feature = "tco")]
use crate::arch::Handler;
use crate::{
    arch::{
        execution_mode::{
            ExecutionCtx, ExecutionCtxTrait, MeteredCostCtx, MeteredCtx, MeteredExecutionCtxTrait,
            Segment,
        },
        ExecuteFunc, ExecutionError, Executor, ExecutorInventory, ExitCode, MeteredExecutor,
        StaticProgramError, Streams, SystemConfig, VmExecState, VmState,
    },
    system::memory::online::GuestMemory,
};

/// VM pure executor (E1/E2 executor) which doesn't consider trace generation.
/// Note: This executor doesn't hold any VM state and can be used for multiple executions.
///
/// The generic `Ctx` and the constructor determine whether this supports pure execution or
/// metered execution.
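///
/// # Example
/// A minimal usage sketch (not compiled as a doctest); it assumes an `inventory:
/// ExecutorInventory<E>`, an `exe: VmExe<F>`, input streams `inputs`, and an
/// `executor_idx_to_air_idx` mapping have already been built elsewhere:
/// ```ignore
/// // Pure (E1) execution: the `ExecutionCtx` type parameter selects unmetered execution.
/// let interpreter = InterpretedInstance::<F, ExecutionCtx>::new(&inventory, &exe)?;
/// let final_state = interpreter.execute(inputs, None)?;
///
/// // Metered (E2) execution uses `new_metered` with a `MeteredCtx` instead.
/// let metered =
///     InterpretedInstance::<F, MeteredCtx>::new_metered(&inventory, &exe, &executor_idx_to_air_idx)?;
/// ```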
// NOTE: the lifetime 'a represents the lifetime of the borrowed ExecutorInventory, which must
// outlive the InterpretedInstance because `pre_compute_buf` may contain pointers to references
// held by executors.
pub struct InterpretedInstance<'a, F, Ctx> {
    system_config: SystemConfig,
    // SAFETY: this is not actually dead code; `pre_compute_insns` contains raw pointers that
    // refer to this buffer.
    #[allow(dead_code)]
    pre_compute_buf: AlignedBuf,
    /// Instruction table of function pointers and pointers into the pre-computed buffer. Indexed
    /// by `pc_index = pc / DEFAULT_PC_STEP`.
    /// SAFETY: The first `pc_base / DEFAULT_PC_STEP` entries will be unreachable. We do this to
    /// avoid needing to subtract `pc_base` at runtime.
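    ///
    /// Worked example (illustrative only): with `DEFAULT_PC_STEP = 4` and `pc_base = 32`,
    /// indices 0..8 hold unreachable entries and the instruction at `pc = 40` lives at index 10.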
    #[cfg(not(feature = "tco"))]
    pre_compute_insns: Vec<PreComputeInstruction<'a, F, Ctx>>,
    #[cfg(feature = "tco")]
    pre_compute_max_size: usize,
    /// Handler function pointers for tail call optimization.
    #[cfg(feature = "tco")]
    handlers: Vec<Handler<F, Ctx>>,

    pc_start: u32,

    init_memory: SparseMemoryImage,
    #[cfg(feature = "tco")]
    phantom: PhantomData<&'a ()>,
}

#[cfg_attr(feature = "tco", allow(dead_code))]
struct PreComputeInstruction<'a, F, Ctx> {
    pub handler: ExecuteFunc<F, Ctx>,
    pub pre_compute: &'a [u8],
}

#[derive(AlignedBytesBorrow, Clone)]
#[repr(C)]
struct TerminatePreCompute {
    exit_code: u32,
}

macro_rules! run {
    ($span:literal, $interpreter:ident, $instret:ident, $pc:ident, $arg:ident, $exec_state:ident, $ctx:ident) => {{
        #[cfg(feature = "metrics")]
        let start = std::time::Instant::now();
        #[cfg(feature = "metrics")]
        let start_instret = $instret;

        info_span!($span).in_scope(|| -> Result<(), ExecutionError> {
            // SAFETY:
            // - it is the responsibility of each Executor to ensure that pre_compute_insns
            //   contains valid function pointers and pre-computed data
            #[cfg(not(feature = "tco"))]
            {
                unsafe {
                    tracing::debug!("execute_trampoline");
                    execute_trampoline(
                        $instret,
                        $pc,
                        $arg,
                        &mut $exec_state,
                        &$interpreter.pre_compute_insns,
                    );
                }
            }
            #[cfg(feature = "tco")]
            {
                tracing::debug!("execute_tco");

                if $ctx::should_suspend($instret, $pc, $arg, &mut $exec_state) {
                    $exec_state.set_instret_and_pc($instret, $pc);
                    return Ok(());
                }

                let handler = $interpreter
                    .get_handler($pc)
                    .ok_or(ExecutionError::PcOutOfBounds($pc))?;
                // SAFETY:
                // - handler is generated by the Executor, MeteredExecutor traits
                // - it is the responsibility of each Executor to ensure the handler is safe given
                //   a valid VM state
                unsafe {
                    handler($interpreter, $instret, $pc, $arg, &mut $exec_state);
                }
            }
            Ok(())
        })?;

        #[cfg(feature = "metrics")]
        {
            let elapsed = start.elapsed();
            let insns = $exec_state.instret() - start_instret;
            tracing::info!("instructions_executed={insns}");
            metrics::counter!(concat!($span, "_insns")).absolute(insns);
            metrics::gauge!(concat!($span, "_insn_mi/s"))
                .set(insns as f64 / elapsed.as_micros() as f64);
        }
    }};
}
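
// Dispatch overview (illustrative summary): without the `tco` feature, `execute_trampoline`
// below loops over `pre_compute_insns`, a table of function pointers indexed by
// `pc / DEFAULT_PC_STEP`, invoking one handler per iteration. With `tco`, the macro looks up a
// single `Handler` for the starting `pc` and hands control to it; handlers are expected to chain
// execution via tail calls rather than returning to a central loop.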

// Constructors for E1 and E2 respectively, which generate pre-computed buffers and function
// pointers
// - Generic in `Ctx`

impl<'a, F, Ctx> InterpretedInstance<'a, F, Ctx>
where
    F: PrimeField32,
    Ctx: ExecutionCtxTrait,
{
    /// Creates a new interpreter instance for pure execution.
    // (E1 execution)
    pub fn new<E>(
        inventory: &'a ExecutorInventory<E>,
        exe: &VmExe<F>,
    ) -> Result<Self, StaticProgramError>
    where
        E: Executor<F>,
    {
        let program = &exe.program;
        let pre_compute_max_size = get_pre_compute_max_size(program, inventory);
        let mut pre_compute_buf = alloc_pre_compute_buf(program, pre_compute_max_size);
        let mut split_pre_compute_buf =
            split_pre_compute_buf(program, &mut pre_compute_buf, pre_compute_max_size);
        #[cfg(not(feature = "tco"))]
        let pre_compute_insns = get_pre_compute_instructions::<F, Ctx, E>(
            program,
            inventory,
            &mut split_pre_compute_buf,
        )?;
        let pc_start = exe.pc_start;
        let init_memory = exe.init_memory.clone();
        #[cfg(feature = "tco")]
        let handlers = repeat_n(&None, get_pc_index(program.pc_base))
            .chain(program.instructions_and_debug_infos.iter())
            .zip_eq(split_pre_compute_buf.iter_mut())
            .enumerate()
            .map(
                |(pc_idx, (inst_opt, pre_compute))| -> Result<Handler<F, Ctx>, StaticProgramError> {
                    if let Some((inst, _)) = inst_opt {
                        let pc = pc_idx as u32 * DEFAULT_PC_STEP;
                        if get_system_opcode_handler::<F, Ctx>(inst, pre_compute).is_some() {
                            Ok(terminate_execute_e12_tco_handler)
                        } else {
                            // unwrap because get_pre_compute_instructions would have errored
                            // already on DisabledOperation
                            let executor = inventory.get_executor(inst.opcode).unwrap();
                            executor.handler(pc, inst, pre_compute)
                        }
                    } else {
                        Ok(unreachable_tco_handler)
                    }
                },
            )
            .collect::<Result<Vec<_>, _>>()?;

        Ok(Self {
            system_config: inventory.config().clone(),
            pre_compute_buf,
            #[cfg(not(feature = "tco"))]
            pre_compute_insns,
            pc_start,
            init_memory,
            #[cfg(feature = "tco")]
            pre_compute_max_size,
            #[cfg(feature = "tco")]
            handlers,
            #[cfg(feature = "tco")]
            phantom: PhantomData,
        })
    }

    pub fn create_initial_vm_state(&self, inputs: impl Into<Streams<F>>) -> VmState<F> {
        VmState::initial(
            &self.system_config,
            &self.init_memory,
            self.pc_start,
            inputs,
        )
    }

    /// # Safety
    /// - This function assumes that the `pc` is within program bounds; this should be the case
    ///   if the pc is checked to be in bounds before jumping to it.
    /// - The returned slice may not be entirely initialized; it is the job of each Executor to
    ///   initialize the parts of the buffer that the instruction handler will use.
    #[cfg(feature = "tco")]
    #[inline(always)]
    pub fn get_pre_compute(&self, pc: u32) -> &[u8] {
        let pc_idx = get_pc_index(pc);
        // SAFETY:
        // - we assume that pc is in bounds
        // - pre_compute_buf is allocated for pre_compute_max_size * program_len bytes, with each
        //   instruction getting pre_compute_max_size bytes
        // - self.pre_compute_buf.ptr is non-null
        // - initialization of the contents of the slice is the responsibility of each Executor
        debug_assert!(
            (pc_idx + 1) * self.pre_compute_max_size <= self.pre_compute_buf.layout.size()
        );
        unsafe {
            let ptr = self
                .pre_compute_buf
                .ptr
                .add(pc_idx * self.pre_compute_max_size);
            std::slice::from_raw_parts(ptr, self.pre_compute_max_size)
        }
    }

    #[cfg(feature = "tco")]
    #[inline(always)]
    pub fn get_handler(&self, pc: u32) -> Option<Handler<F, Ctx>> {
        let pc_idx = get_pc_index(pc);
        self.handlers.get(pc_idx).copied()
    }
}

impl<'a, F, Ctx> InterpretedInstance<'a, F, Ctx>
where
    F: PrimeField32,
    Ctx: MeteredExecutionCtxTrait,
{
    /// Creates a new interpreter instance for metered execution.
    // (E2 execution)
    pub fn new_metered<E>(
        inventory: &'a ExecutorInventory<E>,
        exe: &VmExe<F>,
        executor_idx_to_air_idx: &[usize],
    ) -> Result<Self, StaticProgramError>
    where
        E: MeteredExecutor<F>,
    {
        let program = &exe.program;
        let pre_compute_max_size = get_metered_pre_compute_max_size(program, inventory);
        let mut pre_compute_buf = alloc_pre_compute_buf(program, pre_compute_max_size);
        let mut split_pre_compute_buf =
            split_pre_compute_buf(program, &mut pre_compute_buf, pre_compute_max_size);
        #[cfg(not(feature = "tco"))]
        let pre_compute_insns = get_metered_pre_compute_instructions::<F, Ctx, E>(
            program,
            inventory,
            executor_idx_to_air_idx,
            &mut split_pre_compute_buf,
        )?;

        let pc_start = exe.pc_start;
        let init_memory = exe.init_memory.clone();
        #[cfg(feature = "tco")]
        let handlers = repeat_n(&None, get_pc_index(program.pc_base))
            .chain(program.instructions_and_debug_infos.iter())
            .zip_eq(split_pre_compute_buf.iter_mut())
            .enumerate()
            .map(
                |(pc_idx, (inst_opt, pre_compute))| -> Result<Handler<F, Ctx>, StaticProgramError> {
                    if let Some((inst, _)) = inst_opt {
                        let pc = pc_idx as u32 * DEFAULT_PC_STEP;
                        if get_system_opcode_handler::<F, Ctx>(inst, pre_compute).is_some() {
                            Ok(terminate_execute_e12_tco_handler)
                        } else {
                            // unwrap because get_pre_compute_instructions would have errored
                            // already on DisabledOperation
                            let executor_idx = inventory.instruction_lookup[&inst.opcode] as usize;
                            let executor = &inventory.executors[executor_idx];
                            let air_idx = executor_idx_to_air_idx[executor_idx];
                            executor.metered_handler(air_idx, pc, inst, pre_compute)
                        }
                    } else {
                        Ok(unreachable_tco_handler)
                    }
                },
            )
            .collect::<Result<Vec<_>, _>>()?;

        Ok(Self {
            system_config: inventory.config().clone(),
            pre_compute_buf,
            #[cfg(not(feature = "tco"))]
            pre_compute_insns,
            pc_start,
            init_memory,
            #[cfg(feature = "tco")]
            pre_compute_max_size,
            #[cfg(feature = "tco")]
            handlers,
            #[cfg(feature = "tco")]
            phantom: PhantomData,
        })
    }
}

// Execute functions specialized to the relevant Ctx types to provide more streamlined APIs

impl<F> InterpretedInstance<'_, F, ExecutionCtx>
where
    F: PrimeField32,
{
    /// Pure execution, without metering, for the given `inputs`. Execution begins from the
    /// initial state specified by the `VmExe`. This function executes the program until
    /// termination if `num_insns` is `None`, or for exactly `num_insns` instructions if
    /// `num_insns` is `Some`.
    ///
    /// Returns the final VM state when execution stops.
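    ///
    /// # Example
    /// A usage sketch (not compiled as a doctest); `interpreter` and `inputs` are placeholders
    /// for an [`InterpretedInstance`] and anything implementing `Into<Streams<F>>`:
    /// ```ignore
    /// // Run to termination.
    /// let final_state = interpreter.execute(inputs, None)?;
    /// // Alternatively, pass `Some(n)` to stop after at most `n` instructions.
    /// ```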
    pub fn execute(
        &self,
        inputs: impl Into<Streams<F>>,
        num_insns: Option<u64>,
    ) -> Result<VmState<F, GuestMemory>, ExecutionError> {
        let vm_state = VmState::initial(
            &self.system_config,
            &self.init_memory,
            self.pc_start,
            inputs,
        );
        self.execute_from_state(vm_state, num_insns)
    }

    /// Pure execution, without metering, from the given `VmState`. This function executes the
    /// program until termination if `num_insns` is `None`, or for exactly `num_insns`
    /// instructions if `num_insns` is `Some`.
    ///
    /// Returns the final VM state when execution stops.
    pub fn execute_from_state(
        &self,
        from_state: VmState<F, GuestMemory>,
        num_insns: Option<u64>,
    ) -> Result<VmState<F, GuestMemory>, ExecutionError> {
        let instret = from_state.instret();
        let instret_end = if let Some(n) = num_insns {
            let end = instret
                .checked_add(n)
                .ok_or(ExecutionError::InstretOverflow {
                    instret,
                    num_insns: n,
                })?;
            Some(end)
        } else {
            None
        };
        let ctx = ExecutionCtx::new(instret_end);
        let mut exec_state = VmExecState::new(from_state, ctx);

        let pc = exec_state.pc();
        let instret_end = exec_state.ctx.instret_end;
        run!(
            "execute_e1",
            self,
            instret,
            pc,
            instret_end,
            exec_state,
            ExecutionCtx
        );
        if num_insns.is_some() {
            check_exit_code(exec_state.exit_code)?;
        } else {
            check_termination(exec_state.exit_code)?;
        }
        Ok(exec_state.vm_state)
    }
}

impl<F> InterpretedInstance<'_, F, MeteredCtx>
where
    F: PrimeField32,
{
    /// Metered execution for the given `inputs`. Execution begins from the initial
    /// state specified by the `VmExe`. This function executes the program until termination.
    ///
    /// Returns the segmentation boundary data and the final VM state when execution stops.
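    ///
    /// # Example
    /// A usage sketch (not compiled as a doctest); `metered_interpreter`, `inputs`, and `ctx`
    /// are placeholders, with `ctx` built via one of the `build_metered_ctx` helpers mentioned
    /// on [`Self::execute_metered_from_state`]:
    /// ```ignore
    /// let (segments, final_state) = metered_interpreter.execute_metered(inputs, ctx)?;
    /// for segment in &segments {
    ///     // Each `Segment` describes one segment boundary discovered during metering.
    /// }
    /// ```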
    pub fn execute_metered(
        &self,
        inputs: impl Into<Streams<F>>,
        ctx: MeteredCtx,
    ) -> Result<(Vec<Segment>, VmState<F, GuestMemory>), ExecutionError> {
        let vm_state = self.create_initial_vm_state(inputs);
        self.execute_metered_from_state(vm_state, ctx)
    }

    /// Metered execution from the given `VmState`. This function executes the program until
    /// termination.
    ///
    /// Returns the segmentation boundary data and the final VM state when execution stops.
    ///
    /// The [MeteredCtx] can be constructed using either
    /// [VmExecutor::build_metered_ctx](super::VmExecutor::build_metered_ctx) or
    /// [VirtualMachine::build_metered_ctx](super::VirtualMachine::build_metered_ctx).
    pub fn execute_metered_from_state(
        &self,
        from_state: VmState<F, GuestMemory>,
        ctx: MeteredCtx,
    ) -> Result<(Vec<Segment>, VmState<F, GuestMemory>), ExecutionError> {
        let mut exec_state = VmExecState::new(from_state, ctx);

        loop {
            exec_state = self.execute_metered_until_suspend(exec_state)?;
            // The execution has terminated.
            if exec_state.exit_code.is_ok() && exec_state.exit_code.as_ref().unwrap().is_some() {
                break;
            }
            if exec_state.exit_code.is_err() {
                return Err(exec_state.exit_code.unwrap_err());
            }
        }
        check_termination(exec_state.exit_code)?;
        let VmExecState { vm_state, ctx, .. } = exec_state;
        Ok((ctx.into_segments(), vm_state))
    }

    /// Executes a metered virtual machine operation starting from a given execution state until
    /// suspension.
    ///
    /// This function resumes and continues execution of a guest virtual machine until it either:
    /// - Hits a suspension trigger (e.g. out of gas or a specific halt condition). ATTENTION:
    ///   when a suspension is triggered, the VM state is not at the boundary of the last segment.
    ///   Instead, the VM state is slightly after the segment boundary.
    /// - Completes its run based on the instructions or context provided.
    ///
    /// # Parameters
    /// - `self`: The reference to the current executor or VM context.
    /// - `exec_state`: The `VmExecState<F, GuestMemory, MeteredCtx>` representing the execution
    ///   state of the virtual machine, including its program counter (`pc`), instruction
    ///   retirement count (`instret`), and execution context (`MeteredCtx`).
    ///
    /// # Returns
    /// - `Ok(VmExecState<F, GuestMemory, MeteredCtx>)`: The execution state after suspension or
    ///   normal completion.
    /// - `Err(ExecutionError)`: If there is an error during execution, such as an invalid state
    ///   or run-time error.
    pub fn execute_metered_until_suspend(
        &self,
        mut exec_state: VmExecState<F, GuestMemory, MeteredCtx>,
    ) -> Result<VmExecState<F, GuestMemory, MeteredCtx>, ExecutionError> {
        let instret = exec_state.instret();
        let pc = exec_state.pc();
        let segmentation_check_insns = exec_state.ctx.segmentation_ctx.segment_check_insns;
        // Start execution
        run!(
            "execute_metered",
            self,
            instret,
            pc,
            segmentation_check_insns,
            exec_state,
            MeteredCtx
        );
        Ok(exec_state)
    }
}

impl<F> InterpretedInstance<'_, F, MeteredCostCtx>
where
    F: PrimeField32,
{
    /// Metered cost execution for the given `inputs`. Execution begins from the initial
    /// state specified by the `VmExe`. This function executes the program until termination.
    ///
    /// Returns the trace cost and final VM state when execution stops.
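    ///
    /// # Example
    /// A usage sketch (not compiled as a doctest); `cost_interpreter`, `inputs`, and `cost_ctx`
    /// are placeholders for an instance specialized to `MeteredCostCtx`, the input streams, and
    /// the cost context:
    /// ```ignore
    /// let (cost, final_state) = cost_interpreter.execute_metered_cost(inputs, cost_ctx)?;
    /// tracing::info!("trace cost: {cost}");
    /// ```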
    pub fn execute_metered_cost(
        &self,
        inputs: impl Into<Streams<F>>,
        ctx: MeteredCostCtx,
    ) -> Result<(u64, VmState<F, GuestMemory>), ExecutionError> {
        let vm_state = self.create_initial_vm_state(inputs);
        self.execute_metered_cost_from_state(vm_state, ctx)
    }

    /// Metered cost execution from the given `VmState`. This function executes the program until
    /// termination.
    ///
    /// Returns the trace cost and final VM state when execution stops.
    pub fn execute_metered_cost_from_state(
        &self,
        from_state: VmState<F, GuestMemory>,
        ctx: MeteredCostCtx,
    ) -> Result<(u64, VmState<F, GuestMemory>), ExecutionError> {
        let mut exec_state = VmExecState::new(from_state, ctx);

        let instret = exec_state.instret();
        let pc = exec_state.pc();
        let max_execution_cost = exec_state.ctx.max_execution_cost;
        // Start execution
        run!(
            "execute_metered_cost",
            self,
            instret,
            pc,
            max_execution_cost,
            exec_state,
            MeteredCostCtx
        );
        check_exit_code(exec_state.exit_code)?;
        let VmExecState { ctx, vm_state, .. } = exec_state;
        let cost = ctx.cost;
        Ok((cost, vm_state))
    }
}

fn alloc_pre_compute_buf<F>(program: &Program<F>, pre_compute_max_size: usize) -> AlignedBuf {
    let base_idx = get_pc_index(program.pc_base);
    let padded_program_len = base_idx + program.instructions_and_debug_infos.len();
    let buf_len = padded_program_len * pre_compute_max_size;
    AlignedBuf::uninit(buf_len, pre_compute_max_size)
}
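
// Buffer layout (illustrative): with `S = pre_compute_max_size`, the pre-compute data for the
// instruction at `pc` occupies bytes `[i * S, (i + 1) * S)` of the buffer, where
// `i = pc / DEFAULT_PC_STEP`. The first `pc_base / DEFAULT_PC_STEP` chunks are padding so that
// no `pc_base` subtraction is needed at runtime.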

fn split_pre_compute_buf<'a, F>(
    program: &Program<F>,
    pre_compute_buf: &'a mut AlignedBuf,
    pre_compute_max_size: usize,
) -> Vec<&'a mut [u8]> {
    let base_idx = get_pc_index(program.pc_base);
    let padded_program_len = base_idx + program.instructions_and_debug_infos.len();
    let buf_len = padded_program_len * pre_compute_max_size;
    // SAFETY:
    // - pre_compute_buf.ptr was allocated with exactly buf_len bytes
    // - lifetime 'a ensures the returned slices don't outlive the AlignedBuf
    let pre_compute_buf = unsafe { std::slice::from_raw_parts_mut(pre_compute_buf.ptr, buf_len) };
    pre_compute_buf
        .chunks_exact_mut(pre_compute_max_size)
        .collect()
}

/// Executes using function pointers with the trampoline (loop) approach.
///
/// # Safety
/// The entries of `fn_ptrs` contain pointers to pre-computed buffers that must outlive this
/// function.
#[cfg(not(feature = "tco"))]
#[inline(always)]
unsafe fn execute_trampoline<F: PrimeField32, Ctx: ExecutionCtxTrait>(
    mut instret: u64,
    mut pc: u32,
    arg: u64,
    exec_state: &mut VmExecState<F, GuestMemory, Ctx>,
    fn_ptrs: &[PreComputeInstruction<F, Ctx>],
) {
    while exec_state
        .exit_code
        .as_ref()
        .is_ok_and(|exit_code| exit_code.is_none())
    {
        if Ctx::should_suspend(instret, pc, arg, exec_state) {
            break;
        }
        let pc_index = get_pc_index(pc);
        if let Some(inst) = fn_ptrs.get(pc_index) {
            // SAFETY: pre_compute assumed to live long enough
            unsafe { (inst.handler)(inst.pre_compute, &mut instret, &mut pc, arg, exec_state) };
        } else {
            exec_state.exit_code = Err(ExecutionError::PcOutOfBounds(pc));
        }
    }
    // Update the execution state with the final PC and instruction count
    exec_state.set_instret_and_pc(instret, pc);
}

#[inline(always)]
pub fn get_pc_index(pc: u32) -> usize {
    (pc / DEFAULT_PC_STEP) as usize
}

/// Bytes allocated according to the given `Layout`.
// @dev: This is a duplicate from the openvm crate, but it doesn't seem worth importing `openvm`
// here just for this.
pub struct AlignedBuf {
    pub ptr: *mut u8,
    pub layout: Layout,
}

impl AlignedBuf {
    /// Allocate a new buffer whose start address is aligned to `align` bytes.
    /// *NOTE*: if `len` is zero, this creates a new `NonNull` pointer that is dangling and
    /// 16-byte aligned.
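    ///
    /// # Example
    /// A small sketch (not compiled as a doctest) of the alignment guarantee:
    /// ```ignore
    /// // Space for 8 pre-compute entries of 32 bytes each, 32-byte aligned.
    /// let buf = AlignedBuf::uninit(8 * 32, 32);
    /// assert_eq!(buf.ptr as usize % 32, 0);
    /// ```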
    pub fn uninit(len: usize, align: usize) -> Self {
        let layout = Layout::from_size_align(len, align).unwrap();
        if layout.size() == 0 {
            return Self {
                ptr: NonNull::<u128>::dangling().as_ptr() as *mut u8,
                layout,
            };
        }
        // SAFETY: `len` is nonzero
        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            handle_alloc_error(layout);
        }
        AlignedBuf { ptr, layout }
    }
}

impl Drop for AlignedBuf {
    fn drop(&mut self) {
        if self.layout.size() != 0 {
            // SAFETY: self.ptr was allocated with self.layout in AlignedBuf::uninit
            unsafe {
                dealloc(self.ptr, self.layout);
            }
        }
    }
}

#[inline(always)]
unsafe fn terminate_execute_e12_impl<F: PrimeField32, CTX: ExecutionCtxTrait>(
    pre_compute: &[u8],
    instret: &mut u64,
    pc: &mut u32,
    _arg: u64,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &TerminatePreCompute = pre_compute.borrow();
    *instret += 1;
    exec_state.exit_code = Ok(Some(pre_compute.exit_code));
    exec_state.set_instret_and_pc(*instret, *pc);
    CTX::on_terminate(*instret, *pc, exec_state);
}

#[cfg(feature = "tco")]
unsafe fn terminate_execute_e12_tco_handler<F: PrimeField32, CTX: ExecutionCtxTrait>(
    interpreter: &InterpretedInstance<F, CTX>,
    mut instret: u64,
    mut pc: u32,
    arg: u64,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute = interpreter.get_pre_compute(pc);
    terminate_execute_e12_impl(pre_compute, &mut instret, &mut pc, arg, exec_state);
}

#[cfg(feature = "tco")]
unsafe fn unreachable_tco_handler<F: PrimeField32, CTX>(
    _: &InterpretedInstance<F, CTX>,
    instret: u64,
    pc: u32,
    _arg: u64,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    exec_state.set_instret_and_pc(instret, pc);
    exec_state.exit_code = Err(ExecutionError::Unreachable(pc));
}

fn get_pre_compute_max_size<F, E: Executor<F>>(
    program: &Program<F>,
    inventory: &ExecutorInventory<E>,
) -> usize {
    program
        .instructions_and_debug_infos
        .iter()
        .map(|inst_opt| {
            if let Some((inst, _)) = inst_opt {
                if let Some(size) = system_opcode_pre_compute_size(inst) {
                    size
                } else {
                    inventory
                        .get_executor(inst.opcode)
                        .map(|executor| executor.pre_compute_size())
                        .unwrap()
                }
            } else {
                0
            }
        })
        .max()
        .unwrap()
        .next_power_of_two()
}

fn get_metered_pre_compute_max_size<F, E: MeteredExecutor<F>>(
    program: &Program<F>,
    inventory: &ExecutorInventory<E>,
) -> usize {
    program
        .instructions_and_debug_infos
        .iter()
        .map(|inst_opt| {
            if let Some((inst, _)) = inst_opt {
                if let Some(size) = system_opcode_pre_compute_size(inst) {
                    size
                } else {
                    inventory
                        .get_executor(inst.opcode)
                        .map(|executor| executor.metered_pre_compute_size())
                        .unwrap()
                }
            } else {
                0
            }
        })
        .max()
        .unwrap()
        .next_power_of_two()
}

fn system_opcode_pre_compute_size<F>(inst: &Instruction<F>) -> Option<usize> {
    if inst.opcode == SystemOpcode::TERMINATE.global_opcode() {
        return Some(size_of::<TerminatePreCompute>());
    }
    None
}

#[cfg(not(feature = "tco"))]
fn get_pre_compute_instructions<'a, F, Ctx, E>(
    program: &Program<F>,
    inventory: &'a ExecutorInventory<E>,
    pre_compute: &mut [&mut [u8]],
) -> Result<Vec<PreComputeInstruction<'a, F, Ctx>>, StaticProgramError>
where
    F: PrimeField32,
    Ctx: ExecutionCtxTrait,
    E: Executor<F>,
{
    let unreachable_handler: ExecuteFunc<F, Ctx> = |_, _, pc, _, exec_state| {
        exec_state.exit_code = Err(ExecutionError::Unreachable(*pc));
    };

    repeat_n(&None, get_pc_index(program.pc_base))
        .chain(program.instructions_and_debug_infos.iter())
        .zip_eq(pre_compute.iter_mut())
        .enumerate()
        .map(|(i, (inst_opt, buf))| {
            // SAFETY: we cast to raw pointer and then borrow to remove the lifetime. This is
            // safe only in the current context because `buf` comes from `pre_compute_buf` which
            // will outlive the returned `PreComputeInstruction`s.
            let buf: &mut [u8] = unsafe { &mut *(*buf as *mut [u8]) };
            let pre_inst = if let Some((inst, _)) = inst_opt {
                tracing::trace!("get_pre_compute_instruction {inst:?}");
                let pc = program.pc_base + i as u32 * DEFAULT_PC_STEP;
                if let Some(handler) = get_system_opcode_handler(inst, buf) {
                    PreComputeInstruction {
                        handler,
                        pre_compute: buf,
                    }
                } else if let Some(executor) = inventory.get_executor(inst.opcode) {
                    PreComputeInstruction {
                        handler: executor.pre_compute(pc, inst, buf)?,
                        pre_compute: buf,
                    }
                } else {
                    return Err(StaticProgramError::DisabledOperation {
                        pc,
                        opcode: inst.opcode,
                    });
                }
            } else {
                // Dead instruction at this pc
                PreComputeInstruction {
                    handler: unreachable_handler,
                    pre_compute: buf,
                }
            };
            Ok(pre_inst)
        })
        .collect::<Result<Vec<_>, _>>()
}

#[cfg(not(feature = "tco"))]
fn get_metered_pre_compute_instructions<'a, F, Ctx, E>(
    program: &Program<F>,
    inventory: &'a ExecutorInventory<E>,
    executor_idx_to_air_idx: &[usize],
    pre_compute: &mut [&mut [u8]],
) -> Result<Vec<PreComputeInstruction<'a, F, Ctx>>, StaticProgramError>
where
    F: PrimeField32,
    Ctx: MeteredExecutionCtxTrait,
    E: MeteredExecutor<F>,
{
    let unreachable_handler: ExecuteFunc<F, Ctx> = |_, _, pc, _, exec_state| {
        exec_state.exit_code = Err(ExecutionError::Unreachable(*pc));
    };
    repeat_n(&None, get_pc_index(program.pc_base))
        .chain(program.instructions_and_debug_infos.iter())
        .zip_eq(pre_compute.iter_mut())
        .enumerate()
        .map(|(i, (inst_opt, buf))| {
            // SAFETY: we cast to raw pointer and then borrow to remove the lifetime. This is
            // safe only in the current context because `buf` comes from `pre_compute_buf` which
            // will outlive the returned `PreComputeInstruction`s.
            let buf: &mut [u8] = unsafe { &mut *(*buf as *mut [u8]) };
            let pre_inst = if let Some((inst, _)) = inst_opt {
                tracing::trace!("get_metered_pre_compute_instruction {inst:?}");
                let pc = program.pc_base + i as u32 * DEFAULT_PC_STEP;
                if let Some(handler) = get_system_opcode_handler(inst, buf) {
                    PreComputeInstruction {
                        handler,
                        pre_compute: buf,
                    }
                } else if let Some(&executor_idx) = inventory.instruction_lookup.get(&inst.opcode) {
                    let executor_idx = executor_idx as usize;
                    let executor = inventory
                        .executors
                        .get(executor_idx)
                        .expect("ExecutorInventory ensures executor_idx is in bounds");
                    let air_idx = executor_idx_to_air_idx[executor_idx];
                    PreComputeInstruction {
                        handler: executor.metered_pre_compute(air_idx, pc, inst, buf)?,
                        pre_compute: buf,
                    }
                } else {
                    return Err(StaticProgramError::DisabledOperation {
                        pc,
                        opcode: inst.opcode,
                    });
                }
            } else {
                PreComputeInstruction {
                    handler: unreachable_handler,
                    pre_compute: buf,
                }
            };
            Ok(pre_inst)
        })
        .collect::<Result<Vec<_>, _>>()
}

fn get_system_opcode_handler<F: PrimeField32, Ctx: ExecutionCtxTrait>(
    inst: &Instruction<F>,
    buf: &mut [u8],
) -> Option<ExecuteFunc<F, Ctx>> {
    if inst.opcode == SystemOpcode::TERMINATE.global_opcode() {
        let pre_compute: &mut TerminatePreCompute = buf.borrow_mut();
        pre_compute.exit_code = inst.c.as_canonical_u32();
        return Some(terminate_execute_e12_impl);
    }
    None
}

/// Errors if the exit code is an error, or if execution terminated with a non-successful exit
/// code.
fn check_exit_code(exit_code: Result<Option<u32>, ExecutionError>) -> Result<(), ExecutionError> {
    let exit_code = exit_code?;
    if let Some(exit_code) = exit_code {
        // This means execution did terminate
        if exit_code != ExitCode::Success as u32 {
            return Err(ExecutionError::FailedWithExitCode(exit_code));
        }
    }
    Ok(())
}

/// Same as [check_exit_code] but errors if the program did not terminate.
fn check_termination(exit_code: Result<Option<u32>, ExecutionError>) -> Result<(), ExecutionError> {
    let did_terminate = matches!(exit_code.as_ref(), Ok(Some(_)));
    check_exit_code(exit_code)?;
    match did_terminate {
        true => Ok(()),
        false => Err(ExecutionError::DidNotTerminate),
    }
}