openvm_circuit/arch/
interpreter.rs

1#[cfg(feature = "tco")]
2use std::marker::PhantomData;
3use std::{
4    alloc::{alloc, dealloc, handle_alloc_error, Layout},
5    borrow::{Borrow, BorrowMut},
6    iter::repeat_n,
7    ptr::NonNull,
8};
9
10use itertools::Itertools;
11use openvm_circuit_primitives_derive::AlignedBytesBorrow;
12use openvm_instructions::{
13    exe::{SparseMemoryImage, VmExe},
14    instruction::Instruction,
15    program::{Program, DEFAULT_PC_STEP},
16    LocalOpcode, SystemOpcode,
17};
18use openvm_stark_backend::p3_field::PrimeField32;
19use tracing::info_span;
20
21#[cfg(feature = "tco")]
22use crate::arch::Handler;
23use crate::{
24    arch::{
25        execution_mode::{
26            ExecutionCtx, ExecutionCtxTrait, MeteredCostCtx, MeteredCtx, MeteredExecutionCtxTrait,
27            Segment,
28        },
29        ExecuteFunc, ExecutionError, Executor, ExecutorInventory, ExitCode, MeteredExecutor,
30        StaticProgramError, Streams, SystemConfig, VmExecState, VmState,
31    },
32    system::memory::online::GuestMemory,
33};
34
/// VM pure executor (E1/E2 executor) which doesn't consider trace generation.
/// Note: This executor doesn't hold any VM state and can be used for multiple executions.
///
/// The generic `Ctx` and constructor determine whether this supports pure execution or metered
/// execution.
// NOTE: the lifetime 'a represents the lifetime of borrowed ExecutorInventory, which must outlive
// the InterpretedInstance because `pre_compute_buf` may contain pointers to references held by
// executors.
pub struct InterpretedInstance<'a, F, Ctx> {
    /// System configuration cloned from the `ExecutorInventory` at construction; used to build
    /// the initial VM state for each execution.
    system_config: SystemConfig,
    // SAFETY: this is not actually dead code, but `pre_compute_insns` contains raw pointers
    // referring to this buffer.
    #[allow(dead_code)]
    pre_compute_buf: AlignedBuf,
    /// Instruction table of function pointers and pointers to the pre-computed buffer. Indexed by
    /// `pc_index = pc / DEFAULT_PC_STEP`.
    /// SAFETY: The first `pc_base / DEFAULT_PC_STEP` entries will be unreachable. We do this to
    /// avoid needing to subtract `pc_base` during runtime.
    #[cfg(not(feature = "tco"))]
    pre_compute_insns: Vec<PreComputeInstruction<'a, F, Ctx>>,
    /// Per-instruction stride (in bytes) into `pre_compute_buf`; each pc index owns a chunk of
    /// exactly this size. Always a power of two (see `get_pre_compute_max_size`).
    #[cfg(feature = "tco")]
    pre_compute_max_size: usize,
    /// Handler function pointers for tail call optimization.
    #[cfg(feature = "tco")]
    handlers: Vec<Handler<F, Ctx>>,

    /// Initial program counter, taken from the `VmExe`.
    pc_start: u32,

    /// Initial guest memory image, cloned from the `VmExe`.
    init_memory: SparseMemoryImage,
    /// Carries the `'a` borrow of the inventory in TCO builds, where no field stores it directly.
    #[cfg(feature = "tco")]
    phantom: PhantomData<&'a ()>,
}
67
/// One entry of the trampoline instruction table: the execute function pointer paired with the
/// slice of `pre_compute_buf` holding this instruction's pre-decoded data.
// Unused in TCO builds, where handlers fetch their pre-compute slice via
// `InterpretedInstance::get_pre_compute` instead.
#[cfg_attr(feature = "tco", allow(dead_code))]
struct PreComputeInstruction<'a, F, Ctx> {
    pub handler: ExecuteFunc<F, Ctx>,
    pub pre_compute: &'a [u8],
}
73
/// Pre-computed data for the system TERMINATE opcode: the exit code to report, taken from the
/// instruction's `c` operand (see `get_system_opcode_handler`).
#[derive(AlignedBytesBorrow, Clone)]
#[repr(C)]
struct TerminatePreCompute {
    exit_code: u32,
}
79
/// Runs the interpreter loop inside an `info_span!` named `$span`, dispatching to either the
/// trampoline loop (default build) or the tail-call handler chain (`tco` feature).
/// `$interpreter` is an `InterpretedInstance`, `$exec_state` a mutable `VmExecState`, and
/// `$ctx` the execution-context type (used for its `on_terminate` hook on the TCO path).
/// With the `metrics` feature, also records instruction count and throughput for the span.
macro_rules! run {
    ($span:literal, $interpreter:ident, $exec_state:ident, $ctx:ident) => {{
        #[cfg(feature = "metrics")]
        let start = std::time::Instant::now();
        #[cfg(feature = "metrics")]
        let start_instret = $exec_state.instret;

        info_span!($span).in_scope(|| -> Result<(), ExecutionError> {
            // SAFETY:
            // - it is the responsibility of each Executor to ensure that pre_compute_insts contains
            //   valid function pointers and pre-computed data
            #[cfg(not(feature = "tco"))]
            unsafe {
                tracing::debug!("execute_trampoline");
                execute_trampoline(&mut $exec_state, &$interpreter.pre_compute_insns);
            }
            #[cfg(feature = "tco")]
            {
                tracing::debug!("execute_tco");
                // Each handler tail-calls the next, so this single call runs until the program
                // terminates, errors, or suspends.
                let handler = $interpreter
                    .get_handler($exec_state.pc)
                    .ok_or(ExecutionError::PcOutOfBounds($exec_state.pc))?;
                // SAFETY:
                // - handler is generated by Executor, MeteredExecutor traits
                // - it is the responsibility of each Executor to ensure handler is safe given a
                //   valid VM state
                unsafe {
                    handler($interpreter, &mut $exec_state);
                }

                // `exit_code == Ok(Some(_))` means the program actually terminated (as opposed
                // to suspending or erroring), so fire the context's termination hook.
                if $exec_state
                    .exit_code
                    .as_ref()
                    .is_ok_and(|exit_code| exit_code.is_some())
                {
                    $ctx::on_terminate(&mut $exec_state);
                }
            }
            Ok(())
        })?;

        #[cfg(feature = "metrics")]
        {
            let elapsed = start.elapsed();
            let insns = $exec_state.instret - start_instret;
            tracing::info!("instructions_executed={insns}");
            metrics::counter!(concat!($span, "_insns")).absolute(insns);
            metrics::gauge!(concat!($span, "_insn_mi/s"))
                .set(insns as f64 / elapsed.as_micros() as f64);
        }
    }};
}
132
133// Constructors for E1 and E2 respectively, which generate pre-computed buffers and function
134// pointers
135// - Generic in `Ctx`
136
impl<'a, F, Ctx> InterpretedInstance<'a, F, Ctx>
where
    F: PrimeField32,
    Ctx: ExecutionCtxTrait,
{
    /// Creates a new interpreter instance for pure execution.
    // (E1 execution)
    pub fn new<E>(
        inventory: &'a ExecutorInventory<E>,
        exe: &VmExe<F>,
    ) -> Result<Self, StaticProgramError>
    where
        E: Executor<F>,
    {
        let program = &exe.program;
        // Fixed per-instruction chunk size: the max over all instructions, rounded up to a
        // power of two so each chunk stays aligned within the buffer.
        let pre_compute_max_size = get_pre_compute_max_size(program, inventory);
        let mut pre_compute_buf = alloc_pre_compute_buf(program, pre_compute_max_size);
        let mut split_pre_compute_buf =
            split_pre_compute_buf(program, &mut pre_compute_buf, pre_compute_max_size);
        // Populates the chunks with each instruction's pre-decoded data and builds the
        // trampoline table; errors with DisabledOperation if an opcode has no executor.
        #[cfg_attr(feature = "tco", allow(unused_variables))]
        let pre_compute_insns = get_pre_compute_instructions::<F, Ctx, E>(
            program,
            inventory,
            &mut split_pre_compute_buf,
        )?;
        let pc_start = exe.pc_start;
        let init_memory = exe.init_memory.clone();
        // TCO builds a parallel table of handler function pointers, one per pc index. The first
        // `pc_base / DEFAULT_PC_STEP` entries are unreachable padding so the table can be
        // indexed by `pc / DEFAULT_PC_STEP` without subtracting `pc_base`.
        #[cfg(feature = "tco")]
        let handlers = repeat_n(&None, get_pc_index(program.pc_base))
            .chain(program.instructions_and_debug_infos.iter())
            .zip_eq(split_pre_compute_buf.iter_mut())
            .enumerate()
            .map(
                |(pc_idx, (inst_opt, pre_compute))| -> Result<Handler<F, Ctx>, StaticProgramError> {
                    if let Some((inst, _)) = inst_opt {
                        // pc_idx already counts the pc_base padding, so this is the real pc.
                        let pc = pc_idx as u32 * DEFAULT_PC_STEP;
                        if get_system_opcode_handler::<F, Ctx>(inst, pre_compute).is_some() {
                            Ok(terminate_execute_e12_tco_handler)
                        } else {
                            // unwrap because get_pre_compute_instructions would have errored
                            // already on DisabledOperation
                            let executor = inventory.get_executor(inst.opcode).unwrap();
                            executor.handler(pc, inst, pre_compute)
                        }
                    } else {
                        Ok(unreachable_tco_handler)
                    }
                },
            )
            .collect::<Result<Vec<_>, _>>()?;

        Ok(Self {
            system_config: inventory.config().clone(),
            pre_compute_buf,
            #[cfg(not(feature = "tco"))]
            pre_compute_insns,
            pc_start,
            init_memory,
            #[cfg(feature = "tco")]
            pre_compute_max_size,
            #[cfg(feature = "tco")]
            handlers,
            #[cfg(feature = "tco")]
            phantom: PhantomData,
        })
    }

    /// Returns the pre-compute data chunk for the instruction at `pc`.
    ///
    /// # Safety
    /// - This function assumes that the `pc` is within program bounds - this should be the case if
    ///   the pc is checked to be in bounds before jumping to it.
    /// - The returned slice may not be entirely initialized, but it is the job of each Executor to
    /// initialize the parts of the buffer that the instruction handler will use.
    #[cfg(feature = "tco")]
    #[inline(always)]
    pub fn get_pre_compute(&self, pc: u32) -> &[u8] {
        let pc_idx = get_pc_index(pc);
        // SAFETY:
        // - we assume that pc is in bounds
        // - pre_compute_buf is allocated for pre_compute_max_size * program_len bytes, with each
        //   instruction getting pre_compute_max_size bytes
        // - self.pre_compute_buf.ptr is non-null
        // - initialization of the contents of the slice is the responsibility of each Executor
        debug_assert!(
            (pc_idx + 1) * self.pre_compute_max_size <= self.pre_compute_buf.layout.size()
        );
        unsafe {
            let ptr = self
                .pre_compute_buf
                .ptr
                .add(pc_idx * self.pre_compute_max_size);
            std::slice::from_raw_parts(ptr, self.pre_compute_max_size)
        }
    }

    /// Returns the TCO handler for the instruction at `pc`, or `None` if `pc` is outside the
    /// handler table (i.e. out of program bounds).
    #[cfg(feature = "tco")]
    #[inline(always)]
    pub fn get_handler(&self, pc: u32) -> Option<Handler<F, Ctx>> {
        let pc_idx = get_pc_index(pc);
        self.handlers.get(pc_idx).copied()
    }
}
238
impl<'a, F, Ctx> InterpretedInstance<'a, F, Ctx>
where
    F: PrimeField32,
    Ctx: MeteredExecutionCtxTrait,
{
    /// Creates a new interpreter instance for metered execution.
    // (E2 execution)
    pub fn new_metered<E>(
        inventory: &'a ExecutorInventory<E>,
        exe: &VmExe<F>,
        executor_idx_to_air_idx: &[usize],
    ) -> Result<Self, StaticProgramError>
    where
        E: MeteredExecutor<F>,
    {
        let program = &exe.program;
        // Metered pre-compute sizes differ from the pure ones, hence the parallel helpers.
        let pre_compute_max_size = get_metered_pre_compute_max_size(program, inventory);
        let mut pre_compute_buf = alloc_pre_compute_buf(program, pre_compute_max_size);
        let mut split_pre_compute_buf =
            split_pre_compute_buf(program, &mut pre_compute_buf, pre_compute_max_size);
        // Populates the chunks and builds the trampoline table; errors with DisabledOperation
        // if an opcode has no executor.
        #[cfg_attr(feature = "tco", allow(unused_variables))]
        let pre_compute_insns = get_metered_pre_compute_instructions::<F, Ctx, E>(
            program,
            inventory,
            executor_idx_to_air_idx,
            &mut split_pre_compute_buf,
        )?;

        let pc_start = exe.pc_start;
        let init_memory = exe.init_memory.clone();
        // TCO handler table, padded with unreachable entries up to pc_base so it can be
        // indexed by `pc / DEFAULT_PC_STEP` directly.
        #[cfg(feature = "tco")]
        let handlers = repeat_n(&None, get_pc_index(program.pc_base))
            .chain(program.instructions_and_debug_infos.iter())
            .zip_eq(split_pre_compute_buf.iter_mut())
            .enumerate()
            .map(
                |(pc_idx, (inst_opt, pre_compute))| -> Result<Handler<F, Ctx>, StaticProgramError> {
                    if let Some((inst, _)) = inst_opt {
                        // pc_idx already counts the pc_base padding, so this is the real pc.
                        let pc = pc_idx as u32 * DEFAULT_PC_STEP;
                        if get_system_opcode_handler::<F, Ctx>(inst, pre_compute).is_some() {
                            Ok(terminate_execute_e12_tco_handler)
                        } else {
                            // unwrap because get_pre_compute_instructions would have errored
                            // already on DisabledOperation
                            let executor_idx = inventory.instruction_lookup[&inst.opcode] as usize;
                            let executor = &inventory.executors[executor_idx];
                            // Metered handlers also need the AIR index for cost accounting.
                            let air_idx = executor_idx_to_air_idx[executor_idx];
                            executor.metered_handler(air_idx, pc, inst, pre_compute)
                        }
                    } else {
                        Ok(unreachable_tco_handler)
                    }
                },
            )
            .collect::<Result<Vec<_>, _>>()?;

        Ok(Self {
            system_config: inventory.config().clone(),
            pre_compute_buf,
            #[cfg(not(feature = "tco"))]
            pre_compute_insns,
            pc_start,
            init_memory,
            #[cfg(feature = "tco")]
            pre_compute_max_size,
            #[cfg(feature = "tco")]
            handlers,
            #[cfg(feature = "tco")]
            phantom: PhantomData,
        })
    }
}
311
// Execute functions specialized to the relevant Ctx types to provide more streamlined APIs
313
impl<F> InterpretedInstance<'_, F, ExecutionCtx>
where
    F: PrimeField32,
{
    /// Pure execution, without metering, for the given `inputs`. Execution begins from the initial
    /// state specified by the `VmExe`. This function executes the program until either termination
    /// if `num_insns` is `None` or for exactly `num_insns` instructions if `num_insns` is `Some`.
    ///
    /// Returns the final VM state when execution stops.
    pub fn execute(
        &self,
        inputs: impl Into<Streams<F>>,
        num_insns: Option<u64>,
    ) -> Result<VmState<F, GuestMemory>, ExecutionError> {
        // Fresh state: initial memory image, starting pc, and the provided input streams.
        let vm_state = VmState::initial(
            &self.system_config,
            &self.init_memory,
            self.pc_start,
            inputs,
        );
        self.execute_from_state(vm_state, num_insns)
    }

    /// Pure execution, without metering, from the given `VmState`. This function executes the
    /// program until either termination if `num_insns` is `None` or for exactly `num_insns`
    /// instructions if `num_insns` is `Some`.
    ///
    /// Returns the final VM state when execution stops.
    pub fn execute_from_state(
        &self,
        from_state: VmState<F, GuestMemory>,
        num_insns: Option<u64>,
    ) -> Result<VmState<F, GuestMemory>, ExecutionError> {
        let ctx = ExecutionCtx::new(num_insns);
        let mut exec_state = VmExecState::new(from_state, ctx);
        run!("execute_e1", self, exec_state, ExecutionCtx);
        // With an instruction budget the program may legitimately stop without terminating, so
        // only validate the exit code; without a budget, require actual termination.
        if num_insns.is_some() {
            check_exit_code(exec_state.exit_code)?;
        } else {
            check_termination(exec_state.exit_code)?;
        }
        Ok(exec_state.vm_state)
    }
}
358
impl<F> InterpretedInstance<'_, F, MeteredCtx>
where
    F: PrimeField32,
{
    /// Metered execution for the given `inputs`. Execution begins from the initial
    /// state specified by the `VmExe`. This function executes the program until termination.
    ///
    /// Returns the segmentation boundary data and the final VM state when execution stops.
    pub fn execute_metered(
        &self,
        inputs: impl Into<Streams<F>>,
        ctx: MeteredCtx,
    ) -> Result<(Vec<Segment>, VmState<F, GuestMemory>), ExecutionError> {
        // Fresh state: initial memory image, starting pc, and the provided input streams.
        let vm_state = VmState::initial(
            &self.system_config,
            &self.init_memory,
            self.pc_start,
            inputs,
        );
        self.execute_metered_from_state(vm_state, ctx)
    }

    /// Metered execution for the given `VmState`. This function executes the program until
    /// termination.
    ///
    /// Returns the segmentation boundary data and the final VM state when execution stops.
    ///
    /// The [MeteredCtx] can be constructed using either
    /// [VmExecutor::build_metered_ctx](super::VmExecutor::build_metered_ctx) or
    /// [VirtualMachine::build_metered_ctx](super::VirtualMachine::build_metered_ctx).
    pub fn execute_metered_from_state(
        &self,
        from_state: VmState<F, GuestMemory>,
        ctx: MeteredCtx,
    ) -> Result<(Vec<Segment>, VmState<F, GuestMemory>), ExecutionError> {
        let mut exec_state = VmExecState::new(from_state, ctx);
        // Start execution
        run!("execute_metered", self, exec_state, MeteredCtx);
        // Metered execution must run to completion: error if the program did not terminate.
        check_termination(exec_state.exit_code)?;
        let VmExecState { vm_state, ctx, .. } = exec_state;
        Ok((ctx.into_segments(), vm_state))
    }
}
402
impl<F> InterpretedInstance<'_, F, MeteredCostCtx>
where
    F: PrimeField32,
{
    /// Metered cost execution for the given `inputs`. Execution begins from the initial
    /// state specified by the `VmExe`. This function executes the program until termination.
    ///
    /// Returns the trace cost and final VM state when execution stops.
    pub fn execute_metered_cost(
        &self,
        inputs: impl Into<Streams<F>>,
        ctx: MeteredCostCtx,
    ) -> Result<(u64, VmState<F, GuestMemory>), ExecutionError> {
        // Fresh state: initial memory image, starting pc, and the provided input streams.
        let vm_state = VmState::initial(
            &self.system_config,
            &self.init_memory,
            self.pc_start,
            inputs,
        );
        self.execute_metered_cost_from_state(vm_state, ctx)
    }

    /// Metered cost execution for the given `VmState`. This function executes the program until
    /// termination.
    ///
    /// Returns the trace cost and final VM state when execution stops.
    pub fn execute_metered_cost_from_state(
        &self,
        from_state: VmState<F, GuestMemory>,
        ctx: MeteredCostCtx,
    ) -> Result<(u64, VmState<F, GuestMemory>), ExecutionError> {
        let mut exec_state = VmExecState::new(from_state, ctx);
        // Start execution
        run!("execute_metered_cost", self, exec_state, MeteredCostCtx);
        // NOTE(review): unlike execute_metered_from_state, this checks only the exit code and
        // does not error if the program stopped without terminating — confirm this is intended
        // given the doc comment says "until termination".
        check_exit_code(exec_state.exit_code)?;
        let VmExecState { ctx, vm_state, .. } = exec_state;
        let cost = ctx.cost;
        Ok((cost, vm_state))
    }
}
443
444fn alloc_pre_compute_buf<F>(program: &Program<F>, pre_compute_max_size: usize) -> AlignedBuf {
445    let base_idx = get_pc_index(program.pc_base);
446    let padded_program_len = base_idx + program.instructions_and_debug_infos.len();
447    let buf_len = padded_program_len * pre_compute_max_size;
448    AlignedBuf::uninit(buf_len, pre_compute_max_size)
449}
450
/// Splits the backing `pre_compute_buf` into one mutable `pre_compute_max_size`-byte chunk per
/// pc index (including the `pc_base / DEFAULT_PC_STEP` leading padding entries), so that each
/// instruction's pre-compute data can be written independently.
fn split_pre_compute_buf<'a, F>(
    program: &Program<F>,
    pre_compute_buf: &'a mut AlignedBuf,
    pre_compute_max_size: usize,
) -> Vec<&'a mut [u8]> {
    let base_idx = get_pc_index(program.pc_base);
    let padded_program_len = base_idx + program.instructions_and_debug_infos.len();
    // Must match the size computed in `alloc_pre_compute_buf`.
    let buf_len = padded_program_len * pre_compute_max_size;
    // SAFETY:
    // - pre_compute_buf.ptr was allocated with exactly buf_len bytes
    // - lifetime 'a ensures the returned slices don't outlive the AlignedBuf
    let pre_compute_buf = unsafe { std::slice::from_raw_parts_mut(pre_compute_buf.ptr, buf_len) };
    pre_compute_buf
        .chunks_exact_mut(pre_compute_max_size)
        .collect()
}
467
/// Executes using function pointers with the trampoline (loop) approach.
///
/// Loops while execution has neither errored nor terminated nor been asked to suspend by the
/// context, dispatching each instruction through its table entry. Fires the context's
/// `on_terminate` hook if the program terminated.
///
/// # Safety
/// The `fn_ptrs` pointer to pre-computed buffers that outlive this function.
#[inline(always)]
unsafe fn execute_trampoline<F: PrimeField32, Ctx: ExecutionCtxTrait>(
    vm_state: &mut VmExecState<F, GuestMemory, Ctx>,
    fn_ptrs: &[PreComputeInstruction<F, Ctx>],
) {
    // `Ok(None)` means: no error and not yet terminated — keep going.
    while vm_state
        .exit_code
        .as_ref()
        .is_ok_and(|exit_code| exit_code.is_none())
    {
        // The context may bound execution (e.g. an instruction budget).
        if Ctx::should_suspend(vm_state) {
            break;
        }
        let pc_index = get_pc_index(vm_state.pc);
        if let Some(inst) = fn_ptrs.get(pc_index) {
            // SAFETY: pre_compute assumed to live long enough
            unsafe { (inst.handler)(inst.pre_compute, vm_state) };
        } else {
            // pc fell outside the instruction table: record the error and stop.
            vm_state.exit_code = Err(ExecutionError::PcOutOfBounds(vm_state.pc));
        }
    }
    // `Ok(Some(_))` means the program terminated (as opposed to suspending or erroring).
    if vm_state
        .exit_code
        .as_ref()
        .is_ok_and(|exit_code| exit_code.is_some())
    {
        Ctx::on_terminate(vm_state);
    }
}
501
502#[inline(always)]
503pub fn get_pc_index(pc: u32) -> usize {
504    (pc / DEFAULT_PC_STEP) as usize
505}
506
/// Bytes allocated according to the given Layout
// @dev: This is duplicate from the openvm crate, but it doesn't seem worth importing `openvm` here
// just for this.
pub struct AlignedBuf {
    /// Start of the allocation; dangling (never dereferenced) when `layout.size() == 0`.
    pub ptr: *mut u8,
    /// Layout used for the allocation; kept so `Drop` can deallocate with the same layout.
    pub layout: Layout,
}
514
515impl AlignedBuf {
516    /// Allocate a new buffer whose start address is aligned to `align` bytes.
517    /// *NOTE* if `len` is zero then a creates new `NonNull` that is dangling and 16-byte aligned.
518    pub fn uninit(len: usize, align: usize) -> Self {
519        let layout = Layout::from_size_align(len, align).unwrap();
520        if layout.size() == 0 {
521            return Self {
522                ptr: NonNull::<u128>::dangling().as_ptr() as *mut u8,
523                layout,
524            };
525        }
526        // SAFETY: `len` is nonzero
527        let ptr = unsafe { alloc(layout) };
528        if ptr.is_null() {
529            handle_alloc_error(layout);
530        }
531        AlignedBuf { ptr, layout }
532    }
533}
534
535impl Drop for AlignedBuf {
536    fn drop(&mut self) {
537        if self.layout.size() != 0 {
538            // SAFETY: self.ptr was allocated with self.layout in AlignedBuf::uninit
539            unsafe {
540                dealloc(self.ptr, self.layout);
541            }
542        }
543    }
544}
545
/// Execute function for the system TERMINATE opcode: counts the instruction and records the
/// pre-computed exit code in the VM state, which stops the trampoline/TCO loop.
///
/// # Safety
/// `pre_compute` must contain a valid `TerminatePreCompute`, as written by
/// `get_system_opcode_handler`.
#[inline(always)]
unsafe fn terminate_execute_e12_impl<F: PrimeField32, CTX: ExecutionCtxTrait>(
    pre_compute: &[u8],
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    // Reinterpret the raw chunk as the TERMINATE pre-compute struct (AlignedBytesBorrow).
    let pre_compute: &TerminatePreCompute = pre_compute.borrow();
    vm_state.instret += 1;
    vm_state.exit_code = Ok(Some(pre_compute.exit_code));
}
555
/// TCO handler for TERMINATE: fetches this pc's pre-compute chunk from the interpreter and
/// delegates to [`terminate_execute_e12_impl`]. No next handler is called — execution ends here.
///
/// # Safety
/// `vm_state.pc` must be in program bounds and its chunk must hold a valid
/// `TerminatePreCompute`.
#[cfg(feature = "tco")]
unsafe fn terminate_execute_e12_tco_handler<F: PrimeField32, CTX: ExecutionCtxTrait>(
    interpreter: &InterpretedInstance<F, CTX>,
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute = interpreter.get_pre_compute(vm_state.pc);
    terminate_execute_e12_impl(pre_compute, vm_state);
}
/// TCO handler installed at pc indices with no instruction (program gaps and the `pc_base`
/// padding); reaching it records an `Unreachable` error and stops execution.
#[cfg(feature = "tco")]
unsafe fn unreachable_tco_handler<F: PrimeField32, CTX>(
    _: &InterpretedInstance<F, CTX>,
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    vm_state.exit_code = Err(ExecutionError::Unreachable(vm_state.pc));
}
571
572fn get_pre_compute_max_size<F, E: Executor<F>>(
573    program: &Program<F>,
574    inventory: &ExecutorInventory<E>,
575) -> usize {
576    program
577        .instructions_and_debug_infos
578        .iter()
579        .map(|inst_opt| {
580            if let Some((inst, _)) = inst_opt {
581                if let Some(size) = system_opcode_pre_compute_size(inst) {
582                    size
583                } else {
584                    inventory
585                        .get_executor(inst.opcode)
586                        .map(|executor| executor.pre_compute_size())
587                        .unwrap()
588                }
589            } else {
590                0
591            }
592        })
593        .max()
594        .unwrap()
595        .next_power_of_two()
596}
597
598fn get_metered_pre_compute_max_size<F, E: MeteredExecutor<F>>(
599    program: &Program<F>,
600    inventory: &ExecutorInventory<E>,
601) -> usize {
602    program
603        .instructions_and_debug_infos
604        .iter()
605        .map(|inst_opt| {
606            if let Some((inst, _)) = inst_opt {
607                if let Some(size) = system_opcode_pre_compute_size(inst) {
608                    size
609                } else {
610                    inventory
611                        .get_executor(inst.opcode)
612                        .map(|executor| executor.metered_pre_compute_size())
613                        .unwrap()
614                }
615            } else {
616                0
617            }
618        })
619        .max()
620        .unwrap()
621        .next_power_of_two()
622}
623
624fn system_opcode_pre_compute_size<F>(inst: &Instruction<F>) -> Option<usize> {
625    if inst.opcode == SystemOpcode::TERMINATE.global_opcode() {
626        return Some(size_of::<TerminatePreCompute>());
627    }
628    None
629}
630
631fn get_pre_compute_instructions<'a, F, Ctx, E>(
632    program: &Program<F>,
633    inventory: &'a ExecutorInventory<E>,
634    pre_compute: &mut [&mut [u8]],
635) -> Result<Vec<PreComputeInstruction<'a, F, Ctx>>, StaticProgramError>
636where
637    F: PrimeField32,
638    Ctx: ExecutionCtxTrait,
639    E: Executor<F>,
640{
641    let unreachable_handler: ExecuteFunc<F, Ctx> = |_, vm_state| {
642        vm_state.exit_code = Err(ExecutionError::Unreachable(vm_state.pc));
643    };
644
645    repeat_n(&None, get_pc_index(program.pc_base))
646        .chain(program.instructions_and_debug_infos.iter())
647        .zip_eq(pre_compute.iter_mut())
648        .enumerate()
649        .map(|(i, (inst_opt, buf))| {
650            // SAFETY: we cast to raw pointer and then borrow to remove the lifetime. This
651            // is safe only in the current context because `buf` comes
652            // from `pre_compute_buf` which will outlive the returned
653            // `PreComputeInstruction`s.
654            let buf: &mut [u8] = unsafe { &mut *(*buf as *mut [u8]) };
655            let pre_inst = if let Some((inst, _)) = inst_opt {
656                tracing::trace!("get_pre_compute_instruction {inst:?}");
657                let pc = program.pc_base + i as u32 * DEFAULT_PC_STEP;
658                if let Some(handler) = get_system_opcode_handler(inst, buf) {
659                    PreComputeInstruction {
660                        handler,
661                        pre_compute: buf,
662                    }
663                } else if let Some(executor) = inventory.get_executor(inst.opcode) {
664                    PreComputeInstruction {
665                        handler: executor.pre_compute(pc, inst, buf)?,
666                        pre_compute: buf,
667                    }
668                } else {
669                    return Err(StaticProgramError::DisabledOperation {
670                        pc,
671                        opcode: inst.opcode,
672                    });
673                }
674            } else {
675                // Dead instruction at this pc
676                PreComputeInstruction {
677                    handler: unreachable_handler,
678                    pre_compute: buf,
679                }
680            };
681            Ok(pre_inst)
682        })
683        .collect::<Result<Vec<_>, _>>()
684}
685
686fn get_metered_pre_compute_instructions<'a, F, Ctx, E>(
687    program: &Program<F>,
688    inventory: &'a ExecutorInventory<E>,
689    executor_idx_to_air_idx: &[usize],
690    pre_compute: &mut [&mut [u8]],
691) -> Result<Vec<PreComputeInstruction<'a, F, Ctx>>, StaticProgramError>
692where
693    F: PrimeField32,
694    Ctx: MeteredExecutionCtxTrait,
695    E: MeteredExecutor<F>,
696{
697    let unreachable_handler: ExecuteFunc<F, Ctx> = |_, vm_state| {
698        vm_state.exit_code = Err(ExecutionError::Unreachable(vm_state.pc));
699    };
700    repeat_n(&None, get_pc_index(program.pc_base))
701        .chain(program.instructions_and_debug_infos.iter())
702        .zip_eq(pre_compute.iter_mut())
703        .enumerate()
704        .map(|(i, (inst_opt, buf))| {
705            // SAFETY: we cast to raw pointer and then borrow to remove the lifetime. This
706            // is safe only in the current context because `buf` comes
707            // from `pre_compute_buf` which will outlive the returned
708            // `PreComputeInstruction`s.
709            let buf: &mut [u8] = unsafe { &mut *(*buf as *mut [u8]) };
710            let pre_inst = if let Some((inst, _)) = inst_opt {
711                tracing::trace!("get_metered_pre_compute_instruction {inst:?}");
712                let pc = program.pc_base + i as u32 * DEFAULT_PC_STEP;
713                if let Some(handler) = get_system_opcode_handler(inst, buf) {
714                    PreComputeInstruction {
715                        handler,
716                        pre_compute: buf,
717                    }
718                } else if let Some(&executor_idx) = inventory.instruction_lookup.get(&inst.opcode) {
719                    let executor_idx = executor_idx as usize;
720                    let executor = inventory
721                        .executors
722                        .get(executor_idx)
723                        .expect("ExecutorInventory ensures executor_idx is in bounds");
724                    let air_idx = executor_idx_to_air_idx[executor_idx];
725                    PreComputeInstruction {
726                        handler: executor.metered_pre_compute(air_idx, pc, inst, buf)?,
727                        pre_compute: buf,
728                    }
729                } else {
730                    return Err(StaticProgramError::DisabledOperation {
731                        pc,
732                        opcode: inst.opcode,
733                    });
734                }
735            } else {
736                PreComputeInstruction {
737                    handler: unreachable_handler,
738                    pre_compute: buf,
739                }
740            };
741            Ok(pre_inst)
742        })
743        .collect::<Result<Vec<_>, _>>()
744}
745
/// If `inst` is a system opcode handled inline by the interpreter (currently only TERMINATE),
/// writes its pre-compute data into `buf` and returns the corresponding execute function;
/// otherwise returns `None` and leaves `buf` untouched.
fn get_system_opcode_handler<F: PrimeField32, Ctx: ExecutionCtxTrait>(
    inst: &Instruction<F>,
    buf: &mut [u8],
) -> Option<ExecuteFunc<F, Ctx>> {
    if inst.opcode == SystemOpcode::TERMINATE.global_opcode() {
        // Reinterpret the chunk as TerminatePreCompute (AlignedBytesBorrow) and record the exit
        // code from operand `c`.
        let pre_compute: &mut TerminatePreCompute = buf.borrow_mut();
        pre_compute.exit_code = inst.c.as_canonical_u32();
        return Some(terminate_execute_e12_impl);
    }
    None
}
757
758/// Errors if exit code is either error or terminated with non-successful exit code.
759fn check_exit_code(exit_code: Result<Option<u32>, ExecutionError>) -> Result<(), ExecutionError> {
760    let exit_code = exit_code?;
761    if let Some(exit_code) = exit_code {
762        // This means execution did terminate
763        if exit_code != ExitCode::Success as u32 {
764            return Err(ExecutionError::FailedWithExitCode(exit_code));
765        }
766    }
767    Ok(())
768}
769
770/// Same as [check_exit_code] but errors if program did not terminate.
771fn check_termination(exit_code: Result<Option<u32>, ExecutionError>) -> Result<(), ExecutionError> {
772    let did_terminate = matches!(exit_code.as_ref(), Ok(Some(_)));
773    check_exit_code(exit_code)?;
774    match did_terminate {
775        true => Ok(()),
776        false => Err(ExecutionError::DidNotTerminate),
777    }
778}