openvm_circuit/arch/
interpreter.rs

1use std::{
2    alloc::{alloc, dealloc, handle_alloc_error, Layout},
3    borrow::{Borrow, BorrowMut},
4    iter::repeat_n,
5    ptr::NonNull,
6};
7
8use itertools::Itertools;
9use openvm_circuit_primitives_derive::AlignedBytesBorrow;
10use openvm_instructions::{
11    exe::{SparseMemoryImage, VmExe},
12    instruction::Instruction,
13    program::{Program, DEFAULT_PC_STEP},
14    LocalOpcode, SystemOpcode,
15};
16use openvm_stark_backend::p3_field::PrimeField32;
17
18#[cfg(feature = "tco")]
19use crate::arch::Handler;
20use crate::{
21    arch::{
22        execution_mode::{
23            ExecutionCtx, ExecutionCtxTrait, MeteredCostCtx, MeteredCtx, MeteredExecutionCtxTrait,
24            Segment,
25        },
26        ExecuteFunc, ExecutionError, Executor, ExecutorInventory, ExitCode, MeteredExecutor,
27        StaticProgramError, Streams, SystemConfig, VmExecState, VmState,
28    },
29    system::memory::online::GuestMemory,
30};
31
/// VM pure executor (E1/E2 executor) which doesn't consider trace generation.
/// Note: This executor doesn't hold any VM state and can be used for multiple executions.
///
/// The generic `Ctx` and constructor determine whether this supports pure execution or metered
/// execution.
// NOTE(review): an earlier revision borrowed the `ExecutorInventory` with a lifetime `'a`; the
// struct no longer carries that lifetime, but `pre_compute_buf` still backs raw pointers stored
// in `pre_compute_insns`, so the buffer must outlive all uses of those pointers and its contents
// must not be mutated after construction.
pub struct InterpretedInstance<F, Ctx> {
    system_config: SystemConfig,
    // SAFETY: this is not actually dead code, but `pre_compute_insns` contains raw pointer refers
    // to this buffer.
    #[allow(dead_code)]
    pre_compute_buf: AlignedBuf,
    /// Instruction table of function pointers and pointers to the pre-computed buffer. Indexed by
    /// `pc_index = pc / DEFAULT_PC_STEP`.
    /// SAFETY: The first `pc_base / DEFAULT_PC_STEP` entries will be unreachable. We do this to
    /// avoid needing to subtract `pc_base` during runtime.
    #[cfg(not(feature = "tco"))]
    pre_compute_insns: Vec<PreComputeInstruction<F, Ctx>>,
    /// Per-instruction slot size (bytes) within `pre_compute_buf`; used to compute slot offsets.
    #[cfg(feature = "tco")]
    pre_compute_max_size: usize,
    /// Handler function pointers for tail call optimization. Indexed like `pre_compute_insns`,
    /// including the unreachable padding entries before `pc_base`.
    #[cfg(feature = "tco")]
    handlers: Vec<Handler<F, Ctx>>,

    /// Initial program counter from the `VmExe`.
    pc_start: u32,

    /// Initial guest memory image from the `VmExe`, cloned into each new `VmState`.
    init_memory: SparseMemoryImage,
}
62
/// A program instruction lowered to a handler function pointer plus a pointer into the shared
/// pre-compute buffer holding that instruction's pre-decoded data.
#[repr(C)]
#[cfg_attr(feature = "tco", allow(dead_code))]
pub struct PreComputeInstruction<F, Ctx> {
    /// Function invoked to execute this instruction.
    pub handler: ExecuteFunc<F, Ctx>,
    /// Points into the owning `InterpretedInstance`'s `pre_compute_buf`; valid only while that
    /// buffer is alive.
    pub pre_compute: *const u8,
}
69
// SAFETY: `pre_compute` points into the `AlignedBuf` owned by the same `InterpretedInstance`,
// which is only written during construction and treated as read-only afterwards (see the note on
// `AlignedBuf`), so sharing across threads is sound.
unsafe impl<F, Ctx> Send for PreComputeInstruction<F, Ctx> {}
unsafe impl<F, Ctx> Sync for PreComputeInstruction<F, Ctx> {}
72
/// Pre-computed data for `SystemOpcode::TERMINATE`: the exit code taken from operand `c` of the
/// instruction (see [get_system_opcode_handler]).
#[derive(AlignedBytesBorrow, Clone)]
#[repr(C)]
struct TerminatePreCompute {
    exit_code: u32,
}
78
/// Runs the interpreter loop for `$interpreter` on `$exec_state` inside a tracing span named
/// `$span`, propagating any `ExecutionError` with `?`.
///
/// Without the `tco` feature this dispatches through the [execute_trampoline] loop; with `tco`
/// it performs a `$ctx::should_suspend` check once and then tail-calls into the handler chain
/// starting at the current pc.
macro_rules! run {
    ($span:literal, $interpreter:ident, $exec_state:ident, $ctx:ident) => {{
        tracing::info_span!($span).in_scope(|| -> Result<(), ExecutionError> {
            // SAFETY:
            // - it is the responsibility of each Executor to ensure that pre_compute_insts contains
            //   valid function pointers and pre-computed data
            #[cfg(not(feature = "tco"))]
            {
                unsafe {
                    execute_trampoline(&mut $exec_state, &$interpreter.pre_compute_insns);
                }
            }
            #[cfg(feature = "tco")]
            {
                // Honor a pending suspension before dispatching the first handler.
                if $ctx::should_suspend(&mut $exec_state) {
                    return Ok(());
                }

                let handler = $interpreter
                    .get_handler($exec_state.pc())
                    .ok_or(ExecutionError::PcOutOfBounds($exec_state.pc()))?;
                // SAFETY:
                // - handler is generated by Executor, MeteredExecutor traits
                // - it is the responsibility of each Executor to ensure handler is safe given a
                //   valid VM state
                unsafe {
                    handler($interpreter, &mut $exec_state);
                }
            }
            Ok(())
        })?;
    }};
}
112
113// Constructors for E1 and E2 respectively, which generate pre-computed buffers and function
114// pointers
115// - Generic in `Ctx`
116
impl<F, Ctx> InterpretedInstance<F, Ctx>
where
    F: PrimeField32,
    Ctx: ExecutionCtxTrait,
{
    /// Creates a new interpreter instance for pure execution.
    // (E1 execution)
    ///
    /// Lowers every instruction of `exe.program` into a pre-compute slot plus either a function
    /// pointer table (trampoline build) or a handler table (`tco` build).
    pub fn new<E>(
        inventory: &ExecutorInventory<E>,
        exe: &VmExe<F>,
    ) -> Result<Self, StaticProgramError>
    where
        E: Executor<F>,
    {
        let program = &exe.program;
        // Every instruction gets a slot of this (power-of-two) size in the shared buffer.
        let pre_compute_max_size = get_pre_compute_max_size(program, inventory);
        let mut pre_compute_buf = alloc_pre_compute_buf(program, pre_compute_max_size);
        let mut split_pre_compute_buf =
            split_pre_compute_buf(program, &mut pre_compute_buf, pre_compute_max_size);
        #[cfg(not(feature = "tco"))]
        let pre_compute_insns = get_pre_compute_instructions::<F, Ctx, E>(
            program,
            inventory,
            &mut split_pre_compute_buf,
        )?;
        let pc_start = exe.pc_start;
        let init_memory = exe.init_memory.clone();
        // Pad with `None` entries so the handler table can be indexed by pc_idx directly,
        // without subtracting `pc_base` at runtime.
        #[cfg(feature = "tco")]
        let handlers = repeat_n(&None, get_pc_index(program.pc_base))
            .chain(program.instructions_and_debug_infos.iter())
            .zip_eq(split_pre_compute_buf.iter_mut())
            .enumerate()
            .map(
                |(pc_idx, (inst_opt, pre_compute))| -> Result<Handler<F, Ctx>, StaticProgramError> {
                    if let Some((inst, _)) = inst_opt {
                        // `pc_idx` already accounts for the `pc_base` padding above.
                        let pc = pc_idx as u32 * DEFAULT_PC_STEP;
                        // Note: this call also initializes the TERMINATE pre-compute slot as a
                        // side effect when it matches.
                        if get_system_opcode_handler::<F, Ctx>(inst, pre_compute).is_some() {
                            Ok(terminate_execute_e12_tco_handler)
                        } else {
                            // unwrap because get_pre_compute_instructions would have errored
                            // already on DisabledOperation
                            // NOTE(review): under `tco`, `get_pre_compute_instructions` is
                            // compiled out, so a disabled opcode panics here instead of
                            // returning DisabledOperation — confirm this is intended.
                            let executor = inventory.get_executor(inst.opcode).unwrap();
                            executor.handler(pc, inst, pre_compute)
                        }
                    } else {
                        // Gap in the program (or pc_base padding): reaching it is an error.
                        Ok(unreachable_tco_handler)
                    }
                },
            )
            .collect::<Result<Vec<_>, _>>()?;

        Ok(Self {
            system_config: inventory.config().clone(),
            pre_compute_buf,
            #[cfg(not(feature = "tco"))]
            pre_compute_insns,
            pc_start,
            init_memory,
            #[cfg(feature = "tco")]
            pre_compute_max_size,
            #[cfg(feature = "tco")]
            handlers,
        })
    }

    /// Builds the initial [VmState] (pc, memory image, input streams) for this executable.
    pub fn create_initial_vm_state(&self, inputs: impl Into<Streams<F>>) -> VmState<F> {
        VmState::initial(
            &self.system_config,
            &self.init_memory,
            self.pc_start,
            inputs,
        )
    }

    /// Returns a pointer to the pre-compute slot for `pc`.
    ///
    /// # Safety
    /// - This function assumes that the `pc` is within program bounds - this should be the case if
    ///   the pc is checked to be in bounds before jumping to it.
    /// - The returned slice may not be entirely initialized, but it is the job of each Executor to
    ///   initialize the parts of the buffer that the instruction handler will use.
    #[cfg(feature = "tco")]
    #[inline(always)]
    pub fn get_pre_compute(&self, pc: u32) -> *const u8 {
        let pc_idx = get_pc_index(pc);
        // SAFETY:
        // - we assume that pc is in bounds
        // - pre_compute_buf is allocated for pre_compute_max_size * program_len bytes, with each
        //   instruction getting pre_compute_max_size bytes
        // - self.pre_compute_buf.ptr is non-null
        // - initialization of the contents of the slice is the responsibility of each Executor
        debug_assert!(
            (pc_idx + 1) * self.pre_compute_max_size <= self.pre_compute_buf.layout.size()
        );
        unsafe {
            let ptr = self
                .pre_compute_buf
                .ptr
                .add(pc_idx * self.pre_compute_max_size);
            ptr
        }
    }

    /// Returns the handler for `pc`, or `None` if `pc` is outside the handler table.
    #[cfg(feature = "tco")]
    #[inline(always)]
    pub fn get_handler(&self, pc: u32) -> Option<Handler<F, Ctx>> {
        let pc_idx = get_pc_index(pc);
        self.handlers.get(pc_idx).copied()
    }
}
225
226impl<'a, F, Ctx> InterpretedInstance<F, Ctx>
227where
228    F: PrimeField32,
229    Ctx: MeteredExecutionCtxTrait,
230{
231    /// Creates a new interpreter instance for pure execution.
232    // (E1 execution)
233    pub fn new_metered<E>(
234        inventory: &'a ExecutorInventory<E>,
235        exe: &VmExe<F>,
236        executor_idx_to_air_idx: &[usize],
237    ) -> Result<Self, StaticProgramError>
238    where
239        E: MeteredExecutor<F>,
240    {
241        let program = &exe.program;
242        let pre_compute_max_size = get_metered_pre_compute_max_size(program, inventory);
243        let mut pre_compute_buf = alloc_pre_compute_buf(program, pre_compute_max_size);
244        let mut split_pre_compute_buf =
245            split_pre_compute_buf(program, &mut pre_compute_buf, pre_compute_max_size);
246        #[cfg(not(feature = "tco"))]
247        let pre_compute_insns = get_metered_pre_compute_instructions::<F, Ctx, E>(
248            program,
249            inventory,
250            executor_idx_to_air_idx,
251            &mut split_pre_compute_buf,
252        )?;
253
254        let pc_start = exe.pc_start;
255        let init_memory = exe.init_memory.clone();
256        #[cfg(feature = "tco")]
257        let handlers = repeat_n(&None, get_pc_index(program.pc_base))
258            .chain(program.instructions_and_debug_infos.iter())
259            .zip_eq(split_pre_compute_buf.iter_mut())
260            .enumerate()
261            .map(
262                |(pc_idx, (inst_opt, pre_compute))| -> Result<Handler<F, Ctx>, StaticProgramError> {
263                    if let Some((inst, _)) = inst_opt {
264                        let pc = pc_idx as u32 * DEFAULT_PC_STEP;
265                        if get_system_opcode_handler::<F, Ctx>(inst, pre_compute).is_some() {
266                            Ok(terminate_execute_e12_tco_handler)
267                        } else {
268                            // unwrap because get_pre_compute_instructions would have errored
269                            // already on DisabledOperation
270                            let executor_idx = inventory.instruction_lookup[&inst.opcode] as usize;
271                            let executor = &inventory.executors[executor_idx];
272                            let air_idx = executor_idx_to_air_idx[executor_idx];
273                            executor.metered_handler(air_idx, pc, inst, pre_compute)
274                        }
275                    } else {
276                        Ok(unreachable_tco_handler)
277                    }
278                },
279            )
280            .collect::<Result<Vec<_>, _>>()?;
281
282        Ok(Self {
283            system_config: inventory.config().clone(),
284            pre_compute_buf,
285            #[cfg(not(feature = "tco"))]
286            pre_compute_insns,
287            pc_start,
288            init_memory,
289            #[cfg(feature = "tco")]
290            pre_compute_max_size,
291            #[cfg(feature = "tco")]
292            handlers,
293        })
294    }
295}
296
// Execute functions specialized to the relevant Ctx types to provide more streamlined APIs
298
impl<F> InterpretedInstance<F, ExecutionCtx>
where
    F: PrimeField32,
{
    /// Pure execution, without metering, for the given `inputs`. Execution begins from the initial
    /// state specified by the `VmExe`. This function executes the program until either termination
    /// if `num_insns` is `None` or for exactly `num_insns` instructions if `num_insns` is `Some`.
    ///
    /// Returns the final VM state when execution stops.
    pub fn execute(
        &self,
        inputs: impl Into<Streams<F>>,
        num_insns: Option<u64>,
    ) -> Result<VmState<F, GuestMemory>, ExecutionError> {
        let vm_state = VmState::initial(
            &self.system_config,
            &self.init_memory,
            self.pc_start,
            inputs,
        );
        self.execute_from_state(vm_state, num_insns)
    }

    /// Pure execution, without metering, from the given `VmState`. This function executes the
    /// program until either termination if `num_insns` is `None` or for exactly `num_insns`
    /// instructions if `num_insns` is `Some`.
    ///
    /// Returns the final VM state when execution stops.
    pub fn execute_from_state(
        &self,
        from_state: VmState<F, GuestMemory>,
        num_insns: Option<u64>,
    ) -> Result<VmState<F, GuestMemory>, ExecutionError> {
        let ctx = ExecutionCtx::new(num_insns);
        let mut exec_state = VmExecState::new(from_state, ctx);

        // Snapshot wall-clock time and remaining-instruction budget for throughput metrics.
        #[cfg(feature = "metrics")]
        let start = std::time::Instant::now();
        #[cfg(feature = "metrics")]
        let start_instret_left = exec_state.ctx.instret_left;

        run!("execute_e1", self, exec_state, ExecutionCtx);

        #[cfg(feature = "metrics")]
        {
            let elapsed = start.elapsed();
            // Instructions executed = budget consumed during this run.
            let insns = start_instret_left - exec_state.ctx.instret_left;
            tracing::info!("instructions_executed={insns}");
            metrics::counter!("execute_e1_insns").absolute(insns);
            metrics::gauge!("execute_e1_insn_mi/s").set(insns as f64 / elapsed.as_micros() as f64);
        }
        tracing::debug!("pc: {}", exec_state.vm_state.pc());
        tracing::debug!("interpreter exit code {:?}", exec_state.exit_code);
        tracing::debug!("num_insns {:?}", num_insns);

        // With a bounded instruction count, stopping before termination is expected; otherwise
        // the program must have actually terminated.
        if num_insns.is_some() {
            check_exit_code(exec_state.exit_code)?;
        } else {
            check_termination(exec_state.exit_code)?;
        }
        Ok(exec_state.vm_state)
    }
}
362
impl<F> InterpretedInstance<F, MeteredCtx>
where
    F: PrimeField32,
{
    /// Metered execution for the given `inputs`. Execution begins from the initial
    /// state specified by the `VmExe`. This function executes the program until termination.
    ///
    /// Returns the segmentation boundary data and the final VM state when execution stops.
    pub fn execute_metered(
        &self,
        inputs: impl Into<Streams<F>>,
        ctx: MeteredCtx,
    ) -> Result<(Vec<Segment>, VmState<F, GuestMemory>), ExecutionError> {
        let vm_state = self.create_initial_vm_state(inputs);
        self.execute_metered_from_state(vm_state, ctx)
    }

    /// Metered execution for the given `VmState`. This function executes the program until
    /// termination.
    ///
    /// Returns the segmentation boundary data and the final VM state when execution stops.
    ///
    /// The [MeteredCtx] can be constructed using either
    /// [VmExecutor::build_metered_ctx](super::VmExecutor::build_metered_ctx) or
    /// [VirtualMachine::build_metered_ctx](super::VirtualMachine::build_metered_ctx).
    pub fn execute_metered_from_state(
        &self,
        from_state: VmState<F, GuestMemory>,
        ctx: MeteredCtx,
    ) -> Result<(Vec<Segment>, VmState<F, GuestMemory>), ExecutionError> {
        let mut exec_state = VmExecState::new(from_state, ctx);

        // Resume across suspensions until the program terminates or errors.
        loop {
            exec_state = self.execute_metered_until_suspend(exec_state)?;
            // The execution has terminated.
            if exec_state.exit_code.is_ok() && exec_state.exit_code.as_ref().unwrap().is_some() {
                break;
            }
            if exec_state.exit_code.is_err() {
                return Err(exec_state.exit_code.unwrap_err());
            }
            // Otherwise Ok(None): suspended but not terminated — keep executing.
        }
        check_termination(exec_state.exit_code)?;
        let VmExecState { vm_state, ctx, .. } = exec_state;
        Ok((ctx.into_segments(), vm_state))
    }
    /// Executes a metered virtual machine operation starting from a given execution state until
    /// suspension.
    ///
    /// This function resumes and continues execution of a guest virtual machine until either it:
    /// - Hits a suspension trigger (e.g. out of gas or a specific halt condition). ATTENTION: when
    ///   a suspension is triggered, the VM state is not at the boundary of the last segment.
    ///   Instead, the VM state is slightly after the segment boundary.
    /// - Completes its run based on the instructions or context provided.
    ///
    /// # Parameters
    /// - `self`: The reference to the current executor or VM context.
    /// - `exec_state`: A mutable `VmExecState<F, GuestMemory, MeteredCtx>` which represents the
    ///   execution state of the virtual machine, including its program counter (`pc`), instruction
    ///   retirement (`instret`), and execution context (`MeteredCtx`).
    ///
    /// # Returns
    /// - `Ok(VmExecState<F, GuestMemory, MeteredCtx>)`: The execution state after suspension or
    ///   normal completion.
    /// - `Err(ExecutionError)`: If there is an error during execution, such as an invalid state or
    ///   run-time error.
    pub fn execute_metered_until_suspend(
        &self,
        mut exec_state: VmExecState<F, GuestMemory, MeteredCtx>,
    ) -> Result<VmExecState<F, GuestMemory, MeteredCtx>, ExecutionError> {
        // Snapshot wall-clock time and retired-instruction count for throughput metrics.
        #[cfg(feature = "metrics")]
        let start = std::time::Instant::now();
        #[cfg(feature = "metrics")]
        let start_instret = exec_state.ctx.segmentation_ctx.instret;

        // Start execution
        run!("execute_metered", self, exec_state, MeteredCtx);

        #[cfg(feature = "metrics")]
        {
            let elapsed = start.elapsed();
            let insns = exec_state.ctx.segmentation_ctx.instret - start_instret;
            tracing::info!("instructions_executed={insns}");
            metrics::counter!("execute_metered_insns").absolute(insns);
            metrics::gauge!("execute_metered_insn_mi/s")
                .set(insns as f64 / elapsed.as_micros() as f64);
        }
        Ok(exec_state)
    }
}
453
impl<F> InterpretedInstance<F, MeteredCostCtx>
where
    F: PrimeField32,
{
    /// Metered cost execution for the given `inputs`. Execution begins from the initial
    /// state specified by the `VmExe`. This function executes the program until termination.
    ///
    /// Returns the trace cost and final VM state when execution stops.
    pub fn execute_metered_cost(
        &self,
        inputs: impl Into<Streams<F>>,
        ctx: MeteredCostCtx,
    ) -> Result<(MeteredCostCtx, VmState<F, GuestMemory>), ExecutionError> {
        let vm_state = self.create_initial_vm_state(inputs);
        self.execute_metered_cost_from_state(vm_state, ctx)
    }

    /// Metered cost execution for the given `VmState`. This function executes the program until
    /// termination.
    ///
    /// Returns the trace cost and final VM state when execution stops.
    pub fn execute_metered_cost_from_state(
        &self,
        from_state: VmState<F, GuestMemory>,
        ctx: MeteredCostCtx,
    ) -> Result<(MeteredCostCtx, VmState<F, GuestMemory>), ExecutionError> {
        let mut exec_state = VmExecState::new(from_state, ctx);

        // Snapshot wall-clock time and retired-instruction count for throughput metrics.
        #[cfg(feature = "metrics")]
        let start = std::time::Instant::now();
        #[cfg(feature = "metrics")]
        let start_instret = exec_state.ctx.instret;

        // Start execution
        run!("execute_metered_cost", self, exec_state, MeteredCostCtx);

        #[cfg(feature = "metrics")]
        {
            let elapsed = start.elapsed();
            let insns = exec_state.ctx.instret - start_instret;
            tracing::info!("instructions_executed={insns}");
            metrics::counter!("execute_metered_cost_insns").absolute(insns);
            metrics::gauge!("execute_metered_cost_insn_mi/s")
                .set(insns as f64 / elapsed.as_micros() as f64);
        }

        // Cost execution runs to termination but tolerates a successful non-terminated stop only
        // insofar as check_exit_code allows (errors on failure exit codes).
        check_exit_code(exec_state.exit_code)?;
        let VmExecState { ctx, vm_state, .. } = exec_state;
        Ok((ctx, vm_state))
    }
}
505
506pub fn alloc_pre_compute_buf<F>(program: &Program<F>, pre_compute_max_size: usize) -> AlignedBuf {
507    let base_idx = get_pc_index(program.pc_base);
508    let padded_program_len = base_idx + program.instructions_and_debug_infos.len();
509    let buf_len = padded_program_len * pre_compute_max_size;
510    AlignedBuf::uninit(buf_len, pre_compute_max_size)
511}
512
/// Splits the flat pre-compute buffer into one `pre_compute_max_size`-byte chunk per pc index,
/// including the padding chunks before `program.pc_base`. The returned slices borrow from
/// `pre_compute_buf` for `'a`.
pub fn split_pre_compute_buf<'a, F>(
    program: &Program<F>,
    pre_compute_buf: &'a mut AlignedBuf,
    pre_compute_max_size: usize,
) -> Vec<&'a mut [u8]> {
    let base_idx = get_pc_index(program.pc_base);
    let padded_program_len = base_idx + program.instructions_and_debug_infos.len();
    // Must match the size computed in `alloc_pre_compute_buf`.
    let buf_len = padded_program_len * pre_compute_max_size;
    // SAFETY:
    // - pre_compute_buf.ptr was allocated with exactly buf_len bytes
    // - lifetime 'a ensures the returned slices don't outlive the AlignedBuf
    let pre_compute_buf = unsafe { std::slice::from_raw_parts_mut(pre_compute_buf.ptr, buf_len) };
    pre_compute_buf
        .chunks_exact_mut(pre_compute_max_size)
        .collect()
}
529
/// Executes using function pointers with the trampoline (loop) approach.
///
/// Loops until the exit code is set (termination or error) or the context requests suspension,
/// dispatching each instruction through its entry in `fn_ptrs`.
///
/// # Safety
/// The `fn_ptrs` pointer to pre-computed buffers that outlive this function.
#[cfg(not(feature = "tco"))]
#[inline(always)]
unsafe fn execute_trampoline<F: PrimeField32, Ctx: ExecutionCtxTrait>(
    exec_state: &mut VmExecState<F, GuestMemory, Ctx>,
    fn_ptrs: &[PreComputeInstruction<F, Ctx>],
) {
    // Continue while exit_code is Ok(None), i.e. neither terminated nor errored.
    while exec_state
        .exit_code
        .as_ref()
        .is_ok_and(|exit_code| exit_code.is_none())
    {
        if Ctx::should_suspend(exec_state) {
            tracing::debug!("stop because of should_suspend");
            break;
        }
        let pc = exec_state.pc();
        let pc_index = get_pc_index(pc);

        // `fn_ptrs` is indexed by pc_idx directly (padding entries cover pcs below pc_base).
        if let Some(inst) = fn_ptrs.get(pc_index) {
            // SAFETY: pre_compute assumed to live long enough
            unsafe { (inst.handler)(inst.pre_compute, exec_state) };
        } else {
            exec_state.exit_code = Err(ExecutionError::PcOutOfBounds(pc));
        }
    }
}
560
561#[inline(always)]
562pub fn get_pc_index(pc: u32) -> usize {
563    (pc / DEFAULT_PC_STEP) as usize
564}
565
/// Bytes allocated according to the given Layout.
/// Careful: this struct implements Send and Sync unsafely. Don't change the underlying data after
/// initialization.
// @dev: This is duplicate from the openvm crate, but it doesn't seem worth importing `openvm` here
// just for this.
pub struct AlignedBuf {
    /// Start of the allocation; dangling (and never dereferenced) when `layout.size() == 0`.
    pub ptr: *mut u8,
    /// Layout used for the allocation; reused to deallocate in `Drop`.
    pub layout: Layout,
}
575
// SAFETY: per the struct-level note, the buffer contents are not mutated after initialization, so
// sharing the raw pointer across threads is sound.
unsafe impl Send for AlignedBuf {}
unsafe impl Sync for AlignedBuf {}
578
579impl AlignedBuf {
580    /// Allocate a new buffer whose start address is aligned to `align` bytes.
581    /// *NOTE* if `len` is zero then a creates new `NonNull` that is dangling and 16-byte aligned.
582    pub fn uninit(len: usize, align: usize) -> Self {
583        let layout = Layout::from_size_align(len, align).unwrap();
584        if layout.size() == 0 {
585            return Self {
586                ptr: NonNull::<u128>::dangling().as_ptr() as *mut u8,
587                layout,
588            };
589        }
590        // SAFETY: `len` is nonzero
591        let ptr = unsafe { alloc(layout) };
592        if ptr.is_null() {
593            handle_alloc_error(layout);
594        }
595        AlignedBuf { ptr, layout }
596    }
597}
598
impl Drop for AlignedBuf {
    fn drop(&mut self) {
        // Zero-sized layouts were never allocated (ptr is dangling), so nothing to free.
        if self.layout.size() != 0 {
            // SAFETY: self.ptr was allocated with self.layout in AlignedBuf::uninit
            unsafe {
                dealloc(self.ptr, self.layout);
            }
        }
    }
}
609
/// Shared TERMINATE implementation: records the pre-computed exit code and notifies the context.
///
/// # Safety
/// `pre_compute` must point to at least `size_of::<TerminatePreCompute>()` bytes that were
/// initialized as a `TerminatePreCompute` (done by [get_system_opcode_handler]).
#[inline(always)]
unsafe fn terminate_execute_e12_impl<F: PrimeField32, CTX: ExecutionCtxTrait>(
    pre_compute: *const u8,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &TerminatePreCompute =
        std::slice::from_raw_parts(pre_compute, size_of::<TerminatePreCompute>()).borrow();
    // Ok(Some(..)) marks normal termination; the trampoline/tco loops stop on it.
    exec_state.exit_code = Ok(Some(pre_compute.exit_code));
    CTX::on_terminate(exec_state);
}
620
/// TCO handler for TERMINATE: resolves the current pc's pre-compute slot and delegates to
/// [terminate_execute_e12_impl].
///
/// # Safety
/// The current pc must be within program bounds and its pre-compute slot must hold an
/// initialized `TerminatePreCompute`.
#[cfg(feature = "tco")]
unsafe fn terminate_execute_e12_tco_handler<F: PrimeField32, CTX: ExecutionCtxTrait>(
    interpreter: &InterpretedInstance<F, CTX>,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute = interpreter.get_pre_compute(exec_state.vm_state.pc());
    terminate_execute_e12_impl(pre_compute, exec_state);
}
629
/// TCO handler installed for pc slots with no instruction (program gaps and the `pc_base`
/// padding); reaching one sets an `Unreachable` execution error.
#[cfg(feature = "tco")]
unsafe fn unreachable_tco_handler<F: PrimeField32, CTX>(
    _: &InterpretedInstance<F, CTX>,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    exec_state.exit_code = Err(ExecutionError::Unreachable(exec_state.vm_state.pc()));
}
637
638pub fn get_pre_compute_max_size<F, E: Executor<F>>(
639    program: &Program<F>,
640    inventory: &ExecutorInventory<E>,
641) -> usize {
642    program
643        .instructions_and_debug_infos
644        .iter()
645        .map(|inst_opt| {
646            if let Some((inst, _)) = inst_opt {
647                if let Some(size) = system_opcode_pre_compute_size(inst) {
648                    size
649                } else {
650                    inventory
651                        .get_executor(inst.opcode)
652                        .map(|executor| executor.pre_compute_size())
653                        .unwrap()
654                }
655            } else {
656                0
657            }
658        })
659        .max()
660        .unwrap()
661        .next_power_of_two()
662}
663
664pub fn get_metered_pre_compute_max_size<F, E: MeteredExecutor<F>>(
665    program: &Program<F>,
666    inventory: &ExecutorInventory<E>,
667) -> usize {
668    program
669        .instructions_and_debug_infos
670        .iter()
671        .map(|inst_opt| {
672            if let Some((inst, _)) = inst_opt {
673                if let Some(size) = system_opcode_pre_compute_size(inst) {
674                    size
675                } else {
676                    inventory
677                        .get_executor(inst.opcode)
678                        .map(|executor| executor.metered_pre_compute_size())
679                        .unwrap()
680                }
681            } else {
682                0
683            }
684        })
685        .max()
686        .unwrap()
687        .next_power_of_two()
688}
689
690fn system_opcode_pre_compute_size<F>(inst: &Instruction<F>) -> Option<usize> {
691    if inst.opcode == SystemOpcode::TERMINATE.global_opcode() {
692        return Some(size_of::<TerminatePreCompute>());
693    }
694    None
695}
696
/// Builds the trampoline instruction table for pure (E1) execution: one
/// [PreComputeInstruction] per pc index (including `pc_base` padding), writing each
/// instruction's pre-computed data into its chunk of `pre_compute`.
///
/// Returns `StaticProgramError::DisabledOperation` if an opcode has no registered executor.
#[cfg(not(feature = "tco"))]
pub fn get_pre_compute_instructions<F, Ctx, E>(
    program: &Program<F>,
    inventory: &ExecutorInventory<E>,
    pre_compute: &mut [&mut [u8]],
) -> Result<Vec<PreComputeInstruction<F, Ctx>>, StaticProgramError>
where
    F: PrimeField32,
    Ctx: ExecutionCtxTrait,
    E: Executor<F>,
{
    // Installed at pcs with no instruction; executing one is an error.
    let unreachable_handler: ExecuteFunc<F, Ctx> = |_, exec_state| {
        exec_state.exit_code = Err(ExecutionError::Unreachable(exec_state.pc()));
    };

    // Pad with `None` so the table is indexed by pc_idx directly (no pc_base subtraction).
    repeat_n(&None, get_pc_index(program.pc_base))
        .chain(program.instructions_and_debug_infos.iter())
        .zip_eq(pre_compute.iter_mut())
        .enumerate()
        .map(|(i, (inst_opt, buf))| {
            // SAFETY: we cast to raw pointer and then borrow to remove the lifetime. This
            // is safe only in the current context because `buf` comes
            // from `pre_compute_buf` which will outlive the returned
            // `PreComputeInstruction`s.
            let buf: &mut [u8] = unsafe { &mut *(*buf as *mut [u8]) };
            let pre_inst = if let Some((inst, _)) = inst_opt {
                tracing::trace!("get_pre_compute_instruction {inst:?}");
                // `i` already includes the pc_base padding, so pc is simply i * DEFAULT_PC_STEP.
                let pc = i as u32 * DEFAULT_PC_STEP;
                if let Some(handler) = get_system_opcode_handler(inst, buf) {
                    PreComputeInstruction {
                        handler,
                        pre_compute: buf.as_ptr(),
                    }
                } else if let Some(executor) = inventory.get_executor(inst.opcode) {
                    PreComputeInstruction {
                        handler: executor.pre_compute(pc, inst, buf)?,
                        pre_compute: buf.as_ptr(),
                    }
                } else {
                    return Err(StaticProgramError::DisabledOperation {
                        pc,
                        opcode: inst.opcode,
                    });
                }
            } else {
                // Dead instruction at this pc
                PreComputeInstruction {
                    handler: unreachable_handler,
                    pre_compute: buf.as_ptr(),
                }
            };
            Ok(pre_inst)
        })
        .collect::<Result<Vec<_>, _>>()
}
752
753#[cfg(not(feature = "tco"))]
754pub fn get_metered_pre_compute_instructions<F, Ctx, E>(
755    program: &Program<F>,
756    inventory: &ExecutorInventory<E>,
757    executor_idx_to_air_idx: &[usize],
758    pre_compute: &mut [&mut [u8]],
759) -> Result<Vec<PreComputeInstruction<F, Ctx>>, StaticProgramError>
760where
761    F: PrimeField32,
762    Ctx: MeteredExecutionCtxTrait,
763    E: MeteredExecutor<F>,
764{
765    let unreachable_handler: ExecuteFunc<F, Ctx> = |_, exec_state| {
766        exec_state.exit_code = Err(ExecutionError::Unreachable(exec_state.pc()));
767    };
768    repeat_n(&None, get_pc_index(program.pc_base))
769        .chain(program.instructions_and_debug_infos.iter())
770        .zip_eq(pre_compute.iter_mut())
771        .enumerate()
772        .map(|(i, (inst_opt, buf))| {
773            // SAFETY: we cast to raw pointer and then borrow to remove the lifetime. This
774            // is safe only in the current context because `buf` comes
775            // from `pre_compute_buf` which will outlive the returned
776            // `PreComputeInstruction`s.
777            let buf: &mut [u8] = unsafe { &mut *(*buf as *mut [u8]) };
778            let pre_inst = if let Some((inst, _)) = inst_opt {
779                tracing::trace!("get_metered_pre_compute_instruction {inst:?}");
780                let pc = program.pc_base + i as u32 * DEFAULT_PC_STEP;
781                if let Some(handler) = get_system_opcode_handler(inst, buf) {
782                    PreComputeInstruction {
783                        handler,
784                        pre_compute: buf.as_ptr(),
785                    }
786                } else if let Some(&executor_idx) = inventory.instruction_lookup.get(&inst.opcode) {
787                    let executor_idx = executor_idx as usize;
788                    let executor = inventory
789                        .executors
790                        .get(executor_idx)
791                        .expect("ExecutorInventory ensures executor_idx is in bounds");
792                    let air_idx = executor_idx_to_air_idx[executor_idx];
793                    PreComputeInstruction {
794                        handler: executor.metered_pre_compute(air_idx, pc, inst, buf)?,
795                        pre_compute: buf.as_ptr(),
796                    }
797                } else {
798                    return Err(StaticProgramError::DisabledOperation {
799                        pc,
800                        opcode: inst.opcode,
801                    });
802                }
803            } else {
804                PreComputeInstruction {
805                    handler: unreachable_handler,
806                    pre_compute: buf.as_ptr(),
807                }
808            };
809            Ok(pre_inst)
810        })
811        .collect::<Result<Vec<_>, _>>()
812}
813
814fn get_system_opcode_handler<F: PrimeField32, Ctx: ExecutionCtxTrait>(
815    inst: &Instruction<F>,
816    buf: &mut [u8],
817) -> Option<ExecuteFunc<F, Ctx>> {
818    if inst.opcode == SystemOpcode::TERMINATE.global_opcode() {
819        let pre_compute: &mut TerminatePreCompute = buf.borrow_mut();
820        pre_compute.exit_code = inst.c.as_canonical_u32();
821        return Some(terminate_execute_e12_impl);
822    }
823    None
824}
825
826/// Errors if exit code is either error or terminated with non-successful exit code.
827fn check_exit_code(exit_code: Result<Option<u32>, ExecutionError>) -> Result<(), ExecutionError> {
828    let exit_code = exit_code?;
829    if let Some(exit_code) = exit_code {
830        // This means execution did terminate
831        if exit_code != ExitCode::Success as u32 {
832            return Err(ExecutionError::FailedWithExitCode(exit_code));
833        }
834    }
835    Ok(())
836}
837
838/// Same as [check_exit_code] but errors if program did not terminate.
839pub(super) fn check_termination(
840    exit_code: Result<Option<u32>, ExecutionError>,
841) -> Result<(), ExecutionError> {
842    let did_terminate = matches!(exit_code.as_ref(), Ok(Some(_)));
843    check_exit_code(exit_code)?;
844    match did_terminate {
845        true => Ok(()),
846        false => Err(ExecutionError::DidNotTerminate),
847    }
848}