openvm_rv32im_circuit/less_than/execution.rs

use std::{
    borrow::{Borrow, BorrowMut},
    mem::size_of,
};

use openvm_circuit::{arch::*, system::memory::online::GuestMemory};
use openvm_circuit_primitives_derive::AlignedBytesBorrow;
use openvm_instructions::{
    instruction::Instruction,
    program::DEFAULT_PC_STEP,
    riscv::{RV32_IMM_AS, RV32_REGISTER_AS, RV32_REGISTER_NUM_LIMBS},
    LocalOpcode,
};
use openvm_rv32im_transpiler::LessThanOpcode;
use openvm_stark_backend::p3_field::PrimeField32;

use super::core::LessThanExecutor;
use crate::adapters::imm_to_bytes;

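/// Operands decoded once at pre-compute time and reused on every execution.
/// `a` and `b` are the rd/rs1 pointers into the register address space; `c` is
/// either the rs2 register pointer or, for immediate operands, the immediate
/// expanded to its 4-byte register representation.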
#[derive(AlignedBytesBorrow, Clone)]
#[repr(C)]
struct LessThanPreCompute {
    c: u32,
    a: u8,
    b: u8,
}

impl<A, const LIMB_BITS: usize> LessThanExecutor<A, { RV32_REGISTER_NUM_LIMBS }, LIMB_BITS> {
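    /// Validates the instruction and fills `data`, returning `(is_imm, is_sltu)`:
    /// whether operand `c` is an immediate, and whether the comparison is the
    /// unsigned SLTU rather than the signed SLT.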
    #[inline(always)]
    fn pre_compute_impl<F: PrimeField32>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut LessThanPreCompute,
    ) -> Result<(bool, bool), StaticProgramError> {
        let Instruction {
            opcode,
            a,
            b,
            c,
            d,
            e,
            ..
        } = inst;
        let e_u32 = e.as_canonical_u32();
        // `d` must address the register file; `e` selects register vs. immediate mode.
        if d.as_canonical_u32() != RV32_REGISTER_AS
            || !(e_u32 == RV32_IMM_AS || e_u32 == RV32_REGISTER_AS)
        {
            return Err(StaticProgramError::InvalidInstruction(pc));
        }
        let local_opcode = LessThanOpcode::from_usize(opcode.local_opcode_idx(self.offset));
        let is_imm = e_u32 == RV32_IMM_AS;
        let c_u32 = c.as_canonical_u32();

        *data = LessThanPreCompute {
            c: if is_imm {
                u32::from_le_bytes(imm_to_bytes(c_u32))
            } else {
                c_u32
            },
            a: a.as_canonical_u32() as u8,
            b: b.as_canonical_u32() as u8,
        };
        Ok((is_imm, local_opcode == LessThanOpcode::SLTU))
    }
}

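// Monomorphizes a handler over the two decode-time flags so each
// (immediate, signedness) combination gets its own const-generic instantiation.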
macro_rules! dispatch {
    ($execute_impl:ident, $is_imm:ident, $is_sltu:ident) => {
        match ($is_imm, $is_sltu) {
            (true, true) => Ok($execute_impl::<_, _, true, true>),
            (true, false) => Ok($execute_impl::<_, _, true, false>),
            (false, true) => Ok($execute_impl::<_, _, false, true>),
            (false, false) => Ok($execute_impl::<_, _, false, false>),
        }
    };
}

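// E1 (unmetered) execution entry points.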
impl<F, A, const LIMB_BITS: usize> Executor<F>
    for LessThanExecutor<A, { RV32_REGISTER_NUM_LIMBS }, LIMB_BITS>
where
    F: PrimeField32,
{
    #[inline(always)]
    fn pre_compute_size(&self) -> usize {
        size_of::<LessThanPreCompute>()
    }

    #[cfg(not(feature = "tco"))]
    #[inline(always)]
    fn pre_compute<Ctx: ExecutionCtxTrait>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<ExecuteFunc<F, Ctx>, StaticProgramError> {
        let pre_compute: &mut LessThanPreCompute = data.borrow_mut();
        let (is_imm, is_sltu) = self.pre_compute_impl(pc, inst, pre_compute)?;
        dispatch!(execute_e1_handler, is_imm, is_sltu)
    }

    #[cfg(feature = "tco")]
    fn handler<Ctx>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<Handler<F, Ctx>, StaticProgramError>
    where
        Ctx: ExecutionCtxTrait,
    {
        let pre_compute: &mut LessThanPreCompute = data.borrow_mut();
        let (is_imm, is_sltu) = self.pre_compute_impl(pc, inst, pre_compute)?;
        dispatch!(execute_e1_handler, is_imm, is_sltu)
    }
}

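// E2 (metered) execution: same decode, but the pre-compute buffer also records
// the chip index so trace-height accounting can be charged on each execution.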
impl<F, A, const LIMB_BITS: usize> MeteredExecutor<F>
    for LessThanExecutor<A, { RV32_REGISTER_NUM_LIMBS }, LIMB_BITS>
where
    F: PrimeField32,
{
    fn metered_pre_compute_size(&self) -> usize {
        size_of::<E2PreCompute<LessThanPreCompute>>()
    }

    #[cfg(not(feature = "tco"))]
    fn metered_pre_compute<Ctx>(
        &self,
        chip_idx: usize,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<ExecuteFunc<F, Ctx>, StaticProgramError>
    where
        Ctx: MeteredExecutionCtxTrait,
    {
        let pre_compute: &mut E2PreCompute<LessThanPreCompute> = data.borrow_mut();
        pre_compute.chip_idx = chip_idx as u32;
        let (is_imm, is_sltu) = self.pre_compute_impl(pc, inst, &mut pre_compute.data)?;
        dispatch!(execute_e2_handler, is_imm, is_sltu)
    }

    #[cfg(feature = "tco")]
    fn metered_handler<Ctx>(
        &self,
        chip_idx: usize,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<Handler<F, Ctx>, StaticProgramError>
    where
        Ctx: MeteredExecutionCtxTrait,
    {
        let pre_compute: &mut E2PreCompute<LessThanPreCompute> = data.borrow_mut();
        pre_compute.chip_idx = chip_idx as u32;
        let (is_imm, is_sltu) = self.pre_compute_impl(pc, inst, &mut pre_compute.data)?;
        dispatch!(execute_e2_handler, is_imm, is_sltu)
    }
}

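// Shared core for both execution modes: read rs1 (and rs2 or the immediate),
// compare unsigned for SLTU or signed for SLT, write the boolean result into
// the low limb of rd, then advance the pc and retire one instruction.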
#[inline(always)]
unsafe fn execute_e12_impl<
    F: PrimeField32,
    CTX: ExecutionCtxTrait,
    const E_IS_IMM: bool,
    const IS_U32: bool,
>(
    pre_compute: &LessThanPreCompute,
    instret: &mut u64,
    pc: &mut u32,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let rs1 = exec_state.vm_read::<u8, 4>(RV32_REGISTER_AS, pre_compute.b as u32);
    let rs2 = if E_IS_IMM {
        // The immediate was already expanded to 4 bytes at pre-compute time.
        pre_compute.c.to_le_bytes()
    } else {
        exec_state.vm_read::<u8, 4>(RV32_REGISTER_AS, pre_compute.c)
    };
    let cmp_result = if IS_U32 {
        u32::from_le_bytes(rs1) < u32::from_le_bytes(rs2)
    } else {
        i32::from_le_bytes(rs1) < i32::from_le_bytes(rs2)
    };
    // rd = 1 if the comparison held, 0 otherwise; upper limbs stay zero.
    let mut rd = [0u8; RV32_REGISTER_NUM_LIMBS];
    rd[0] = cmp_result as u8;
    exec_state.vm_write(RV32_REGISTER_AS, pre_compute.a as u32, &rd);

    *pc += DEFAULT_PC_STEP;
    *instret += 1;
}

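// E1 wrapper: reinterpret the raw pre-compute bytes and run the shared core.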
#[create_handler]
#[inline(always)]
unsafe fn execute_e1_impl<
    F: PrimeField32,
    CTX: ExecutionCtxTrait,
    const E_IS_IMM: bool,
    const IS_U32: bool,
>(
    pre_compute: &[u8],
    instret: &mut u64,
    pc: &mut u32,
    _instret_end: u64,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &LessThanPreCompute = pre_compute.borrow();
    execute_e12_impl::<F, CTX, E_IS_IMM, IS_U32>(pre_compute, instret, pc, exec_state);
}

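// E2 wrapper: additionally report one row of trace height for this chip
// before running the shared core.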
#[create_handler]
#[inline(always)]
unsafe fn execute_e2_impl<
    F: PrimeField32,
    CTX: MeteredExecutionCtxTrait,
    const E_IS_IMM: bool,
    const IS_U32: bool,
>(
    pre_compute: &[u8],
    instret: &mut u64,
    pc: &mut u32,
    _instret_end: u64,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &E2PreCompute<LessThanPreCompute> = pre_compute.borrow();
    exec_state
        .ctx
        .on_height_change(pre_compute.chip_idx as usize, 1);
    execute_e12_impl::<F, CTX, E_IS_IMM, IS_U32>(&pre_compute.data, instret, pc, exec_state);
}