openvm_rv32im_circuit/less_than/execution.rs

use std::{
    borrow::{Borrow, BorrowMut},
    mem::size_of,
};

use openvm_circuit::{arch::*, system::memory::online::GuestMemory};
use openvm_circuit_primitives_derive::AlignedBytesBorrow;
use openvm_instructions::{
    instruction::Instruction,
    program::DEFAULT_PC_STEP,
    riscv::{RV32_IMM_AS, RV32_REGISTER_AS, RV32_REGISTER_NUM_LIMBS},
    LocalOpcode,
};
use openvm_rv32im_transpiler::LessThanOpcode;
use openvm_stark_backend::p3_field::PrimeField32;

use super::core::LessThanExecutor;
use crate::adapters::imm_to_bytes;

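/// Pre-decoded operands for a less-than instruction: `a` and `b` are register
/// pointers, while `c` holds either a register pointer or the immediate expanded
/// to its four-byte little-endian form, depending on the `e` address space.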
#[derive(AlignedBytesBorrow, Clone)]
#[repr(C)]
struct LessThanPreCompute {
    c: u32,
    a: u8,
    b: u8,
}

impl<A, const LIMB_BITS: usize> LessThanExecutor<A, { RV32_REGISTER_NUM_LIMBS }, LIMB_BITS> {
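    /// Validates the instruction's address spaces (`d` must be the register
    /// address space, `e` a register or immediate address space), fills `data`
    /// with the decoded operands, and returns the `(is_imm, is_sltu)` flags used
    /// to select a monomorphized execute function.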
    #[inline(always)]
    fn pre_compute_impl<F: PrimeField32>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut LessThanPreCompute,
    ) -> Result<(bool, bool), StaticProgramError> {
        let Instruction {
            opcode,
            a,
            b,
            c,
            d,
            e,
            ..
        } = inst;
        let e_u32 = e.as_canonical_u32();
        if d.as_canonical_u32() != RV32_REGISTER_AS
            || !(e_u32 == RV32_IMM_AS || e_u32 == RV32_REGISTER_AS)
        {
            return Err(StaticProgramError::InvalidInstruction(pc));
        }
        let local_opcode = LessThanOpcode::from_usize(opcode.local_opcode_idx(self.offset));
        let is_imm = e_u32 == RV32_IMM_AS;
        let c_u32 = c.as_canonical_u32();

        *data = LessThanPreCompute {
            c: if is_imm {
                u32::from_le_bytes(imm_to_bytes(c_u32))
            } else {
                c_u32
            },
            a: a.as_canonical_u32() as u8,
            b: b.as_canonical_u32() as u8,
        };
        Ok((is_imm, local_opcode == LessThanOpcode::SLTU))
    }
}

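// Picks the execute function monomorphized over the two decode-time flags:
// whether operand `c` is an immediate and whether the comparison is unsigned (SLTU).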
macro_rules! dispatch {
    ($execute_impl:ident, $is_imm:ident, $is_sltu:ident) => {
        match ($is_imm, $is_sltu) {
            (true, true) => Ok($execute_impl::<_, _, true, true>),
            (true, false) => Ok($execute_impl::<_, _, true, false>),
            (false, true) => Ok($execute_impl::<_, _, false, true>),
            (false, false) => Ok($execute_impl::<_, _, false, false>),
        }
    };
}

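// Unmetered (E1) execution: decode once into the pre-compute buffer, then return
// an execute function (or tail-call handler) specialized for the decoded flags.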
impl<F, A, const LIMB_BITS: usize> Executor<F>
    for LessThanExecutor<A, { RV32_REGISTER_NUM_LIMBS }, LIMB_BITS>
where
    F: PrimeField32,
{
    #[inline(always)]
    fn pre_compute_size(&self) -> usize {
        size_of::<LessThanPreCompute>()
    }

    #[inline(always)]
    fn pre_compute<Ctx: ExecutionCtxTrait>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<ExecuteFunc<F, Ctx>, StaticProgramError> {
        let pre_compute: &mut LessThanPreCompute = data.borrow_mut();
        let (is_imm, is_sltu) = self.pre_compute_impl(pc, inst, pre_compute)?;
        dispatch!(execute_e1_impl, is_imm, is_sltu)
    }

    #[cfg(feature = "tco")]
    fn handler<Ctx>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<Handler<F, Ctx>, StaticProgramError>
    where
        Ctx: ExecutionCtxTrait,
    {
        let pre_compute: &mut LessThanPreCompute = data.borrow_mut();
        let (is_imm, is_sltu) = self.pre_compute_impl(pc, inst, pre_compute)?;
        dispatch!(execute_e1_tco_handler, is_imm, is_sltu)
    }
}

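// Metered (E2) execution: the pre-compute record additionally carries the chip
// index so the executor can report trace-height usage while running.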
impl<F, A, const LIMB_BITS: usize> MeteredExecutor<F>
    for LessThanExecutor<A, { RV32_REGISTER_NUM_LIMBS }, LIMB_BITS>
where
    F: PrimeField32,
{
    fn metered_pre_compute_size(&self) -> usize {
        size_of::<E2PreCompute<LessThanPreCompute>>()
    }

    fn metered_pre_compute<Ctx>(
        &self,
        chip_idx: usize,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<ExecuteFunc<F, Ctx>, StaticProgramError>
    where
        Ctx: MeteredExecutionCtxTrait,
    {
        let pre_compute: &mut E2PreCompute<LessThanPreCompute> = data.borrow_mut();
        pre_compute.chip_idx = chip_idx as u32;
        let (is_imm, is_sltu) = self.pre_compute_impl(pc, inst, &mut pre_compute.data)?;
        dispatch!(execute_e2_impl, is_imm, is_sltu)
    }

    #[cfg(feature = "tco")]
    fn metered_handler<Ctx>(
        &self,
        chip_idx: usize,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<Handler<F, Ctx>, StaticProgramError>
    where
        Ctx: MeteredExecutionCtxTrait,
    {
        let pre_compute: &mut E2PreCompute<LessThanPreCompute> = data.borrow_mut();
        pre_compute.chip_idx = chip_idx as u32;
        let (is_imm, is_sltu) = self.pre_compute_impl(pc, inst, &mut pre_compute.data)?;
        dispatch!(execute_e2_tco_handler, is_imm, is_sltu)
    }
}

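// Shared execution body: reads rs1 (and rs2, or takes the immediate), compares the
// values as u32 or i32, writes the 0/1 result into the low limb of rd, and advances
// the program counter and instruction counter.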
unsafe fn execute_e12_impl<
    F: PrimeField32,
    CTX: ExecutionCtxTrait,
    const E_IS_IMM: bool,
    const IS_U32: bool,
>(
    pre_compute: &LessThanPreCompute,
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let rs1 = vm_state.vm_read::<u8, 4>(RV32_REGISTER_AS, pre_compute.b as u32);
    let rs2 = if E_IS_IMM {
        pre_compute.c.to_le_bytes()
    } else {
        vm_state.vm_read::<u8, 4>(RV32_REGISTER_AS, pre_compute.c)
    };
    let cmp_result = if IS_U32 {
        u32::from_le_bytes(rs1) < u32::from_le_bytes(rs2)
    } else {
        i32::from_le_bytes(rs1) < i32::from_le_bytes(rs2)
    };
    let mut rd = [0u8; RV32_REGISTER_NUM_LIMBS];
    rd[0] = cmp_result as u8;
    vm_state.vm_write(RV32_REGISTER_AS, pre_compute.a as u32, &rd);

    vm_state.pc += DEFAULT_PC_STEP;
    vm_state.instret += 1;
}

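// E1 entry point: reinterprets the raw pre-compute bytes and forwards to the
// shared body.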
#[create_tco_handler]
unsafe fn execute_e1_impl<
    F: PrimeField32,
    CTX: ExecutionCtxTrait,
    const E_IS_IMM: bool,
    const IS_U32: bool,
>(
    pre_compute: &[u8],
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &LessThanPreCompute = pre_compute.borrow();
    execute_e12_impl::<F, CTX, E_IS_IMM, IS_U32>(pre_compute, vm_state);
}

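// E2 entry point: reports a trace-height increment of 1 for this chip, then runs
// the shared body.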
#[create_tco_handler]
unsafe fn execute_e2_impl<
    F: PrimeField32,
    CTX: MeteredExecutionCtxTrait,
    const E_IS_IMM: bool,
    const IS_U32: bool,
>(
    pre_compute: &[u8],
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &E2PreCompute<LessThanPreCompute> = pre_compute.borrow();
    vm_state
        .ctx
        .on_height_change(pre_compute.chip_idx as usize, 1);
    execute_e12_impl::<F, CTX, E_IS_IMM, IS_U32>(&pre_compute.data, vm_state);
}
217}