1use std::borrow::{Borrow, BorrowMut};
2
3use openvm_bigint_transpiler::Rv32LessThan256Opcode;
4use openvm_circuit::{arch::*, system::memory::online::GuestMemory};
5use openvm_circuit_primitives_derive::AlignedBytesBorrow;
6use openvm_instructions::{
7 instruction::Instruction,
8 program::DEFAULT_PC_STEP,
9 riscv::{RV32_MEMORY_AS, RV32_REGISTER_AS},
10 LocalOpcode,
11};
12use openvm_rv32_adapters::Rv32HeapAdapterExecutor;
13use openvm_rv32im_circuit::LessThanExecutor;
14use openvm_rv32im_transpiler::LessThanOpcode;
15use openvm_stark_backend::p3_field::PrimeField32;
16
17use crate::{common, Rv32LessThan256Executor, INT256_NUM_LIMBS};
18
/// Heap adapter specialized for this chip: 2 operands of `INT256_NUM_LIMBS`
/// limbs read from memory, one `INT256_NUM_LIMBS`-limb result written back.
type AdapterExecutor = Rv32HeapAdapterExecutor<2, INT256_NUM_LIMBS, INT256_NUM_LIMBS>;
20
21impl Rv32LessThan256Executor {
22 pub fn new(adapter: AdapterExecutor, offset: usize) -> Self {
23 Self(LessThanExecutor::new(adapter, offset))
24 }
25}
26
/// Per-instruction data cached at pre-compute time: the three register
/// indices decoded from the instruction operands.
///
/// `#[repr(C)]` plus `AlignedBytesBorrow` let this struct be reinterpreted
/// in place from the raw pre-compute byte buffer via `borrow`/`borrow_mut`.
#[derive(AlignedBytesBorrow, Clone)]
#[repr(C)]
struct LessThanPreCompute {
    a: u8, // rd: register holding the destination heap pointer
    b: u8, // rs1: register holding the first operand's heap pointer
    c: u8, // rs2: register holding the second operand's heap pointer
}
34
// Selects the monomorphized execute function for the decoded opcode.
// The third const generic is `IS_U256`: SLT compares as signed 256-bit
// integers (false), SLTU as unsigned (true). Expands to `Ok(fn)`.
macro_rules! dispatch {
    ($execute_impl:ident, $local_opcode:ident) => {
        Ok(match $local_opcode {
            // signed comparison path
            LessThanOpcode::SLT => $execute_impl::<_, _, false>,
            // unsigned comparison path
            LessThanOpcode::SLTU => $execute_impl::<_, _, true>,
        })
    };
}
43
impl<F: PrimeField32> Executor<F> for Rv32LessThan256Executor {
    /// Size in bytes of the per-instruction pre-compute buffer.
    fn pre_compute_size(&self) -> usize {
        size_of::<LessThanPreCompute>()
    }

    /// Decodes `inst` into `data` and returns the execute function
    /// specialized for the decoded SLT/SLTU variant.
    fn pre_compute<Ctx>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<ExecuteFunc<F, Ctx>, StaticProgramError>
    where
        Ctx: ExecutionCtxTrait,
    {
        // Reinterpret the raw buffer as the pre-compute struct (AlignedBytesBorrow).
        let data: &mut LessThanPreCompute = data.borrow_mut();
        let local_opcode = self.pre_compute_impl(pc, inst, data)?;
        dispatch!(execute_e1_impl, local_opcode)
    }

    /// Tail-call-optimized variant of `pre_compute`: same decoding, but
    /// returns a TCO handler generated by `#[create_tco_handler]`.
    #[cfg(feature = "tco")]
    fn handler<Ctx>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<Handler<F, Ctx>, StaticProgramError>
    where
        Ctx: ExecutionCtxTrait,
    {
        let data: &mut LessThanPreCompute = data.borrow_mut();
        let local_opcode = self.pre_compute_impl(pc, inst, data)?;
        dispatch!(execute_e1_tco_handler, local_opcode)
    }
}
78
impl<F: PrimeField32> MeteredExecutor<F> for Rv32LessThan256Executor {
    /// Size of the metered pre-compute buffer: the plain pre-compute data
    /// wrapped with the chip index (`E2PreCompute`).
    fn metered_pre_compute_size(&self) -> usize {
        size_of::<E2PreCompute<LessThanPreCompute>>()
    }

    /// Metered counterpart of `pre_compute`: additionally records which
    /// chip's trace height to bump on execution.
    fn metered_pre_compute<Ctx>(
        &self,
        chip_idx: usize,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<ExecuteFunc<F, Ctx>, StaticProgramError>
    where
        Ctx: MeteredExecutionCtxTrait,
    {
        // Reinterpret the raw buffer as the metered pre-compute wrapper.
        let data: &mut E2PreCompute<LessThanPreCompute> = data.borrow_mut();
        data.chip_idx = chip_idx as u32;
        let local_opcode = self.pre_compute_impl(pc, inst, &mut data.data)?;
        dispatch!(execute_e2_impl, local_opcode)
    }

    /// Tail-call-optimized variant of `metered_pre_compute`.
    #[cfg(feature = "tco")]
    fn metered_handler<Ctx>(
        &self,
        chip_idx: usize,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<Handler<F, Ctx>, StaticProgramError>
    where
        Ctx: MeteredExecutionCtxTrait,
    {
        let data: &mut E2PreCompute<LessThanPreCompute> = data.borrow_mut();
        data.chip_idx = chip_idx as u32;
        let local_opcode = self.pre_compute_impl(pc, inst, &mut data.data)?;
        dispatch!(execute_e2_tco_handler, local_opcode)
    }
}
117
/// Core execution shared by the plain (e1) and metered (e2) paths.
///
/// Reads the three register values as 4-byte little-endian heap pointers,
/// loads both 256-bit operands from memory, compares them — signed when
/// `IS_U256` is false (SLT), unsigned when true (SLTU) — and writes a
/// 256-bit result whose least-significant limb is the comparison bit.
///
/// # Safety
/// `pre_compute` must hold register indices decoded from the instruction at
/// the current pc (see `pre_compute_impl`), and the pointed-to guest memory
/// must be addressable per the VM's memory model.
#[inline(always)]
unsafe fn execute_e12_impl<F: PrimeField32, CTX: ExecutionCtxTrait, const IS_U256: bool>(
    pre_compute: &LessThanPreCompute,
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    // Fetch the heap pointers stored in registers rs1 (b), rs2 (c), rd (a).
    let rs1_ptr = vm_state.vm_read::<u8, 4>(RV32_REGISTER_AS, pre_compute.b as u32);
    let rs2_ptr = vm_state.vm_read::<u8, 4>(RV32_REGISTER_AS, pre_compute.c as u32);
    let rd_ptr = vm_state.vm_read::<u8, 4>(RV32_REGISTER_AS, pre_compute.a as u32);
    // Load the two 256-bit operands (32 little-endian limbs each) from memory.
    let rs1 = vm_state.vm_read::<u8, INT256_NUM_LIMBS>(RV32_MEMORY_AS, u32::from_le_bytes(rs1_ptr));
    let rs2 = vm_state.vm_read::<u8, INT256_NUM_LIMBS>(RV32_MEMORY_AS, u32::from_le_bytes(rs2_ptr));
    let cmp_result = if IS_U256 {
        common::u256_lt(rs1, rs2)
    } else {
        common::i256_lt(rs1, rs2)
    };
    // Result is a full 256-bit word: 0 or 1 in the lowest limb, rest zero.
    let mut rd = [0u8; INT256_NUM_LIMBS];
    rd[0] = cmp_result as u8;
    vm_state.vm_write(RV32_MEMORY_AS, u32::from_le_bytes(rd_ptr), &rd);

    // Advance to the next instruction and count it as retired.
    vm_state.pc += DEFAULT_PC_STEP;
    vm_state.instret += 1;
}
140
/// Plain (unmetered) execute entry point: reinterprets the raw pre-compute
/// bytes and delegates to the shared implementation. The attribute macro
/// also generates `execute_e1_tco_handler` for the TCO dispatch path.
#[create_tco_handler]
unsafe fn execute_e1_impl<F: PrimeField32, CTX: ExecutionCtxTrait, const IS_U256: bool>(
    pre_compute: &[u8],
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &LessThanPreCompute = pre_compute.borrow();
    execute_e12_impl::<F, CTX, IS_U256>(pre_compute, vm_state);
}
149
/// Metered execute entry point: records a height increment of 1 for this
/// chip before running the shared implementation. The attribute macro also
/// generates `execute_e2_tco_handler` for the TCO dispatch path.
#[create_tco_handler]
unsafe fn execute_e2_impl<F: PrimeField32, CTX: MeteredExecutionCtxTrait, const IS_U256: bool>(
    pre_compute: &[u8],
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &E2PreCompute<LessThanPreCompute> = pre_compute.borrow();
    // One instruction executed -> one row added to this chip's trace.
    vm_state
        .ctx
        .on_height_change(pre_compute.chip_idx as usize, 1);
    execute_e12_impl::<F, CTX, IS_U256>(&pre_compute.data, vm_state);
}
161
162impl Rv32LessThan256Executor {
163 fn pre_compute_impl<F: PrimeField32>(
164 &self,
165 pc: u32,
166 inst: &Instruction<F>,
167 data: &mut LessThanPreCompute,
168 ) -> Result<LessThanOpcode, StaticProgramError> {
169 let Instruction {
170 opcode,
171 a,
172 b,
173 c,
174 d,
175 e,
176 ..
177 } = inst;
178 let e_u32 = e.as_canonical_u32();
179 if d.as_canonical_u32() != RV32_REGISTER_AS || e_u32 != RV32_MEMORY_AS {
180 return Err(StaticProgramError::InvalidInstruction(pc));
181 }
182 *data = LessThanPreCompute {
183 a: a.as_canonical_u32() as u8,
184 b: b.as_canonical_u32() as u8,
185 c: c.as_canonical_u32() as u8,
186 };
187 let local_opcode = LessThanOpcode::from_usize(
188 opcode.local_opcode_idx(Rv32LessThan256Opcode::CLASS_OFFSET),
189 );
190 Ok(local_opcode)
191 }
192}