openvm_rv32im_circuit/hintstore/execution.rs

use std::{
    borrow::{Borrow, BorrowMut},
    mem::size_of,
};

use openvm_circuit::{arch::*, system::memory::online::GuestMemory};
use openvm_circuit_primitives_derive::AlignedBytesBorrow;
use openvm_instructions::{
    instruction::Instruction,
    program::DEFAULT_PC_STEP,
    riscv::{RV32_MEMORY_AS, RV32_REGISTER_AS, RV32_REGISTER_NUM_LIMBS},
    LocalOpcode,
};
use openvm_rv32im_transpiler::{
    Rv32HintStoreOpcode,
    Rv32HintStoreOpcode::{HINT_BUFFER, HINT_STOREW},
};
use openvm_stark_backend::p3_field::PrimeField32;

use super::Rv32HintStoreExecutor;

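/// Operands captured from a HINT_STOREW / HINT_BUFFER instruction at pre-compute time so that
/// the execute functions do not re-decode the instruction: `c` holds the instruction's `c`
/// operand as a `u32`, while `a` and `b` hold the register operands truncated to `u8`.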
#[derive(AlignedBytesBorrow, Clone)]
#[repr(C)]
struct HintStorePreCompute {
    c: u32,
    a: u8,
    b: u8,
}

impl Rv32HintStoreExecutor {
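    /// Decodes `inst` into `HintStorePreCompute`, rejecting instructions whose address spaces
    /// are not (register, memory), and returns the local opcode used to select the execute
    /// function.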
    #[inline(always)]
    fn pre_compute_impl<F: PrimeField32>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut HintStorePreCompute,
    ) -> Result<Rv32HintStoreOpcode, StaticProgramError> {
        let &Instruction {
            opcode,
            a,
            b,
            c,
            d,
            e,
            ..
        } = inst;
        if d.as_canonical_u32() != RV32_REGISTER_AS || e.as_canonical_u32() != RV32_MEMORY_AS {
            return Err(StaticProgramError::InvalidInstruction(pc));
        }
        *data = HintStorePreCompute {
            c: c.as_canonical_u32(),
            a: a.as_canonical_u32() as u8,
            b: b.as_canonical_u32() as u8,
        };
        Ok(Rv32HintStoreOpcode::from_usize(
            opcode.local_opcode_idx(self.offset),
        ))
    }
}

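// Selects an execute function monomorphized over `IS_HINT_STOREW`, so the single-word
// (HINT_STOREW) and variable-length (HINT_BUFFER) paths are resolved at compile time.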
macro_rules! dispatch {
    ($execute_impl:ident, $local_opcode:ident) => {
        match $local_opcode {
            HINT_STOREW => Ok($execute_impl::<_, _, true>),
            HINT_BUFFER => Ok($execute_impl::<_, _, false>),
        }
    };
}

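// Unmetered (E1) execution: decode the operands once into the pre-compute buffer, then return
// the execute function chosen by `dispatch!`.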
impl<F> Executor<F> for Rv32HintStoreExecutor
where
    F: PrimeField32,
{
    #[inline(always)]
    fn pre_compute_size(&self) -> usize {
        size_of::<HintStorePreCompute>()
    }

    fn pre_compute<Ctx: ExecutionCtxTrait>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<ExecuteFunc<F, Ctx>, StaticProgramError> {
        let pre_compute: &mut HintStorePreCompute = data.borrow_mut();
        let local_opcode = self.pre_compute_impl(pc, inst, pre_compute)?;
        dispatch!(execute_e1_impl, local_opcode)
    }

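    // With the `tco` feature, hand back the tail-call-optimized handler instead of the plain
    // execute function (the `*_tco_handler` symbols appear to come from `#[create_tco_handler]`
    // on the corresponding `execute_*_impl` below).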
    #[cfg(feature = "tco")]
    fn handler<Ctx>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<Handler<F, Ctx>, StaticProgramError>
    where
        Ctx: ExecutionCtxTrait,
    {
        let pre_compute: &mut HintStorePreCompute = data.borrow_mut();
        let local_opcode = self.pre_compute_impl(pc, inst, pre_compute)?;
        dispatch!(execute_e1_tco_handler, local_opcode)
    }
}

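// Metered (E2) execution: identical dispatch, but the pre-compute also records which chip's
// trace height this instruction contributes to.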
impl<F> MeteredExecutor<F> for Rv32HintStoreExecutor
where
    F: PrimeField32,
{
    fn metered_pre_compute_size(&self) -> usize {
        size_of::<E2PreCompute<HintStorePreCompute>>()
    }

    fn metered_pre_compute<Ctx>(
        &self,
        chip_idx: usize,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<ExecuteFunc<F, Ctx>, StaticProgramError>
    where
        Ctx: MeteredExecutionCtxTrait,
    {
        let pre_compute: &mut E2PreCompute<HintStorePreCompute> = data.borrow_mut();
        pre_compute.chip_idx = chip_idx as u32;
        let local_opcode = self.pre_compute_impl(pc, inst, &mut pre_compute.data)?;
        dispatch!(execute_e2_impl, local_opcode)
    }

    #[cfg(feature = "tco")]
    fn metered_handler<Ctx>(
        &self,
        chip_idx: usize,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<Handler<F, Ctx>, StaticProgramError>
    where
        Ctx: MeteredExecutionCtxTrait,
    {
        let pre_compute: &mut E2PreCompute<HintStorePreCompute> = data.borrow_mut();
        pre_compute.chip_idx = chip_idx as u32;
        let local_opcode = self.pre_compute_impl(pc, inst, &mut pre_compute.data)?;
        dispatch!(execute_e2_tco_handler, local_opcode)
    }
}

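/// Shared body of HINT_STOREW / HINT_BUFFER execution. Pops bytes from the hint stream and
/// writes them as 4-byte words to guest memory at the pointer held in register `b`, then
/// returns the number of words written (reported by the E2 wrapper as a height change).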
#[inline(always)]
unsafe fn execute_e12_impl<F: PrimeField32, CTX: ExecutionCtxTrait, const IS_HINT_STOREW: bool>(
    pre_compute: &HintStorePreCompute,
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) -> u32 {
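    // Read the destination pointer from register `b` (4 little-endian limbs).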
    let mem_ptr_limbs = vm_state.vm_read::<u8, 4>(RV32_REGISTER_AS, pre_compute.b as u32);
    let mem_ptr = u32::from_le_bytes(mem_ptr_limbs);

    let num_words = if IS_HINT_STOREW {
        1
    } else {
        let num_words_limbs = vm_state.vm_read::<u8, 4>(RV32_REGISTER_AS, pre_compute.a as u32);
        u32::from_le_bytes(num_words_limbs)
    };
    debug_assert_ne!(num_words, 0);

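    // Abort without writing anything if the hint stream cannot supply every requested byte.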
    if vm_state.streams.hint_stream.len() < RV32_REGISTER_NUM_LIMBS * num_words as usize {
        vm_state.exit_code = Err(ExecutionError::HintOutOfBounds { pc: vm_state.pc });
        return 0;
    }

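    // Pop RV32_REGISTER_NUM_LIMBS hint bytes per word and write them to consecutive words
    // starting at `mem_ptr`.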
    for word_index in 0..num_words {
        let data: [u8; RV32_REGISTER_NUM_LIMBS] = std::array::from_fn(|_| {
            vm_state
                .streams
                .hint_stream
                .pop_front()
                .unwrap()
                .as_canonical_u32() as u8
        });
        vm_state.vm_write(
            RV32_MEMORY_AS,
            mem_ptr + (RV32_REGISTER_NUM_LIMBS as u32 * word_index),
            &data,
        );
    }

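    // Advance to the next instruction and report how many words were written.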
    vm_state.pc = vm_state.pc.wrapping_add(DEFAULT_PC_STEP);
    vm_state.instret += 1;
    num_words
}

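// E1 wrapper: reinterpret the raw pre-compute bytes as `HintStorePreCompute` and run the shared
// implementation, discarding the returned word count.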
#[create_tco_handler]
unsafe fn execute_e1_impl<F: PrimeField32, CTX: ExecutionCtxTrait, const IS_HINT_STOREW: bool>(
    pre_compute: &[u8],
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &HintStorePreCompute = pre_compute.borrow();
    execute_e12_impl::<F, CTX, IS_HINT_STOREW>(pre_compute, vm_state);
}

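// E2 wrapper: run the shared implementation and forward the returned word count to the metering
// context as the height change of the owning chip.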
#[create_tco_handler]
unsafe fn execute_e2_impl<
    F: PrimeField32,
    CTX: MeteredExecutionCtxTrait,
    const IS_HINT_STOREW: bool,
>(
    pre_compute: &[u8],
    vm_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &E2PreCompute<HintStorePreCompute> = pre_compute.borrow();
    let height_delta = execute_e12_impl::<F, CTX, IS_HINT_STOREW>(&pre_compute.data, vm_state);
    vm_state
        .ctx
        .on_height_change(pre_compute.chip_idx as usize, height_delta);
}