openvm_rv32im_circuit/jalr/execution.rs

use std::{
    borrow::{Borrow, BorrowMut},
    mem::size_of,
};

use openvm_circuit::{arch::*, system::memory::online::GuestMemory};
use openvm_circuit_primitives_derive::AlignedBytesBorrow;
use openvm_instructions::{
    instruction::Instruction,
    program::{DEFAULT_PC_STEP, PC_BITS},
    riscv::RV32_REGISTER_AS,
};
use openvm_stark_backend::p3_field::PrimeField32;

use super::core::Rv32JalrExecutor;

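/// Pre-decoded JALR operands cached at pre-compute time: the extended
/// immediate plus the `rd` (`a`) and `rs1` (`b`) register indices.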
#[derive(AlignedBytesBorrow, Clone)]
#[repr(C)]
struct JalrPreCompute {
    imm_extended: u32,
    a: u8,
    b: u8,
}

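// Decodes an `Instruction` into `JalrPreCompute`. Operand `c` carries the low bits of the
// immediate and `g` selects whether the upper 16 bits are filled in (sign extension);
// `d` must name the RV32 register address space. The returned flag mirrors `f` and
// controls whether `rd` is actually written during execution.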
impl<A> Rv32JalrExecutor<A> {
    fn pre_compute_impl<F: PrimeField32>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut JalrPreCompute,
    ) -> Result<bool, StaticProgramError> {
        let imm_extended = inst.c.as_canonical_u32() + inst.g.as_canonical_u32() * 0xffff0000;
        if inst.d.as_canonical_u32() != RV32_REGISTER_AS {
            return Err(StaticProgramError::InvalidInstruction(pc));
        }
        *data = JalrPreCompute {
            imm_extended,
            a: inst.a.as_canonical_u32() as u8,
            b: inst.b.as_canonical_u32() as u8,
        };
        let enabled = !inst.f.is_zero();
        Ok(enabled)
    }
}

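// Picks the handler monomorphized for the `ENABLED` const generic, so the
// "write rd" branch is resolved once at pre-compute time rather than per execution.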
macro_rules! dispatch {
    ($execute_impl:ident, $enabled:ident) => {
        if $enabled {
            Ok($execute_impl::<_, _, true>)
        } else {
            Ok($execute_impl::<_, _, false>)
        }
    };
}

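// E1 (plain) execution: pre-compute fills `JalrPreCompute` in the provided buffer and
// returns the matching execute function, or the handler variant under the `tco` feature.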
impl<F, A> Executor<F> for Rv32JalrExecutor<A>
where
    F: PrimeField32,
{
    #[inline(always)]
    fn pre_compute_size(&self) -> usize {
        size_of::<JalrPreCompute>()
    }
    #[cfg(not(feature = "tco"))]
    #[inline(always)]
    fn pre_compute<Ctx: ExecutionCtxTrait>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<ExecuteFunc<F, Ctx>, StaticProgramError> {
        let data: &mut JalrPreCompute = data.borrow_mut();
        let enabled = self.pre_compute_impl(pc, inst, data)?;
        dispatch!(execute_e1_handler, enabled)
    }

    #[cfg(feature = "tco")]
    fn handler<Ctx>(
        &self,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<Handler<F, Ctx>, StaticProgramError>
    where
        Ctx: ExecutionCtxTrait,
    {
        let data: &mut JalrPreCompute = data.borrow_mut();
        let enabled = self.pre_compute_impl(pc, inst, data)?;
        dispatch!(execute_e1_handler, enabled)
    }
}

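// E2 (metered) execution: identical decoding, but the pre-compute record also carries the
// chip index so each executed JALR can report a height change of 1 to the context.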
impl<F, A> MeteredExecutor<F> for Rv32JalrExecutor<A>
where
    F: PrimeField32,
{
    fn metered_pre_compute_size(&self) -> usize {
        size_of::<E2PreCompute<JalrPreCompute>>()
    }

    #[cfg(not(feature = "tco"))]
    fn metered_pre_compute<Ctx>(
        &self,
        chip_idx: usize,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<ExecuteFunc<F, Ctx>, StaticProgramError>
    where
        Ctx: MeteredExecutionCtxTrait,
    {
        let data: &mut E2PreCompute<JalrPreCompute> = data.borrow_mut();
        data.chip_idx = chip_idx as u32;
        let enabled = self.pre_compute_impl(pc, inst, &mut data.data)?;
        dispatch!(execute_e2_handler, enabled)
    }

    #[cfg(feature = "tco")]
    fn metered_handler<Ctx>(
        &self,
        chip_idx: usize,
        pc: u32,
        inst: &Instruction<F>,
        data: &mut [u8],
    ) -> Result<Handler<F, Ctx>, StaticProgramError>
    where
        Ctx: MeteredExecutionCtxTrait,
    {
        let data: &mut E2PreCompute<JalrPreCompute> = data.borrow_mut();
        data.chip_idx = chip_idx as u32;
        let enabled = self.pre_compute_impl(pc, inst, &mut data.data)?;
        dispatch!(execute_e2_handler, enabled)
    }
}

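// Shared JALR semantics for the E1 and E2 paths: read `rs1`, jump to `(rs1 + imm)` with
// the low bit cleared (per the RISC-V spec), and, when ENABLED, write the return address
// `pc + DEFAULT_PC_STEP` into `rd`.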
#[inline(always)]
unsafe fn execute_e12_impl<F: PrimeField32, CTX: ExecutionCtxTrait, const ENABLED: bool>(
    pre_compute: &JalrPreCompute,
    instret: &mut u64,
    pc: &mut u32,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let rs1 = exec_state.vm_read::<u8, 4>(RV32_REGISTER_AS, pre_compute.b as u32);
    let rs1 = u32::from_le_bytes(rs1);
    let to_pc = rs1.wrapping_add(pre_compute.imm_extended);
    // JALR clears bit 0 of the computed target.
    let to_pc = to_pc - (to_pc & 1);
    debug_assert!(to_pc < (1 << PC_BITS));
    let rd = (*pc + DEFAULT_PC_STEP).to_le_bytes();

    if ENABLED {
        // Only write the link register when the instruction's rd write is enabled.
        exec_state.vm_write(RV32_REGISTER_AS, pre_compute.a as u32, &rd);
    }

    *pc = to_pc;
    *instret += 1;
}

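// E1 entry point (wrapped by `#[create_handler]`): reinterprets the raw pre-compute bytes
// as `JalrPreCompute` and delegates to the shared implementation.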
#[create_handler]
#[inline(always)]
unsafe fn execute_e1_impl<F: PrimeField32, CTX: ExecutionCtxTrait, const ENABLED: bool>(
    pre_compute: &[u8],
    instret: &mut u64,
    pc: &mut u32,
    _instret_end: u64,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &JalrPreCompute = pre_compute.borrow();
    execute_e12_impl::<F, CTX, ENABLED>(pre_compute, instret, pc, exec_state);
}

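// E2 (metered) entry point: additionally reports a height change of 1 for this chip
// before running the shared JALR implementation.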
#[create_handler]
#[inline(always)]
unsafe fn execute_e2_impl<F: PrimeField32, CTX: MeteredExecutionCtxTrait, const ENABLED: bool>(
    pre_compute: &[u8],
    instret: &mut u64,
    pc: &mut u32,
    _arg: u64,
    exec_state: &mut VmExecState<F, GuestMemory, CTX>,
) {
    let pre_compute: &E2PreCompute<JalrPreCompute> = pre_compute.borrow();
    exec_state
        .ctx
        .on_height_change(pre_compute.chip_idx as usize, 1);
    execute_e12_impl::<F, CTX, ENABLED>(&pre_compute.data, instret, pc, exec_state);
}