1use openvm_circuit::arch::instructions::program::Program;
2use openvm_instructions::{
3 instruction::{DebugInfo, Instruction},
4 program::{DEFAULT_MAX_NUM_PUBLIC_VALUES, DEFAULT_PC_STEP},
5 LocalOpcode, PhantomDiscriminant, PublishOpcode, SysPhantom, SystemOpcode, VmOpcode,
6};
7use openvm_rv32im_transpiler::BranchEqualOpcode;
8use openvm_stark_backend::p3_field::{ExtensionField, PrimeField32, PrimeField64};
9use serde::{Deserialize, Serialize};
10
11use crate::{
12 asm::{AsmInstruction, AssemblyCode},
13 FieldArithmeticOpcode, FieldExtensionOpcode, FriOpcode, NativeBranchEqualOpcode,
14 NativeJalOpcode, NativeLoadStore4Opcode, NativeLoadStoreOpcode, NativePhantom,
15 NativeRangeCheckOpcode, Poseidon2Opcode, VerifyBatchOpcode,
16};
17
18#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
19pub struct CompilerOptions {
20 pub word_size: usize,
22 pub enable_cycle_tracker: bool,
23}
24
25impl Default for CompilerOptions {
26 fn default() -> Self {
27 CompilerOptions {
28 word_size: 8,
29 enable_cycle_tracker: false,
30 }
31 }
32}
33
34impl CompilerOptions {
35 pub fn opcode_with_offset<Opcode: LocalOpcode>(&self, opcode: Opcode) -> VmOpcode {
36 let offset = Opcode::CLASS_OFFSET;
37 VmOpcode::from_usize(offset + opcode.local_usize())
38 }
39 pub fn with_cycle_tracker(mut self) -> Self {
40 self.enable_cycle_tracker = true;
41 self
42 }
43}
44
45fn inst<F: PrimeField64>(opcode: VmOpcode, a: F, b: F, c: F, d: AS, e: AS) -> Instruction<F> {
46 Instruction {
47 opcode,
48 a,
49 b,
50 c,
51 d: d.to_field(),
52 e: e.to_field(),
53 f: F::ZERO,
54 g: F::ZERO,
55 }
56}
57
58#[allow(clippy::too_many_arguments)]
59fn inst_med<F: PrimeField64>(
60 opcode: VmOpcode,
61 a: F,
62 b: F,
63 c: F,
64 d: AS,
65 e: AS,
66 f: AS,
67) -> Instruction<F> {
68 Instruction {
69 opcode,
70 a,
71 b,
72 c,
73 d: d.to_field(),
74 e: e.to_field(),
75 f: f.to_field(),
76 g: F::ZERO,
77 }
78}
79
80#[derive(Clone, Copy)]
81#[repr(u8)]
82pub enum AS {
83 Immediate = 0,
84 Native = 4,
85}
86
87impl AS {
88 fn to_field<F: PrimeField64>(self) -> F {
89 match self {
90 AS::Immediate => F::ZERO,
91 AS::Native => F::from_canonical_u8(AS::Native as u8),
92 }
93 }
94}
95
96fn i32_f<F: PrimeField32>(x: i32) -> F {
97 let modulus = F::ORDER_U32;
98 assert!(x < modulus as i32 && x >= -(modulus as i32));
99 if x < 0 {
100 -F::from_canonical_u32((-x) as u32)
101 } else {
102 F::from_canonical_u32(x as u32)
103 }
104}
105
106fn convert_instruction<F: PrimeField32, EF: ExtensionField<F>>(
108 instruction: AsmInstruction<F, EF>,
109 debug_info: Option<DebugInfo>,
110 pc: F,
111 labels: impl Fn(F) -> F,
112 options: &CompilerOptions,
113) -> Program<F> {
114 let instructions = match instruction {
115 AsmInstruction::LoadFI(dst, src, index, size, offset) => vec![
116 inst(
118 options.opcode_with_offset(NativeLoadStoreOpcode::LOADW),
119 i32_f(dst),
120 index * size + offset,
121 i32_f(src),
122 AS::Native,
123 AS::Native,
124 ),
125 ],
126 AsmInstruction::LoadEI(dst, src, index, size, offset) => vec![
127 inst(
129 options.opcode_with_offset(NativeLoadStore4Opcode(NativeLoadStoreOpcode::LOADW)),
130 i32_f(dst),
131 index * size + offset,
132 i32_f(src),
133 AS::Native,
134 AS::Native,
135 ),
136 ],
137 AsmInstruction::StoreFI(val, addr, index, size, offset) => vec![
138 inst(
140 options.opcode_with_offset(NativeLoadStoreOpcode::STOREW),
141 i32_f(val),
142 index * size + offset,
143 i32_f(addr),
144 AS::Native,
145 AS::Native,
146 ),
147 ],
148 AsmInstruction::StoreEI(val, addr, index, size, offset) => vec![
149 inst(
151 options.opcode_with_offset(NativeLoadStore4Opcode(NativeLoadStoreOpcode::STOREW)),
152 i32_f(val),
153 index * size + offset,
154 i32_f(addr),
155 AS::Native,
156 AS::Native,
157 ),
158 ],
159 AsmInstruction::Jump(dst, label) => {
160 vec![
161 inst(
163 options.opcode_with_offset(NativeJalOpcode::JAL),
164 i32_f(dst),
165 labels(label) - pc,
166 F::ZERO,
167 AS::Native,
168 AS::Immediate,
169 ),
170 ]
171 }
172 AsmInstruction::Bne(label, lhs, rhs) => vec![
173 inst(
175 options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)),
176 i32_f(lhs),
177 i32_f(rhs),
178 labels(label) - pc,
179 AS::Native,
180 AS::Native,
181 ),
182 ],
183 AsmInstruction::BneI(label, lhs, rhs) => vec![
184 inst(
186 options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)),
187 i32_f(lhs),
188 rhs,
189 labels(label) - pc,
190 AS::Native,
191 AS::Immediate,
192 ),
193 ],
194 AsmInstruction::Beq(label, lhs, rhs) => vec![
195 inst(
197 options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BEQ)),
198 i32_f(lhs),
199 i32_f(rhs),
200 labels(label) - pc,
201 AS::Native,
202 AS::Native,
203 ),
204 ],
205 AsmInstruction::BeqI(label, lhs, rhs) => vec![
206 inst(
208 options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BEQ)),
209 i32_f(lhs),
210 rhs,
211 labels(label) - pc,
212 AS::Native,
213 AS::Immediate,
214 ),
215 ],
216 AsmInstruction::BneE(label, lhs, rhs) => (0..EF::D)
217 .map(|i|
218 inst(
220 options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)),
221 i32_f(lhs + (i as i32)),
222 i32_f(rhs + (i as i32)),
223 labels(label) - (pc + F::from_canonical_usize(i * DEFAULT_PC_STEP as usize)),
224 AS::Native,
225 AS::Native,
226 ))
227 .collect(),
228 AsmInstruction::BneEI(label, lhs, rhs) => (0..EF::D)
229 .map(|i|
230 inst(
232 options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)),
233 i32_f(lhs + (i as i32)),
234 rhs.as_base_slice()[i],
235 labels(label) - (pc + F::from_canonical_usize(i * DEFAULT_PC_STEP as usize)),
236 AS::Native,
237 AS::Immediate,
238 ))
239 .collect(),
240 AsmInstruction::BeqE(label, lhs, rhs) => (0..EF::D)
241 .rev()
242 .map(|i|
243 inst(
245 if i == 0 { options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BEQ)) } else { options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)) },
246 i32_f(lhs + (i as i32)),
247 i32_f(rhs + (i as i32)),
248 if i == 0 {
249 labels(label) - (pc + F::from_canonical_usize((EF::D - 1) * DEFAULT_PC_STEP as usize))
250 } else {
251 F::from_canonical_usize((i + 1) * DEFAULT_PC_STEP as usize)
252 },
253 AS::Native,
254 AS::Native,
255 ))
256 .collect(),
257 AsmInstruction::BeqEI(label, lhs, rhs) => (0..EF::D)
258 .rev()
259 .map(|i|
260 inst(
262 if i == 0 { options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BEQ)) } else { options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)) },
263 i32_f(lhs + (i as i32)),
264 rhs.as_base_slice()[i],
265 if i == 0 {
266 labels(label) - (pc + F::from_canonical_usize((EF::D - 1) * DEFAULT_PC_STEP as usize))
267 } else {
268 F::from_canonical_usize((i + 1) * DEFAULT_PC_STEP as usize)
269 },
270 AS::Native,
271 AS::Immediate,
272 ))
273 .collect(),
274 AsmInstruction::Trap => vec![
275 Instruction::phantom(PhantomDiscriminant(SysPhantom::DebugPanic as u16), F::ZERO, F::ZERO, 0),
276 inst(
278 options.opcode_with_offset(SystemOpcode::TERMINATE),
279 F::ZERO,
280 F::ZERO,
281 F::ONE,
282 AS::Immediate,
283 AS::Immediate,
284 ),
285 ],
286 AsmInstruction::Halt => vec![
287 inst(
289 options.opcode_with_offset(SystemOpcode::TERMINATE),
290 F::ZERO,
291 F::ZERO,
292 F::ZERO,
293 AS::Immediate,
294 AS::Immediate,
295 ),
296 ],
297 AsmInstruction::HintInputVec() => vec![
298 Instruction::phantom(PhantomDiscriminant(NativePhantom::HintInput as u16), F::ZERO, F::ZERO, 0)
299 ],
300 AsmInstruction::HintFelt() => vec![
301 Instruction::phantom(PhantomDiscriminant(NativePhantom::HintFelt as u16), F::ZERO, F::ZERO, 0)
302 ],
303 AsmInstruction::HintBits(src, len) => vec![
304 Instruction::phantom(PhantomDiscriminant(NativePhantom::HintBits as u16), i32_f(src), F::from_canonical_u32(len), AS::Native as u16)
305 ],
306 AsmInstruction::HintLoad() => vec![
307 Instruction::phantom(PhantomDiscriminant(NativePhantom::HintLoad as u16), F::ZERO, F::ZERO, 0)
308 ],
309 AsmInstruction::StoreHintWordI(val, offset) => vec![inst(
310 options.opcode_with_offset(NativeLoadStoreOpcode::HINT_STOREW),
311 F::ZERO,
312 offset,
313 i32_f(val),
314 AS::Native,
315 AS::Native,
316 )],
317 AsmInstruction::StoreHintExtI(val, offset) => vec![inst(
318 options.opcode_with_offset(NativeLoadStore4Opcode(NativeLoadStoreOpcode::HINT_STOREW)),
319 F::ZERO,
320 offset,
321 i32_f(val),
322 AS::Native,
323 AS::Native,
324 )],
325 AsmInstruction::PrintV(src) => vec![Instruction::phantom(
326 PhantomDiscriminant(NativePhantom::Print as u16),
327 i32_f(src),
328 F::ZERO,
329 AS::Native as u16,
330 )],
331 AsmInstruction::PrintF(src) => vec![Instruction::phantom(
332 PhantomDiscriminant(NativePhantom::Print as u16),
333 i32_f(src),
334 F::ZERO,
335 AS::Native as u16,
336 )],
337 AsmInstruction::PrintE(src) => (0..EF::D as i32)
338 .map(|i| {
339 Instruction::phantom(
340 PhantomDiscriminant(NativePhantom::Print as u16),
341 i32_f(src + i),
342 F::ZERO,
343 AS::Native as u16,
344 )
345 })
346 .collect(),
347 AsmInstruction::ImmF(dst, val) =>
348 vec![inst_med(
349 options.opcode_with_offset(FieldArithmeticOpcode::ADD),
350 i32_f(dst),
351 val,
352 F::ZERO,
353 AS::Native,
354 AS::Immediate,
355 AS::Native,
356 )],
357 AsmInstruction::CopyF(dst, src) =>
358 vec![inst_med(
359 options.opcode_with_offset(FieldArithmeticOpcode::ADD),
360 i32_f(dst),
361 i32_f(src),
362 F::ZERO,
363 AS::Native,
364 AS::Native,
365 AS::Immediate
366 )],
367 AsmInstruction::AddF(dst, lhs, rhs) | AsmInstruction::SubF(dst, lhs, rhs) | AsmInstruction::MulF(dst, lhs, rhs) | AsmInstruction::DivF(dst, lhs, rhs) => vec![
368 inst_med(
373 match instruction {
374 AsmInstruction::AddF(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::ADD),
375 AsmInstruction::SubF(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::SUB),
376 AsmInstruction::MulF(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::MUL),
377 AsmInstruction::DivF(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::DIV),
378 _ => unreachable!(),
379 },
380 i32_f(dst),
381 i32_f(lhs),
382 i32_f(rhs),
383 AS::Native,
384 AS::Native,
385 AS::Native,
386 ),
387 ],
388 AsmInstruction::AddFI(dst, lhs, rhs) | AsmInstruction::SubFI(dst, lhs, rhs) | AsmInstruction::MulFI(dst, lhs, rhs) | AsmInstruction::DivFI(dst, lhs, rhs) => vec![
389 inst_med(
394 match instruction {
395 AsmInstruction::AddFI(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::ADD),
396 AsmInstruction::SubFI(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::SUB),
397 AsmInstruction::MulFI(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::MUL),
398 AsmInstruction::DivFI(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::DIV),
399 _ => unreachable!(),
400 },
401 i32_f(dst),
402 i32_f(lhs),
403 rhs,
404 AS::Native,
405 AS::Native,
406 AS::Immediate,
407 ),
408 ],
409 AsmInstruction::SubFIN(dst, lhs, rhs) | AsmInstruction::DivFIN(dst, lhs, rhs) => vec![
410 inst_med(
413 match instruction {
414 AsmInstruction::SubFIN(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::SUB),
415 AsmInstruction::DivFIN(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::DIV),
416 _ => unreachable!(),
417 },
418 i32_f(dst),
419 lhs,
420 i32_f(rhs),
421 AS::Native,
422 AS::Immediate,
423 AS::Native,
424 ),
425 ],
426 AsmInstruction::AddE(dst, lhs, rhs) | AsmInstruction::SubE(dst, lhs, rhs) | AsmInstruction::MulE(dst, lhs, rhs) | AsmInstruction::DivE(dst, lhs, rhs) => vec![
427 inst(
430 match instruction {
431 AsmInstruction::AddE(_, _, _) => options.opcode_with_offset(FieldExtensionOpcode::FE4ADD),
432 AsmInstruction::SubE(_, _, _) => options.opcode_with_offset(FieldExtensionOpcode::FE4SUB),
433 AsmInstruction::MulE(_, _, _) => options.opcode_with_offset(FieldExtensionOpcode::BBE4MUL),
434 AsmInstruction::DivE(_, _, _) => options.opcode_with_offset(FieldExtensionOpcode::BBE4DIV),
435 _ => unreachable!(),
436 },
437 i32_f(dst),
438 i32_f(lhs),
439 i32_f(rhs),
440 AS::Native,
441 AS::Native,
442 )],
443 AsmInstruction::Poseidon2Compress(dst, src1, src2) => vec![inst(
444 options.opcode_with_offset(Poseidon2Opcode::COMP_POS2),
445 i32_f(dst),
446 i32_f(src1),
447 i32_f(src2),
448 AS::Native,
449 AS::Native,
450 )],
451 AsmInstruction::Poseidon2Permute(dst, src) => vec![inst(
452 options.opcode_with_offset(Poseidon2Opcode::PERM_POS2),
453 i32_f(dst),
454 i32_f(src),
455 F::ZERO,
456 AS::Native,
457 AS::Native,
458 )],
459 AsmInstruction::CycleTrackerStart() => {
460 if options.enable_cycle_tracker {
461 vec![Instruction::debug(PhantomDiscriminant(SysPhantom::CtStart as u16))]
462 } else {
463 vec![]
464 }
465 }
466 AsmInstruction::CycleTrackerEnd() => {
467 if options.enable_cycle_tracker {
468 vec![Instruction::debug(PhantomDiscriminant(SysPhantom::CtEnd as u16))]
469 } else {
470 vec![]
471 }
472 }
473 AsmInstruction::Publish(val, index) => vec![inst_med(
474 options.opcode_with_offset(PublishOpcode::PUBLISH),
475 F::ZERO,
476 i32_f(val),
477 i32_f(index),
478 AS::Immediate,
479 AS::Native,
480 AS::Native,
481 )],
482 AsmInstruction::FriReducedOpening(a, b, length, alpha, res, hint_id, is_init) => vec![Instruction {
483 opcode: options.opcode_with_offset(FriOpcode::FRI_REDUCED_OPENING),
484 a: i32_f(a),
485 b: i32_f(b),
486 c: i32_f(length),
487 d: i32_f(alpha),
488 e: i32_f(res),
489 f: i32_f(hint_id),
490 g: i32_f(is_init),
491 }],
492 AsmInstruction::VerifyBatchFelt(dim, opened, opened_length, sibling, index, commit) => vec![Instruction {
493 opcode: options.opcode_with_offset(VerifyBatchOpcode::VERIFY_BATCH),
494 a: i32_f(dim),
495 b: i32_f(opened),
496 c: i32_f(opened_length),
497 d: i32_f(sibling),
498 e: i32_f(index),
499 f: i32_f(commit),
500 g: F::ONE,
501 }],
502 AsmInstruction::VerifyBatchExt(dim, opened, opened_length, sibling, index, commit) => vec![Instruction {
503 opcode: options.opcode_with_offset(VerifyBatchOpcode::VERIFY_BATCH),
504 a: i32_f(dim),
505 b: i32_f(opened),
506 c: i32_f(opened_length),
507 d: i32_f(sibling),
508 e: i32_f(index),
509 f: i32_f(commit),
510 g: F::from_canonical_usize(4).inverse(),
511 }],
512 AsmInstruction::RangeCheck(v, x_bit, y_bit) => {
513 assert!((0..=16).contains(&x_bit));
514 assert!((0..=14).contains(&y_bit));
515 vec!
516 [inst(
517 options.opcode_with_offset(NativeRangeCheckOpcode::RANGE_CHECK),
518 i32_f(v),
519 i32_f(x_bit),
520 i32_f(y_bit),
521 AS::Native,
522 AS::Immediate,
524 )]
525 }
526 };
527
528 let debug_infos = vec![debug_info; instructions.len()];
529
530 Program::from_instructions_and_debug_infos(&instructions, &debug_infos)
531}
532
533pub fn convert_program<F: PrimeField32, EF: ExtensionField<F>>(
534 program: AssemblyCode<F, EF>,
535 options: CompilerOptions,
536) -> Program<F> {
537 let init_register_0 = inst_med(
539 options.opcode_with_offset(FieldArithmeticOpcode::ADD),
540 F::ZERO,
541 F::ZERO,
542 F::ZERO,
543 AS::Native,
544 AS::Immediate,
545 AS::Immediate,
546 );
547 let init_debug_info = None;
548
549 let mut block_start = vec![];
550 let mut pc_idx = 1;
551 for block in program.blocks.iter() {
552 block_start.push(pc_idx * DEFAULT_PC_STEP);
553
554 for (instruction, debug_info) in block.0.iter().zip(block.1.iter()) {
555 let instructions = convert_instruction::<F, EF>(
557 instruction.clone(),
558 debug_info.clone(),
559 F::from_canonical_u32(pc_idx * DEFAULT_PC_STEP),
560 |label| label,
561 &options,
562 );
563 pc_idx += instructions.len() as u32;
564 }
565 }
566
567 let mut result = Program::new_empty(DEFAULT_PC_STEP, 0, DEFAULT_MAX_NUM_PUBLIC_VALUES);
568 result.push_instruction_and_debug_info(init_register_0, init_debug_info);
569 for block in program.blocks.iter() {
570 for (instruction, debug_info) in block.0.iter().zip(block.1.iter()) {
571 let cur_size = result.len() as u32;
572 let cur_pc = cur_size * DEFAULT_PC_STEP;
573
574 let labels =
575 |label: F| F::from_canonical_u32(block_start[label.as_canonical_u64() as usize]);
576 let local_result = convert_instruction(
577 instruction.clone(),
578 debug_info.clone(),
579 F::from_canonical_u32(cur_pc),
580 labels,
581 &options,
582 );
583
584 result.append(local_result);
585 }
586 }
587
588 result
589}