1use openvm_circuit::arch::instructions::program::Program;
2use openvm_instructions::{
3 instruction::{DebugInfo, Instruction},
4 program::DEFAULT_PC_STEP,
5 LocalOpcode, PhantomDiscriminant, PublishOpcode, SysPhantom, SystemOpcode, VmOpcode,
6};
7use openvm_rv32im_transpiler::BranchEqualOpcode;
8use openvm_stark_backend::p3_field::{ExtensionField, PrimeField32, PrimeField64};
9use serde::{Deserialize, Serialize};
10
11use crate::{
12 asm::{AsmInstruction, AssemblyCode},
13 FieldArithmeticOpcode, FieldExtensionOpcode, FriOpcode, NativeBranchEqualOpcode,
14 NativeJalOpcode, NativeLoadStore4Opcode, NativeLoadStoreOpcode, NativePhantom,
15 NativeRangeCheckOpcode, Poseidon2Opcode, VerifyBatchOpcode,
16};
17
/// Options controlling how assembly is lowered to a VM [`Program`].
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct CompilerOptions {
    /// Machine word size used by the compiler — presumably in field elements
    /// (default 8); TODO confirm units against callers.
    pub word_size: usize,
    /// When true, `CycleTrackerStart`/`CycleTrackerEnd` asm instructions emit
    /// debug phantom instructions; when false they compile to nothing.
    pub enable_cycle_tracker: bool,
}
24
25impl Default for CompilerOptions {
26 fn default() -> Self {
27 CompilerOptions {
28 word_size: 8,
29 enable_cycle_tracker: false,
30 }
31 }
32}
33
34impl CompilerOptions {
35 pub fn opcode_with_offset<Opcode: LocalOpcode>(&self, opcode: Opcode) -> VmOpcode {
36 let offset = Opcode::CLASS_OFFSET;
37 VmOpcode::from_usize(offset + opcode.local_usize())
38 }
39 pub fn with_cycle_tracker(mut self) -> Self {
40 self.enable_cycle_tracker = true;
41 self
42 }
43}
44
45fn inst<F: PrimeField64>(opcode: VmOpcode, a: F, b: F, c: F, d: AS, e: AS) -> Instruction<F> {
46 Instruction {
47 opcode,
48 a,
49 b,
50 c,
51 d: d.to_field(),
52 e: e.to_field(),
53 f: F::ZERO,
54 g: F::ZERO,
55 }
56}
57
58#[allow(clippy::too_many_arguments)]
59fn inst_med<F: PrimeField64>(
60 opcode: VmOpcode,
61 a: F,
62 b: F,
63 c: F,
64 d: AS,
65 e: AS,
66 f: AS,
67) -> Instruction<F> {
68 Instruction {
69 opcode,
70 a,
71 b,
72 c,
73 d: d.to_field(),
74 e: e.to_field(),
75 f: f.to_field(),
76 g: F::ZERO,
77 }
78}
79
/// Address spaces used by native instructions.
///
/// The discriminant is the VM address-space id: 0 means the operand is an
/// immediate literal, 4 is native field-element memory.
#[derive(Clone, Copy)]
#[repr(u8)]
pub enum AS {
    Immediate = 0,
    Native = 4,
}
86
87impl AS {
88 fn to_field<F: PrimeField64>(self) -> F {
89 match self {
90 AS::Immediate => F::ZERO,
91 AS::Native => F::from_canonical_u8(AS::Native as u8),
92 }
93 }
94}
95
96fn i32_f<F: PrimeField32>(x: i32) -> F {
97 let modulus = F::ORDER_U32;
98 assert!(x < modulus as i32 && x >= -(modulus as i32));
99 if x < 0 {
100 -F::from_canonical_u32((-x) as u32)
101 } else {
102 F::from_canonical_u32(x as u32)
103 }
104}
105
/// Lowers a single [`AsmInstruction`] into the (possibly several) VM
/// [`Instruction`]s that implement it.
///
/// * `debug_info` is attached to every emitted instruction.
/// * `pc` is the program counter of the first emitted instruction; branch
///   targets are encoded relative to it.
/// * `labels` maps a label value to the absolute pc of the block it names.
///
/// Returns a [`Program`] fragment containing the emitted instructions.
fn convert_instruction<F: PrimeField32, EF: ExtensionField<F>>(
    instruction: AsmInstruction<F, EF>,
    debug_info: Option<DebugInfo>,
    pc: F,
    labels: impl Fn(F) -> F,
    options: &CompilerOptions,
) -> Program<F> {
    let instructions = match instruction {
        // Single-felt load: dst <- mem[src + index * size + offset].
        AsmInstruction::LoadFI(dst, src, index, size, offset) => vec![
            inst(
                options.opcode_with_offset(NativeLoadStoreOpcode::LOADW),
                i32_f(dst),
                index * size + offset,
                i32_f(src),
                AS::Native,
                AS::Native,
            ),
        ],
        // Extension-field (4-felt) load at the same addressing scheme.
        AsmInstruction::LoadEI(dst, src, index, size, offset) => vec![
            inst(
                options.opcode_with_offset(NativeLoadStore4Opcode(NativeLoadStoreOpcode::LOADW)),
                i32_f(dst),
                index * size + offset,
                i32_f(src),
                AS::Native,
                AS::Native,
            ),
        ],
        // Single-felt store: mem[addr + index * size + offset] <- val.
        AsmInstruction::StoreFI(val, addr, index, size, offset) => vec![
            inst(
                options.opcode_with_offset(NativeLoadStoreOpcode::STOREW),
                i32_f(val),
                index * size + offset,
                i32_f(addr),
                AS::Native,
                AS::Native,
            ),
        ],
        // Extension-field (4-felt) store.
        AsmInstruction::StoreEI(val, addr, index, size, offset) => vec![
            inst(
                options.opcode_with_offset(NativeLoadStore4Opcode(NativeLoadStoreOpcode::STOREW)),
                i32_f(val),
                index * size + offset,
                i32_f(addr),
                AS::Native,
                AS::Native,
            ),
        ],
        // Unconditional jump-and-link; operand b is the pc-relative offset to
        // the label's block.
        AsmInstruction::Jump(dst, label) => {
            vec![
                inst(
                    options.opcode_with_offset(NativeJalOpcode::JAL),
                    i32_f(dst),
                    labels(label) - pc,
                    F::ZERO,
                    AS::Native,
                    AS::Immediate,
                ),
            ]
        }
        // Branch to label iff mem[lhs] != mem[rhs].
        AsmInstruction::Bne(label, lhs, rhs) => vec![
            inst(
                options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)),
                i32_f(lhs),
                i32_f(rhs),
                labels(label) - pc,
                AS::Native,
                AS::Native,
            ),
        ],
        // Branch to label iff mem[lhs] != rhs (immediate).
        AsmInstruction::BneI(label, lhs, rhs) => vec![
            inst(
                options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)),
                i32_f(lhs),
                rhs,
                labels(label) - pc,
                AS::Native,
                AS::Immediate,
            ),
        ],
        // Branch to label iff mem[lhs] == mem[rhs].
        AsmInstruction::Beq(label, lhs, rhs) => vec![
            inst(
                options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BEQ)),
                i32_f(lhs),
                i32_f(rhs),
                labels(label) - pc,
                AS::Native,
                AS::Native,
            ),
        ],
        // Branch to label iff mem[lhs] == rhs (immediate).
        AsmInstruction::BeqI(label, lhs, rhs) => vec![
            inst(
                options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BEQ)),
                i32_f(lhs),
                rhs,
                labels(label) - pc,
                AS::Native,
                AS::Immediate,
            ),
        ],
        // Extension-field inequality: D consecutive BNEs, one per limb; any
        // differing limb jumps to the label. Each instruction's relative
        // offset is adjusted for its own pc (pc + i * DEFAULT_PC_STEP).
        AsmInstruction::BneE(label, lhs, rhs) => (0..EF::D)
            .map(|i|
            inst(
                options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)),
                i32_f(lhs + (i as i32)),
                i32_f(rhs + (i as i32)),
                labels(label) - (pc + F::from_canonical_usize(i * DEFAULT_PC_STEP as usize)),
                AS::Native,
                AS::Native,
            ))
            .collect(),
        // Same as BneE but with immediate rhs limbs taken from the extension
        // element's base-field representation.
        AsmInstruction::BneEI(label, lhs, rhs) => (0..EF::D)
            .map(|i|
            inst(
                options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)),
                i32_f(lhs + (i as i32)),
                rhs.as_base_slice()[i],
                labels(label) - (pc + F::from_canonical_usize(i * DEFAULT_PC_STEP as usize)),
                AS::Native,
                AS::Immediate,
            ))
            .collect(),
        // Extension-field equality, emitted in reverse limb order:
        // limbs D-1..1 use BNE with an offset of (i + 1) * DEFAULT_PC_STEP,
        // which skips past the remainder of the sequence on mismatch; limb 0
        // is the final instruction and uses BEQ targeting the label, reached
        // only when every higher limb already matched.
        AsmInstruction::BeqE(label, lhs, rhs) => (0..EF::D)
            .rev()
            .map(|i|
            inst(
                if i == 0 { options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BEQ)) } else { options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)) },
                i32_f(lhs + (i as i32)),
                i32_f(rhs + (i as i32)),
                if i == 0 {
                    // Last instruction sits at pc + (D - 1) * DEFAULT_PC_STEP.
                    labels(label) - (pc + F::from_canonical_usize((EF::D - 1) * DEFAULT_PC_STEP as usize))
                } else {
                    F::from_canonical_usize((i + 1) * DEFAULT_PC_STEP as usize)
                },
                AS::Native,
                AS::Native,
            ))
            .collect(),
        // Same as BeqE with immediate rhs limbs.
        AsmInstruction::BeqEI(label, lhs, rhs) => (0..EF::D)
            .rev()
            .map(|i|
            inst(
                if i == 0 { options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BEQ)) } else { options.opcode_with_offset(NativeBranchEqualOpcode(BranchEqualOpcode::BNE)) },
                i32_f(lhs + (i as i32)),
                rhs.as_base_slice()[i],
                if i == 0 {
                    labels(label) - (pc + F::from_canonical_usize((EF::D - 1) * DEFAULT_PC_STEP as usize))
                } else {
                    F::from_canonical_usize((i + 1) * DEFAULT_PC_STEP as usize)
                },
                AS::Native,
                AS::Immediate,
            ))
            .collect(),
        // Abnormal halt: debug-panic phantom followed by TERMINATE with exit
        // code 1 (operand c = F::ONE).
        AsmInstruction::Trap => vec![
            Instruction::phantom(PhantomDiscriminant(SysPhantom::DebugPanic as u16), F::ZERO, F::ZERO, 0),
            inst(
                options.opcode_with_offset(SystemOpcode::TERMINATE),
                F::ZERO,
                F::ZERO,
                F::ONE,
                AS::Immediate,
                AS::Immediate,
            ),
        ],
        // Normal halt: TERMINATE with exit code 0.
        AsmInstruction::Halt => vec![
            inst(
                options.opcode_with_offset(SystemOpcode::TERMINATE),
                F::ZERO,
                F::ZERO,
                F::ZERO,
                AS::Immediate,
                AS::Immediate,
            ),
        ],
        // Host-hint phantoms: request data from the host's hint stream.
        AsmInstruction::HintInputVec() => vec![
            Instruction::phantom(PhantomDiscriminant(NativePhantom::HintInput as u16), F::ZERO, F::ZERO, 0)
        ],
        AsmInstruction::HintFelt() => vec![
            Instruction::phantom(PhantomDiscriminant(NativePhantom::HintFelt as u16), F::ZERO, F::ZERO, 0)
        ],
        // Hint the bit decomposition of mem[src], `len` bits.
        AsmInstruction::HintBits(src, len) => vec![
            Instruction::phantom(PhantomDiscriminant(NativePhantom::HintBits as u16), i32_f(src), F::from_canonical_u32(len), AS::Native as u16)
        ],
        AsmInstruction::HintLoad() => vec![
            Instruction::phantom(PhantomDiscriminant(NativePhantom::HintLoad as u16), F::ZERO, F::ZERO, 0)
        ],
        // Pop one felt from the hint stream into mem[val + offset].
        AsmInstruction::StoreHintWordI(val, offset) => vec![inst(
            options.opcode_with_offset(NativeLoadStoreOpcode::HINT_STOREW),
            F::ZERO,
            offset,
            i32_f(val),
            AS::Native,
            AS::Native,
        )],
        // Pop one extension element (4 felts) from the hint stream.
        AsmInstruction::StoreHintExtI(val, offset) => vec![inst(
            options.opcode_with_offset(NativeLoadStore4Opcode(NativeLoadStoreOpcode::HINT_STOREW)),
            F::ZERO,
            offset,
            i32_f(val),
            AS::Native,
            AS::Native,
        )],
        // Debug printing phantoms. PrintV and PrintF lower identically here.
        AsmInstruction::PrintV(src) => vec![Instruction::phantom(
            PhantomDiscriminant(NativePhantom::Print as u16),
            i32_f(src),
            F::ZERO,
            AS::Native as u16,
        )],
        AsmInstruction::PrintF(src) => vec![Instruction::phantom(
            PhantomDiscriminant(NativePhantom::Print as u16),
            i32_f(src),
            F::ZERO,
            AS::Native as u16,
        )],
        // Print an extension element: one print phantom per limb.
        AsmInstruction::PrintE(src) => (0..EF::D as i32)
            .map(|i| {
                Instruction::phantom(
                    PhantomDiscriminant(NativePhantom::Print as u16),
                    i32_f(src + i),
                    F::ZERO,
                    AS::Native as u16,
                )
            })
            .collect(),
        // Materialize an immediate: dst <- val + 0 via field ADD.
        AsmInstruction::ImmF(dst, val) =>
            vec![inst_med(
                options.opcode_with_offset(FieldArithmeticOpcode::ADD),
                i32_f(dst),
                val,
                F::ZERO,
                AS::Native,
                AS::Immediate,
                AS::Native,
            )],
        // Register copy: dst <- mem[src] + 0 (immediate zero).
        AsmInstruction::CopyF(dst, src) =>
            vec![inst_med(
                options.opcode_with_offset(FieldArithmeticOpcode::ADD),
                i32_f(dst),
                i32_f(src),
                F::ZERO,
                AS::Native,
                AS::Native,
                AS::Immediate
            )],
        // Base-field arithmetic, both operands in native memory.
        AsmInstruction::AddF(dst, lhs, rhs) | AsmInstruction::SubF(dst, lhs, rhs) | AsmInstruction::MulF(dst, lhs, rhs) | AsmInstruction::DivF(dst, lhs, rhs) => vec![
            inst_med(
                match instruction {
                    AsmInstruction::AddF(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::ADD),
                    AsmInstruction::SubF(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::SUB),
                    AsmInstruction::MulF(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::MUL),
                    AsmInstruction::DivF(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::DIV),
                    _ => unreachable!(),
                },
                i32_f(dst),
                i32_f(lhs),
                i32_f(rhs),
                AS::Native,
                AS::Native,
                AS::Native,
            ),
        ],
        // Base-field arithmetic with rhs as an immediate.
        AsmInstruction::AddFI(dst, lhs, rhs) | AsmInstruction::SubFI(dst, lhs, rhs) | AsmInstruction::MulFI(dst, lhs, rhs) | AsmInstruction::DivFI(dst, lhs, rhs) => vec![
            inst_med(
                match instruction {
                    AsmInstruction::AddFI(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::ADD),
                    AsmInstruction::SubFI(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::SUB),
                    AsmInstruction::MulFI(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::MUL),
                    AsmInstruction::DivFI(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::DIV),
                    _ => unreachable!(),
                },
                i32_f(dst),
                i32_f(lhs),
                rhs,
                AS::Native,
                AS::Native,
                AS::Immediate,
            ),
        ],
        // Non-commutative ops with lhs as an immediate (imm - mem, imm / mem).
        AsmInstruction::SubFIN(dst, lhs, rhs) | AsmInstruction::DivFIN(dst, lhs, rhs) => vec![
            inst_med(
                match instruction {
                    AsmInstruction::SubFIN(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::SUB),
                    AsmInstruction::DivFIN(_, _, _) => options.opcode_with_offset(FieldArithmeticOpcode::DIV),
                    _ => unreachable!(),
                },
                i32_f(dst),
                lhs,
                i32_f(rhs),
                AS::Native,
                AS::Immediate,
                AS::Native,
            ),
        ],
        // Extension-field (degree-4) arithmetic.
        AsmInstruction::AddE(dst, lhs, rhs) | AsmInstruction::SubE(dst, lhs, rhs) | AsmInstruction::MulE(dst, lhs, rhs) | AsmInstruction::DivE(dst, lhs, rhs) => vec![
            inst(
                match instruction {
                    AsmInstruction::AddE(_, _, _) => options.opcode_with_offset(FieldExtensionOpcode::FE4ADD),
                    AsmInstruction::SubE(_, _, _) => options.opcode_with_offset(FieldExtensionOpcode::FE4SUB),
                    AsmInstruction::MulE(_, _, _) => options.opcode_with_offset(FieldExtensionOpcode::BBE4MUL),
                    AsmInstruction::DivE(_, _, _) => options.opcode_with_offset(FieldExtensionOpcode::BBE4DIV),
                    _ => unreachable!(),
                },
                i32_f(dst),
                i32_f(lhs),
                i32_f(rhs),
                AS::Native,
                AS::Native,
            )],
        // Poseidon2 two-to-one compression: dst <- compress(src1, src2).
        AsmInstruction::Poseidon2Compress(dst, src1, src2) => vec![inst(
            options.opcode_with_offset(Poseidon2Opcode::COMP_POS2),
            i32_f(dst),
            i32_f(src1),
            i32_f(src2),
            AS::Native,
            AS::Native,
        )],
        // Poseidon2 full-state permutation: dst <- perm(src).
        AsmInstruction::Poseidon2Permute(dst, src) => vec![inst(
            options.opcode_with_offset(Poseidon2Opcode::PERM_POS2),
            i32_f(dst),
            i32_f(src),
            F::ZERO,
            AS::Native,
            AS::Native,
        )],
        // Cycle-tracker markers compile to debug phantoms only when enabled.
        AsmInstruction::CycleTrackerStart() => {
            if options.enable_cycle_tracker {
                vec![Instruction::debug(PhantomDiscriminant(SysPhantom::CtStart as u16))]
            } else {
                vec![]
            }
        }
        AsmInstruction::CycleTrackerEnd() => {
            if options.enable_cycle_tracker {
                vec![Instruction::debug(PhantomDiscriminant(SysPhantom::CtEnd as u16))]
            } else {
                vec![]
            }
        }
        // Publish mem[val] as the public value at index mem[index].
        AsmInstruction::Publish(val, index) => vec![inst_med(
            options.opcode_with_offset(PublishOpcode::PUBLISH),
            F::ZERO,
            i32_f(val),
            i32_f(index),
            AS::Immediate,
            AS::Native,
            AS::Native,
        )],
        // FRI reduced opening: the seven asm operands map 1:1 onto the seven
        // instruction fields a..g.
        AsmInstruction::FriReducedOpening(a, b, length, alpha, res, hint_id, is_init) => vec![Instruction {
            opcode: options.opcode_with_offset(FriOpcode::FRI_REDUCED_OPENING),
            a: i32_f(a),
            b: i32_f(b),
            c: i32_f(length),
            d: i32_f(alpha),
            e: i32_f(res),
            f: i32_f(hint_id),
            g: F::ONE,
        }],
        // Batch Merkle verification over base-field openings; g = 1 here vs
        // g = 4^{-1} for the extension variant below — presumably encodes the
        // per-element size for the VerifyBatch chip; confirm against the chip.
        AsmInstruction::VerifyBatchFelt(dim, opened, opened_length, sibling, index, commit) => vec![Instruction {
            opcode: options.opcode_with_offset(VerifyBatchOpcode::VERIFY_BATCH),
            a: i32_f(dim),
            b: i32_f(opened),
            c: i32_f(opened_length),
            d: i32_f(sibling),
            e: i32_f(index),
            f: i32_f(commit),
            g: F::ONE,
        }],
        // Batch Merkle verification over extension-field openings (g = 4^{-1}).
        AsmInstruction::VerifyBatchExt(dim, opened, opened_length, sibling, index, commit) => vec![Instruction {
            opcode: options.opcode_with_offset(VerifyBatchOpcode::VERIFY_BATCH),
            a: i32_f(dim),
            b: i32_f(opened),
            c: i32_f(opened_length),
            d: i32_f(sibling),
            e: i32_f(index),
            f: i32_f(commit),
            g: F::from_canonical_usize(4).inverse(),
        }],
        // Range check mem[v] against bit bounds; the chip's supported limits
        // are x_bit <= 16 and y_bit <= 14 — presumably a 16/14-bit limb
        // decomposition; confirm against NativeRangeCheck.
        AsmInstruction::RangeCheck(v, x_bit, y_bit) => {
            assert!((0..=16).contains(&x_bit));
            assert!((0..=14).contains(&y_bit));
            vec!
            [inst(
                options.opcode_with_offset(NativeRangeCheckOpcode::RANGE_CHECK),
                i32_f(v),
                i32_f(x_bit),
                i32_f(y_bit),
                AS::Native,
                AS::Immediate,
            )]
        }
    };

    // Attach the same debug info to every emitted instruction.
    let debug_infos = vec![debug_info; instructions.len()];

    Program::from_instructions_and_debug_infos(&instructions, &debug_infos)
}
533
534pub fn convert_program<F: PrimeField32, EF: ExtensionField<F>>(
535 program: AssemblyCode<F, EF>,
536 options: CompilerOptions,
537) -> Program<F> {
538 let init_register_0 = inst_med(
540 options.opcode_with_offset(FieldArithmeticOpcode::ADD),
541 F::ZERO,
542 F::ZERO,
543 F::ZERO,
544 AS::Native,
545 AS::Immediate,
546 AS::Immediate,
547 );
548 let init_debug_info = None;
549
550 let mut block_start = vec![];
551 let mut pc_idx = 1;
552 for block in program.blocks.iter() {
553 block_start.push(pc_idx * DEFAULT_PC_STEP);
554
555 for (instruction, debug_info) in block.0.iter().zip(block.1.iter()) {
556 let instructions = convert_instruction::<F, EF>(
558 instruction.clone(),
559 debug_info.clone(),
560 F::from_canonical_u32(pc_idx * DEFAULT_PC_STEP),
561 |label| label,
562 &options,
563 );
564 pc_idx += instructions.len() as u32;
565 }
566 }
567
568 let mut result = Program::new_empty(0);
569 result.push_instruction_and_debug_info(init_register_0, init_debug_info);
570 for block in program.blocks.iter() {
571 for (instruction, debug_info) in block.0.iter().zip(block.1.iter()) {
572 let cur_size = result.len() as u32;
573 let cur_pc = cur_size * DEFAULT_PC_STEP;
574
575 let labels =
576 |label: F| F::from_canonical_u32(block_start[label.as_canonical_u64() as usize]);
577 let local_result = convert_instruction(
578 instruction.clone(),
579 debug_info.clone(),
580 F::from_canonical_u32(cur_pc),
581 labels,
582 &options,
583 );
584
585 result.append(local_result);
586 }
587 }
588
589 result
590}