use std::{array::from_fn, fmt::Debug, num::NonZero};

use getset::Getters;
use itertools::zip_eq;
use openvm_instructions::exe::SparseMemoryImage;
use openvm_stark_backend::{
    p3_field::{Field, PrimeField32},
    p3_maybe_rayon::prelude::*,
    p3_util::log2_strict_usize,
};
use tracing::instrument;

use crate::{
    arch::{
        AddressSpaceHostConfig, AddressSpaceHostLayout, DenseRecordArena, MemoryConfig,
        RecordArena, MAX_CELL_BYTE_SIZE,
    },
    system::{
        memory::{
            adapter::records::{AccessLayout, AccessRecordHeader, MERGE_AND_NOT_SPLIT_FLAG},
            MemoryAddress, TimestampedEquipartition, TimestampedValues, CHUNK,
        },
        TouchedMemory,
    },
    utils::slice_as_bytes,
};

mod basic;
#[cfg(any(unix, windows))]
mod memmap;
mod paged_vec;

#[cfg(not(any(unix, windows)))]
pub use basic::*;
#[cfg(any(unix, windows))]
pub use memmap::*;
pub use paged_vec::PagedVec;

#[cfg(all(any(unix, windows), not(feature = "basic-memory")))]
pub type MemoryBackend = memmap::MmapMemory;
#[cfg(any(not(any(unix, windows)), feature = "basic-memory"))]
pub type MemoryBackend = basic::BasicMemory;

pub const INITIAL_TIMESTAMP: u32 = 0;
pub const PAGE_SIZE: usize = 4096;

const MAX_BLOCK_SIZE: usize = 32;
const MIN_ALIGN: usize = 1;
const MAX_SEGMENTS: usize = MAX_BLOCK_SIZE / MIN_ALIGN;

pub type Address = (u32, u32);

/// A contiguous, byte-addressable region of host memory backing one address space.
pub trait LinearMemory {
    /// Creates a new memory with `size` bytes.
    fn new(size: usize) -> Self;
    /// Size of the memory in bytes.
    fn size(&self) -> usize;
    /// Returns the memory as a byte slice.
    fn as_slice(&self) -> &[u8];
    /// Returns the memory as a mutable byte slice.
    fn as_mut_slice(&mut self) -> &mut [u8];
    /// Zeroes the entire memory.
    fn fill_zero(&mut self) {
        self.as_mut_slice().fill(0);
    }
    /// Reads a `BLOCK` at byte offset `from`; the range must be in bounds and `from` aligned
    /// to `align_of::<BLOCK>()`.
    unsafe fn read<BLOCK: Copy>(&self, from: usize) -> BLOCK;
    /// Reads a `BLOCK` at byte offset `from` without any alignment requirement.
    unsafe fn read_unaligned<BLOCK: Copy>(&self, from: usize) -> BLOCK;
    /// Writes `values` at byte offset `start`; the range must be in bounds and `start` aligned
    /// to `align_of::<BLOCK>()`.
    unsafe fn write<BLOCK: Copy>(&mut self, start: usize, values: BLOCK);
    /// Writes `values` at byte offset `start` without any alignment requirement.
    unsafe fn write_unaligned<BLOCK: Copy>(&mut self, start: usize, values: BLOCK);
    /// Swaps the `BLOCK` at byte offset `start` with `*values`.
    unsafe fn swap<BLOCK: Copy>(&mut self, start: usize, values: &mut BLOCK);
    /// Copies `data` into memory starting at byte offset `to`.
    unsafe fn copy_nonoverlapping<T: Copy>(&mut self, to: usize, data: &[T]);
    /// Returns a slice of `len` elements of `T` starting at byte offset `start`, which must be
    /// aligned to `align_of::<T>()`.
    unsafe fn get_aligned_slice<T: Copy>(&self, start: usize, len: usize) -> &[T];
}
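
// Illustrative sketch (not part of the original source): exercises the `LinearMemory`
// contract through the platform-selected `MemoryBackend` using only byte-sized accesses,
// so no alignment assumptions beyond the trait contract above are needed.
#[cfg(test)]
mod linear_memory_sketch {
    use super::*;

    #[test]
    fn write_then_read_bytes() {
        let mut mem = MemoryBackend::new(PAGE_SIZE);
        assert!(mem.size() >= PAGE_SIZE);
        // SAFETY: the range [0, 4) is in bounds and `[u8; 4]` has alignment 1.
        unsafe {
            mem.copy_nonoverlapping(0, &[1u8, 2, 3, 4]);
            assert_eq!(mem.read::<[u8; 4]>(0), [1, 2, 3, 4]);
        }
    }
}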

/// The complete host memory image of the guest: one [`LinearMemory`] per address space,
/// together with the host-side configuration of each address space.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct AddressMap<M: LinearMemory = MemoryBackend> {
    /// The underlying memory of each address space, indexed by address space.
    pub mem: Vec<M>,
    /// The host configuration of each address space.
    pub config: Vec<AddressSpaceHostConfig>,
}

impl Default for AddressMap {
    fn default() -> Self {
        Self::from_mem_config(&MemoryConfig::default())
    }
}

impl<M: LinearMemory> AddressMap<M> {
    pub fn new(config: Vec<AddressSpaceHostConfig>) -> Self {
        assert_eq!(config[0].num_cells, 0, "Address space 0 must have 0 cells");
        let mem = config
            .iter()
            .map(|config| M::new(config.num_cells.checked_mul(config.layout.size()).unwrap()))
            .collect();
        Self { mem, config }
    }

    pub fn from_mem_config(mem_config: &MemoryConfig) -> Self {
        Self::new(mem_config.addr_spaces.clone())
    }

    #[inline(always)]
    pub fn get_memory(&self) -> &Vec<M> {
        &self.mem
    }

    #[inline(always)]
    pub fn get_memory_mut(&mut self) -> &mut Vec<M> {
        &mut self.mem
    }

    pub fn fill_zero(&mut self) {
        for mem in &mut self.mem {
            mem.fill_zero();
        }
    }

    /// Reads the cell at `(addr_space, ptr)` and converts it to a field element.
    ///
    /// # Safety
    /// `addr_space` and `ptr` must be in bounds.
    pub unsafe fn get_f<F: PrimeField32>(&self, addr_space: u32, ptr: u32) -> F {
        let layout = &self.config.get_unchecked(addr_space as usize).layout;
        let start = ptr as usize * layout.size();
        let bytes = self.get_u8_slice(addr_space, start, layout.size());
        layout.to_field(bytes)
    }

    /// Reads the cell at `(addr_space, ptr)` as `T`.
    ///
    /// # Safety
    /// `T` must match the cell type of `addr_space` and the address must be in bounds.
    pub unsafe fn get<T: Copy>(&self, (addr_space, ptr): Address) -> T {
        debug_assert_eq!(
            size_of::<T>(),
            self.config[addr_space as usize].layout.size()
        );
        self.mem
            .get_unchecked(addr_space as usize)
            .read((ptr as usize) * size_of::<T>())
    }

    /// Returns a slice of `len` cells starting at `(addr_space, ptr)`.
    ///
    /// # Safety
    /// `T` must match the cell type of `addr_space` and the range must be in bounds.
    pub unsafe fn get_slice<T: Copy + Debug>(
        &self,
        (addr_space, ptr): Address,
        len: usize,
    ) -> &[T] {
        debug_assert_eq!(
            size_of::<T>(),
            self.config[addr_space as usize].layout.size()
        );
        let start = (ptr as usize) * size_of::<T>();
        let mem = self.mem.get_unchecked(addr_space as usize);
        mem.get_aligned_slice(start, len)
    }

    /// Returns `len` raw bytes of `addr_space` starting at byte offset `start`.
    ///
    /// # Safety
    /// `addr_space` and the byte range must be in bounds.
    pub unsafe fn get_u8_slice(&self, addr_space: u32, start: usize, len: usize) -> &[u8] {
        let mem = self.mem.get_unchecked(addr_space as usize);
        mem.get_aligned_slice(start, len)
    }

    /// Copies `data` into the cells starting at `(addr_space, ptr)`.
    ///
    /// # Safety
    /// `T` must match the cell type of `addr_space` and the range must be in bounds.
    pub unsafe fn copy_slice_nonoverlapping<T: Copy>(
        &mut self,
        (addr_space, ptr): Address,
        data: &[T],
    ) {
        let start = (ptr as usize) * size_of::<T>();
        self.mem
            .get_unchecked_mut(addr_space as usize)
            .copy_nonoverlapping(start, data);
    }

    /// Writes the individual bytes of a sparse memory image into the corresponding
    /// address spaces.
    pub fn set_from_sparse(&mut self, sparse_map: &SparseMemoryImage) {
        for (&(addr_space, index), &data_byte) in sparse_map.iter() {
            unsafe {
                self.mem
                    .get_unchecked_mut(addr_space as usize)
                    .write_unaligned(index as usize, data_byte);
            }
        }
    }
}
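
// Illustrative sketch (not part of the original source): constructs an `AddressMap` from the
// default `MemoryConfig` and checks only invariants visible in this module, namely that each
// address space is backed by at least `num_cells * cell_size` bytes.
#[cfg(test)]
mod address_map_sketch {
    use super::*;

    #[test]
    fn construct_and_zero() {
        let mut map: AddressMap = AddressMap::from_mem_config(&MemoryConfig::default());
        map.fill_zero();
        for (mem, cfg) in map.get_memory().iter().zip(&map.config) {
            assert!(mem.size() >= cfg.num_cells * cfg.layout.size());
        }
    }
}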

/// Typed, block-based accessor over [`AddressMap`] used for guest memory reads and writes
/// during execution.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct GuestMemory {
    pub memory: AddressMap,
}

impl GuestMemory {
    pub fn new(addr: AddressMap) -> Self {
        Self { memory: addr }
    }

    /// Returns the `[T; BLOCK_SIZE]` block starting at `(addr_space, ptr)`.
    ///
    /// # Safety
    /// `T` must match the cell type of `addr_space` and the whole block must be in bounds.
    #[inline(always)]
    pub unsafe fn read<T, const BLOCK_SIZE: usize>(
        &self,
        addr_space: u32,
        ptr: u32,
    ) -> [T; BLOCK_SIZE]
    where
        T: Copy + Debug,
    {
        self.debug_assert_cell_type::<T>(addr_space);
        self.memory
            .get_memory()
            .get_unchecked(addr_space as usize)
            .read((ptr as usize) * size_of::<T>())
    }

    /// Writes `values` to the block starting at `(addr_space, ptr)`.
    ///
    /// # Safety
    /// Same requirements as [`GuestMemory::read`].
    #[inline(always)]
    pub unsafe fn write<T, const BLOCK_SIZE: usize>(
        &mut self,
        addr_space: u32,
        ptr: u32,
        values: [T; BLOCK_SIZE],
    ) where
        T: Copy + Debug,
    {
        self.debug_assert_cell_type::<T>(addr_space);
        self.memory
            .get_memory_mut()
            .get_unchecked_mut(addr_space as usize)
            .write((ptr as usize) * size_of::<T>(), values);
    }

    /// Swaps `values` with the block starting at `(addr_space, ptr)`.
    ///
    /// # Safety
    /// Same requirements as [`GuestMemory::read`].
    #[inline(always)]
    pub unsafe fn swap<T, const BLOCK_SIZE: usize>(
        &mut self,
        addr_space: u32,
        ptr: u32,
        values: &mut [T; BLOCK_SIZE],
    ) where
        T: Copy + Debug,
    {
        self.debug_assert_cell_type::<T>(addr_space);
        self.memory
            .get_memory_mut()
            .get_unchecked_mut(addr_space as usize)
            .swap((ptr as usize) * size_of::<T>(), values);
    }

    #[inline(always)]
    #[allow(clippy::missing_safety_doc)]
    pub unsafe fn get_slice<T: Copy + Debug>(&self, addr_space: u32, ptr: u32, len: usize) -> &[T] {
        self.memory.get_slice((addr_space, ptr), len)
    }

    #[inline(always)]
    fn debug_assert_cell_type<T>(&self, addr_space: u32) {
        debug_assert_eq!(
            size_of::<T>(),
            self.memory.config[addr_space as usize].layout.size()
        );
    }
}
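
// Illustrative sketch (not part of the original source): a round-trip through
// `GuestMemory::write`/`GuestMemory::read`. It does not assume any particular address-space
// layout; it simply picks the first address space (if any) whose cells are single bytes.
#[cfg(test)]
mod guest_memory_sketch {
    use super::*;

    #[test]
    fn write_then_read_block() {
        let mut memory = GuestMemory::new(AddressMap::default());
        let Some(addr_space) = memory
            .memory
            .config
            .iter()
            .position(|cfg| cfg.layout.size() == 1 && cfg.num_cells >= 4)
        else {
            return;
        };
        // SAFETY: the cell size was checked to be 1 byte (matching `u8`) and pointers 0..4
        // are in bounds because the address space has at least 4 cells.
        unsafe {
            memory.write::<u8, 4>(addr_space as u32, 0, [10, 20, 30, 40]);
            assert_eq!(memory.read::<u8, 4>(addr_space as u32, 0), [10, 20, 30, 40]);
        }
    }
}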

#[repr(C)]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Default)]
pub struct AccessMetadata {
    /// Packs the timestamp of the latest access (low 29 bits) with an encoding of the block
    /// size (high 3 bits): `0` for an untouched slot, otherwise `log2(block_size) + 1`.
    pub timestamp_and_log_block_size: u32,
    /// Offset (in `min_block_size` units) from this slot back to the start of its block.
    pub offset_to_start: u8,
}

impl AccessMetadata {
    const TIMESTAMP_MASK: u32 = (1 << 29) - 1;
    const LOG_BLOCK_SIZE_SHIFT: u32 = 29;

    pub fn new(timestamp: u32, block_size: u8, offset_to_start: u8) -> Self {
        debug_assert!(timestamp < (1 << 29), "Timestamp must be less than 2^29");
        debug_assert!(
            block_size == 0 || (block_size.is_power_of_two() && block_size <= MAX_BLOCK_SIZE as u8),
            "Block size must be 0 or a power of 2 and <= {MAX_BLOCK_SIZE}"
        );

        let encoded_block_size = if block_size == 0 {
            0
        } else {
            unsafe { NonZero::new_unchecked(block_size) }.ilog2() + 1
        };
        let packed = timestamp | (encoded_block_size << Self::LOG_BLOCK_SIZE_SHIFT);

        Self {
            timestamp_and_log_block_size: packed,
            offset_to_start,
        }
    }

    pub fn timestamp(&self) -> u32 {
        self.timestamp_and_log_block_size & Self::TIMESTAMP_MASK
    }

    pub fn block_size(&self) -> u8 {
        let encoded = self.timestamp_and_log_block_size >> Self::LOG_BLOCK_SIZE_SHIFT;
        if encoded == 0 {
            0
        } else {
            1 << (encoded - 1)
        }
    }
}
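
// Illustrative sketch (not part of the original source): checks that the timestamp /
// block-size packing above round-trips for an untouched slot and for the largest
// supported block.
#[cfg(test)]
mod access_metadata_sketch {
    use super::*;

    #[test]
    fn packing_round_trips() {
        // An untouched slot: block size 0 is encoded as 0.
        let empty = AccessMetadata::new(0, 0, 0);
        assert_eq!((empty.timestamp(), empty.block_size()), (0, 0));

        // A touched block: the timestamp occupies the low 29 bits and
        // `log2(block_size) + 1` the high 3 bits.
        let meta = AccessMetadata::new((1 << 29) - 1, MAX_BLOCK_SIZE as u8, 3);
        assert_eq!(meta.timestamp(), (1 << 29) - 1);
        assert_eq!(meta.block_size(), MAX_BLOCK_SIZE as u8);
        assert_eq!(meta.offset_to_start, 3);
    }
}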

/// Guest memory that additionally tracks, for every block, the timestamp of its last access
/// and its current block size, emitting access adapter records for splits and merges.
#[derive(Getters)]
pub struct TracingMemory {
    pub timestamp: u32,
    /// The block size of the initial memory image partition.
    initial_block_size: usize,
    #[getset(get = "pub")]
    pub data: GuestMemory,
    /// Access metadata for each address space: one entry per `min_block_size` cells.
    pub(super) meta: Vec<PagedVec<AccessMetadata, PAGE_SIZE>>,
    /// The minimum block size (= required alignment) of each address space, in cells.
    pub min_block_size: Vec<u32>,
    pub access_adapter_records: DenseRecordArena,
}

const INITIAL_CELL_BUFFER: &[u8] = &[0u8; 8];
const INITIAL_TIMESTAMP_BUFFER: &[u32] = &[INITIAL_TIMESTAMP; 8];

impl TracingMemory {
    pub fn new(
        mem_config: &MemoryConfig,
        initial_block_size: usize,
        access_adapter_arena_size_bound: usize,
    ) -> Self {
        let image = GuestMemory::new(AddressMap::from_mem_config(mem_config));
        Self::from_image(image, initial_block_size, access_adapter_arena_size_bound)
    }

    pub fn from_image(
        image: GuestMemory,
        initial_block_size: usize,
        access_adapter_arena_size_bound: usize,
    ) -> Self {
        let (meta, min_block_size): (Vec<_>, Vec<_>) =
            zip_eq(image.memory.get_memory(), &image.memory.config)
                .map(|(mem, addr_sp)| {
                    let num_cells = mem.size() / addr_sp.layout.size();
                    let min_block_size = addr_sp.min_block_size;
                    let total_metadata_len = num_cells.div_ceil(min_block_size);
                    (PagedVec::new(total_metadata_len), min_block_size as u32)
                })
                .unzip();
        let access_adapter_records =
            DenseRecordArena::with_byte_capacity(access_adapter_arena_size_bound);
        Self {
            data: image,
            meta,
            min_block_size,
            timestamp: INITIAL_TIMESTAMP + 1,
            initial_block_size,
            access_adapter_records,
        }
    }

    #[inline(always)]
    fn assert_alignment(&self, block_size: usize, align: usize, addr_space: u32, ptr: u32) {
        debug_assert!(block_size.is_power_of_two());
        debug_assert_eq!(block_size % align, 0);
        debug_assert_ne!(addr_space, 0);
        debug_assert_eq!(align as u32, self.min_block_size[addr_space as usize]);
        assert_eq!(
            ptr % (align as u32),
            0,
            "pointer={ptr} not aligned to {align}"
        );
    }

    /// Returns the start pointer and metadata of the block containing `pointer`.
    #[inline(always)]
    fn get_block_metadata<const ALIGN: usize>(
        &mut self,
        address_space: usize,
        pointer: usize,
    ) -> (u32, AccessMetadata) {
        let ptr_index = pointer / ALIGN;
        let meta_page = unsafe { self.meta.get_unchecked_mut(address_space) };
        let current_meta = meta_page.get(ptr_index);

        let (block_start_index, block_metadata) = if current_meta.offset_to_start == 0 {
            (ptr_index, current_meta)
        } else {
            let offset = current_meta.offset_to_start;
            let start_idx = ptr_index - offset as usize;
            let start_meta = meta_page.get(start_idx);
            (start_idx, start_meta)
        };

        let block_start_pointer = (block_start_index * ALIGN) as u32;

        (block_start_pointer, block_metadata)
    }

    /// Returns the timestamp of the block containing `pointer`.
    #[inline(always)]
    fn get_timestamp<const ALIGN: usize>(&mut self, address_space: usize, pointer: usize) -> u32 {
        let ptr_index = pointer / ALIGN;
        let meta_page = unsafe { self.meta.get_unchecked_mut(address_space) };
        let current_meta = meta_page.get(ptr_index);

        if current_meta.offset_to_start == 0 {
            current_meta.timestamp()
        } else {
            let offset = current_meta.offset_to_start;
            let block_start_index = ptr_index - offset as usize;
            meta_page.get(block_start_index).timestamp()
        }
    }

    /// Marks `[pointer, pointer + BLOCK_SIZE)` as a single block accessed at `timestamp`.
    #[inline(always)]
    fn set_meta_block<const BLOCK_SIZE: usize, const ALIGN: usize>(
        &mut self,
        address_space: usize,
        pointer: usize,
        timestamp: u32,
    ) {
        let ptr = pointer / ALIGN;
        let meta_page = unsafe { self.meta.get_unchecked_mut(address_space) };

        meta_page.set(ptr, AccessMetadata::new(timestamp, BLOCK_SIZE as u8, 0));

        for i in 1..(BLOCK_SIZE / ALIGN) {
            meta_page.set(ptr + i, AccessMetadata::new(0, 0, i as u8));
        }
    }

    pub(crate) fn add_split_record(&mut self, header: AccessRecordHeader) {
        if header.block_size == header.lowest_block_size {
            return;
        }
        let data_slice = unsafe {
            self.data.memory.get_u8_slice(
                header.address_space,
                (header.pointer * header.type_size) as usize,
                (header.block_size * header.type_size) as usize,
            )
        };

        let record_mut = self
            .access_adapter_records
            .alloc(AccessLayout::from_record_header(&header));
        *record_mut.header = header;
        record_mut.data.copy_from_slice(data_slice);
    }

    pub(crate) fn add_merge_record(
        &mut self,
        header: AccessRecordHeader,
        data_slice: &[u8],
        prev_ts: &[u32],
    ) {
        if header.block_size == header.lowest_block_size {
            return;
        }

        let record_mut = self
            .access_adapter_records
            .alloc(AccessLayout::from_record_header(&header));
        *record_mut.header = header;
        record_mut.header.timestamp_and_mask |= MERGE_AND_NOT_SPLIT_FLAG;
        record_mut.data.copy_from_slice(data_slice);
        record_mut.timestamps.copy_from_slice(prev_ts);
    }

    /// Determines which existing blocks overlapping `[pointer, pointer + BLOCK_SIZE)` must be
    /// split down to `ALIGN`, and the `(pointer, size)` of the merge that then forms the
    /// accessed block. Returns `None` if the access exactly matches an existing block.
    #[inline(always)]
    #[allow(clippy::type_complexity)]
    fn calculate_splits_and_merges<const BLOCK_SIZE: usize, const ALIGN: usize>(
        &mut self,
        address_space: usize,
        pointer: usize,
    ) -> Option<(Vec<(usize, usize)>, (usize, usize))> {
        let (start_ptr, block_meta) = self.get_block_metadata::<ALIGN>(address_space, pointer);
        if block_meta.block_size() == BLOCK_SIZE as u8 && start_ptr == pointer as u32 {
            return None;
        }

        let mut splits_buf = [(0usize, 0usize); MAX_SEGMENTS];
        let mut splits_count = 0;
        let mut current_ptr = pointer;
        let end_ptr = pointer + BLOCK_SIZE;

        while current_ptr < end_ptr {
            let (start_ptr, block_metadata) =
                self.get_block_metadata::<ALIGN>(address_space, current_ptr);

            if block_metadata.block_size() == 0 {
                current_ptr += ALIGN;
                continue;
            }

            if block_metadata.block_size() > ALIGN as u8 {
                unsafe {
                    *splits_buf.get_unchecked_mut(splits_count) =
                        (start_ptr as usize, block_metadata.block_size() as usize);
                }
                splits_count += 1;
            }

            current_ptr = start_ptr as usize + block_metadata.block_size() as usize;
        }

        let merge = (pointer, BLOCK_SIZE);

        Some((splits_buf[..splits_count].to_vec(), merge))
    }

    #[inline(always)]
    fn split_by_meta<T: Copy, const MIN_BLOCK_SIZE: usize>(
        &mut self,
        start_ptr: u32,
        timestamp: u32,
        block_size: u8,
        address_space: usize,
    ) {
        if block_size == MIN_BLOCK_SIZE as u8 {
            return;
        }
        let begin = start_ptr as usize / MIN_BLOCK_SIZE;
        let meta_page = unsafe { self.meta.get_unchecked_mut(address_space) };

        for i in 0..(block_size as usize / MIN_BLOCK_SIZE) {
            meta_page.set(
                begin + i,
                AccessMetadata::new(timestamp, MIN_BLOCK_SIZE as u8, 0),
            );
        }
        self.add_split_record(AccessRecordHeader {
            timestamp_and_mask: timestamp,
            address_space: address_space as u32,
            pointer: start_ptr,
            block_size: block_size as u32,
            lowest_block_size: MIN_BLOCK_SIZE as u32,
            type_size: size_of::<T>() as u32,
        });
    }

    /// Performs the bookkeeping for an access to `[pointer, pointer + BLOCK_SIZE)`: emits any
    /// split/merge records needed to form the block, updates the block metadata to the current
    /// timestamp, and returns the previous access timestamp of the block.
    #[inline(always)]
    fn prev_access_time<T: Copy, const BLOCK_SIZE: usize, const ALIGN: usize>(
        &mut self,
        address_space: usize,
        pointer: usize,
        prev_values: &[T; BLOCK_SIZE],
    ) -> u32 {
        debug_assert_eq!(ALIGN, self.data.memory.config[address_space].min_block_size);
        debug_assert_eq!(
            unsafe {
                self.data
                    .memory
                    .config
                    .get_unchecked(address_space)
                    .layout
                    .size()
            },
            size_of::<T>()
        );
        let result = if let Some((splits, (merge_ptr, merge_size))) =
            self.calculate_splits_and_merges::<BLOCK_SIZE, ALIGN>(address_space, pointer)
        {
            for (split_ptr, split_size) in splits {
                let (_, block_metadata) =
                    self.get_block_metadata::<ALIGN>(address_space, split_ptr);
                let timestamp = block_metadata.timestamp();
                self.split_by_meta::<T, ALIGN>(
                    split_ptr as u32,
                    timestamp,
                    split_size as u8,
                    address_space,
                );
            }

            let mut prev_ts_buf = [0u32; MAX_SEGMENTS];

            let mut max_timestamp = INITIAL_TIMESTAMP;

            let mut ptr = merge_ptr;
            let end_ptr = merge_ptr + merge_size;
            let mut seg_idx = 0;
            while ptr < end_ptr {
                let (_, block_metadata) = self.get_block_metadata::<ALIGN>(address_space, ptr);

                let timestamp = if block_metadata.block_size() > 0 {
                    block_metadata.timestamp()
                } else {
                    self.handle_uninitialized_memory::<T, ALIGN>(address_space, ptr);
                    INITIAL_TIMESTAMP
                };

                unsafe {
                    *prev_ts_buf.get_unchecked_mut(seg_idx) = timestamp;
                }
                max_timestamp = max_timestamp.max(timestamp);
                ptr += ALIGN;
                seg_idx += 1;
            }

            self.add_merge_record(
                AccessRecordHeader {
                    timestamp_and_mask: max_timestamp,
                    address_space: address_space as u32,
                    pointer: merge_ptr as u32,
                    block_size: merge_size as u32,
                    lowest_block_size: ALIGN as u32,
                    type_size: size_of::<T>() as u32,
                },
                unsafe { slice_as_bytes(prev_values) },
                &prev_ts_buf[..seg_idx],
            );

            max_timestamp
        } else {
            self.get_timestamp::<ALIGN>(address_space, pointer)
        };

        self.set_meta_block::<BLOCK_SIZE, ALIGN>(address_space, pointer, self.timestamp);
        result
    }

    /// Handles the first access to a previously untouched location: splits the enclosing
    /// initial block down to `ALIGN`-sized segments, or merges unit cells up to `ALIGN`,
    /// all at the initial timestamp.
    #[inline(always)]
    fn handle_uninitialized_memory<T: Copy, const ALIGN: usize>(
        &mut self,
        address_space: usize,
        pointer: usize,
    ) {
        if self.initial_block_size >= ALIGN {
            let segment_index = pointer / ALIGN;
            let block_start = segment_index & !(self.initial_block_size / ALIGN - 1);
            let start_ptr = (block_start * ALIGN) as u32;
            self.split_by_meta::<T, ALIGN>(
                start_ptr,
                INITIAL_TIMESTAMP,
                self.initial_block_size as u8,
                address_space,
            );
        } else {
            debug_assert_eq!(self.initial_block_size, 1);
            self.add_merge_record(
                AccessRecordHeader {
                    timestamp_and_mask: INITIAL_TIMESTAMP,
                    address_space: address_space as u32,
                    pointer: pointer as u32,
                    block_size: ALIGN as u32,
                    lowest_block_size: self.initial_block_size as u32,
                    type_size: size_of::<T>() as u32,
                },
                &INITIAL_CELL_BUFFER[..ALIGN],
                &INITIAL_TIMESTAMP_BUFFER[..ALIGN],
            );
        }
    }

    /// Reads the `[T; BLOCK_SIZE]` block at `(address_space, pointer)` and records the access,
    /// returning `(t_prev, values)` where `t_prev` is the timestamp of the previous access to
    /// this block. Increments the timestamp.
    ///
    /// # Safety
    /// `T` must match the cell type of `address_space` and the block must be in bounds.
    #[inline(always)]
    pub unsafe fn read<T, const BLOCK_SIZE: usize, const ALIGN: usize>(
        &mut self,
        address_space: u32,
        pointer: u32,
    ) -> (u32, [T; BLOCK_SIZE])
    where
        T: Copy + Debug,
    {
        self.assert_alignment(BLOCK_SIZE, ALIGN, address_space, pointer);
        let values = self.data.read(address_space, pointer);
        let t_prev = self.prev_access_time::<T, BLOCK_SIZE, ALIGN>(
            address_space as usize,
            pointer as usize,
            &values,
        );
        self.timestamp += 1;

        (t_prev, values)
    }

    /// Writes `values` to the block at `(address_space, pointer)` and records the access,
    /// returning `(t_prev, values_prev)` for the previous access to this block.
    /// Increments the timestamp.
    ///
    /// # Safety
    /// Same requirements as [`TracingMemory::read`].
    #[inline(always)]
    pub unsafe fn write<T, const BLOCK_SIZE: usize, const ALIGN: usize>(
        &mut self,
        address_space: u32,
        pointer: u32,
        values: [T; BLOCK_SIZE],
    ) -> (u32, [T; BLOCK_SIZE])
    where
        T: Copy + Debug,
    {
        self.assert_alignment(BLOCK_SIZE, ALIGN, address_space, pointer);
        let values_prev = self.data.read(address_space, pointer);
        let t_prev = self.prev_access_time::<T, BLOCK_SIZE, ALIGN>(
            address_space as usize,
            pointer as usize,
            &values_prev,
        );
        self.data.write(address_space, pointer, values);
        self.timestamp += 1;

        (t_prev, values_prev)
    }

    pub fn increment_timestamp(&mut self) {
        self.timestamp += 1;
    }

    pub fn increment_timestamp_by(&mut self, amount: u32) {
        self.timestamp += amount;
    }

    pub fn timestamp(&self) -> u32 {
        self.timestamp
    }

    /// Collects all touched blocks into the final timestamped equipartition, emitting the
    /// remaining split/merge records needed to reach the partition size (1 if volatile,
    /// `CHUNK` if persistent).
    #[instrument(name = "memory_finalize", skip_all)]
    pub fn finalize<F: Field>(&mut self, is_persistent: bool) -> TouchedMemory<F> {
        let touched_blocks = self.touched_blocks();

        match is_persistent {
            false => TouchedMemory::Volatile(
                self.touched_blocks_to_equipartition::<F, 1>(touched_blocks),
            ),
            true => TouchedMemory::Persistent(
                self.touched_blocks_to_equipartition::<F, CHUNK>(touched_blocks),
            ),
        }
    }

    /// Returns the start address and metadata of every touched block.
    fn touched_blocks(&self) -> Vec<(Address, AccessMetadata)> {
        assert_eq!(self.meta.len(), self.min_block_size.len());
        self.meta
            .par_iter()
            .zip(self.min_block_size.par_iter())
            .enumerate()
            .flat_map(|(addr_space, (meta_page, &align))| {
                meta_page
                    .par_iter()
                    .filter_map(move |(idx, metadata)| {
                        let ptr = idx as u32 * align;
                        if metadata.offset_to_start == 0 && metadata.block_size() != 0 {
                            Some(((addr_space as u32, ptr), metadata))
                        } else {
                            None
                        }
                    })
                    .collect::<Vec<_>>()
            })
            .collect()
    }

    fn touched_blocks_to_equipartition<F: Field, const CHUNK: usize>(
        &mut self,
        touched_blocks: Vec<((u32, u32), AccessMetadata)>,
    ) -> TimestampedEquipartition<F, CHUNK> {
        let mut final_memory = Vec::new();

        debug_assert!(touched_blocks.is_sorted_by_key(|(addr, _)| addr));
        self.handle_touched_blocks::<F, CHUNK>(&mut final_memory, touched_blocks);

        debug_assert!(final_memory.is_sorted_by_key(|(key, _)| *key));
        final_memory
    }

    fn handle_touched_blocks<F: Field, const CHUNK: usize>(
        &mut self,
        final_memory: &mut Vec<((u32, u32), TimestampedValues<F, CHUNK>)>,
        touched_blocks: Vec<((u32, u32), AccessMetadata)>,
    ) {
        let mut current_values = vec![0u8; MAX_CELL_BYTE_SIZE * CHUNK];
        let mut current_cnt = 0;
        let mut current_address = MemoryAddress::new(0, 0);
        let mut current_timestamps = vec![0; CHUNK];
        for ((addr_space, ptr), access_metadata) in touched_blocks {
            let addr_space_config =
                unsafe { *self.data.memory.config.get_unchecked(addr_space as usize) };
            let min_block_size = addr_space_config.min_block_size;
            let cell_size = addr_space_config.layout.size();
            let timestamp = access_metadata.timestamp();
            let block_size = access_metadata.block_size();
            assert!(
                current_cnt == 0
                    || (current_address.address_space == addr_space
                        && current_address.pointer + current_cnt as u32 == ptr),
                "The union of all touched blocks must consist of blocks with sizes divisible by `CHUNK`"
            );
            debug_assert!(block_size >= min_block_size as u8);
            debug_assert!(ptr % min_block_size as u32 == 0);

            if current_cnt == 0 {
                assert_eq!(
                    ptr & (CHUNK as u32 - 1),
                    0,
                    "The union of all touched blocks must consist of `CHUNK`-aligned blocks"
                );
                current_address = MemoryAddress::new(addr_space, ptr);
            }

            if block_size > min_block_size as u8 {
                self.add_split_record(AccessRecordHeader {
                    timestamp_and_mask: timestamp,
                    address_space: addr_space,
                    pointer: ptr,
                    block_size: block_size as u32,
                    lowest_block_size: min_block_size as u32,
                    type_size: cell_size as u32,
                });
            }
            if min_block_size > CHUNK {
                assert_eq!(current_cnt, 0);
                for i in (0..block_size as u32).step_by(min_block_size) {
                    self.add_split_record(AccessRecordHeader {
                        timestamp_and_mask: timestamp,
                        address_space: addr_space,
                        pointer: ptr + i,
                        block_size: min_block_size as u32,
                        lowest_block_size: CHUNK as u32,
                        type_size: cell_size as u32,
                    });
                }
                let values = unsafe {
                    self.data.memory.get_u8_slice(
                        addr_space,
                        ptr as usize * cell_size,
                        block_size as usize * cell_size,
                    )
                };
                for i in (0..block_size as u32).step_by(CHUNK) {
                    final_memory.push((
                        (addr_space, ptr + i),
                        TimestampedValues {
                            timestamp,
                            values: from_fn(|j| {
                                let byte_idx = (i as usize + j) * cell_size;
                                unsafe {
                                    addr_space_config
                                        .layout
                                        .to_field(&values[byte_idx..byte_idx + cell_size])
                                }
                            }),
                        },
                    ));
                }
            } else {
                for i in 0..block_size as u32 {
                    let cell_data = unsafe {
                        self.data.memory.get_u8_slice(
                            addr_space,
                            (ptr + i) as usize * cell_size,
                            cell_size,
                        )
                    };
                    current_values[current_cnt * cell_size..current_cnt * cell_size + cell_size]
                        .copy_from_slice(cell_data);
                    if current_cnt & (min_block_size - 1) == 0 {
                        unsafe {
                            *current_timestamps.get_unchecked_mut(current_cnt / min_block_size) =
                                timestamp;
                        }
                    }
                    current_cnt += 1;
                    if current_cnt == CHUNK {
                        let timestamp = *current_timestamps[..CHUNK / min_block_size]
                            .iter()
                            .max()
                            .unwrap();
                        self.add_merge_record(
                            AccessRecordHeader {
                                timestamp_and_mask: timestamp,
                                address_space: addr_space,
                                pointer: current_address.pointer,
                                block_size: CHUNK as u32,
                                lowest_block_size: min_block_size as u32,
                                type_size: cell_size as u32,
                            },
                            &current_values[..CHUNK * cell_size],
                            &current_timestamps[..CHUNK / min_block_size],
                        );
                        final_memory.push((
                            (current_address.address_space, current_address.pointer),
                            TimestampedValues {
                                timestamp,
                                values: from_fn(|i| unsafe {
                                    addr_space_config.layout.to_field(
                                        &current_values[i * cell_size..i * cell_size + cell_size],
                                    )
                                }),
                            },
                        ));
                        current_address.pointer += current_cnt as u32;
                        current_cnt = 0;
                    }
                }
            }
        }
        assert_eq!(
            current_cnt, 0,
            "The union of all touched blocks must consist of blocks with sizes divisible by `CHUNK`"
        );
    }

    pub fn address_space_alignment(&self) -> Vec<u8> {
        self.min_block_size
            .iter()
            .map(|&x| log2_strict_usize(x as usize) as u8)
            .collect()
    }
}
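
// Illustrative sketch (not part of the original source): constructs a `TracingMemory` over the
// default `MemoryConfig` and exercises only the timestamp bookkeeping. The arena size bound
// and the initial block size of 1 are arbitrary illustrative choices.
#[cfg(test)]
mod tracing_memory_sketch {
    use super::*;

    #[test]
    fn timestamp_starts_after_initial_and_increments() {
        let mem_config = MemoryConfig::default();
        let mut memory = TracingMemory::new(&mem_config, 1, 1 << 20);
        assert_eq!(memory.timestamp(), INITIAL_TIMESTAMP + 1);
        memory.increment_timestamp();
        memory.increment_timestamp_by(3);
        assert_eq!(memory.timestamp(), INITIAL_TIMESTAMP + 5);
    }
}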