openvm_circuit/system/memory/online/memmap.rs

use std::{
    fmt::Debug,
    mem::{align_of, size_of, size_of_val},
};

use memmap2::MmapMut;

use super::{LinearMemory, PAGE_SIZE};

pub const CELL_STRIDE: usize = 1;

/// Mmap-backed linear memory. OS-memory pages are paged in on-demand and zero-initialized.
#[derive(Debug)]
pub struct MmapMemory {
    mmap: MmapMut,
}

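// Deep clone: allocate a fresh anonymous mapping of the same length and copy
// the current contents into it byte-for-byte.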
impl Clone for MmapMemory {
    fn clone(&self) -> Self {
        let mut new_mmap = MmapMut::map_anon(self.mmap.len()).unwrap();
        new_mmap.copy_from_slice(&self.mmap);
        Self { mmap: new_mmap }
    }
}

impl MmapMemory {
    #[inline(always)]
    pub fn as_ptr(&self) -> *const u8 {
        self.mmap.as_ptr()
    }

    #[inline(always)]
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.mmap.as_mut_ptr()
    }

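    // Bounds checking strategy: by default every access is checked and an
    // out-of-bounds access panics via `panic_oob`. With the "unprotected"
    // feature enabled, the check is demoted to a `debug_assert!`, which
    // compiles out when debug assertions are disabled.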
    #[cfg(not(feature = "unprotected"))]
    #[inline(always)]
    fn check_bounds(&self, start: usize, size: usize) {
        let memory_size = self.size();
        if start > memory_size || size > memory_size - start {
            panic_oob(start, size, memory_size);
        }
    }

    #[cfg(feature = "unprotected")]
    #[inline(always)]
    fn check_bounds(&self, start: usize, size: usize) {
        let memory_size = self.size();
        debug_assert!(
            start <= memory_size && size <= memory_size - start,
            "Memory access out of bounds: start={} size={} memory_size={}",
            start,
            size,
            memory_size
        );
    }
}

impl LinearMemory for MmapMemory {
    /// Create a new `MmapMemory` with the given `size` in bytes.
    /// We round `size` up to a multiple of the mmap page size (4 KiB by default).
    fn new(mut size: usize) -> Self {
        size = size.div_ceil(PAGE_SIZE) * PAGE_SIZE;
        // anonymous mapping means pages are zero-initialized on first use
        Self {
            mmap: MmapMut::map_anon(size).unwrap(),
        }
    }

    fn size(&self) -> usize {
        self.mmap.len()
    }

    fn as_slice(&self) -> &[u8] {
        &self.mmap
    }

    fn as_mut_slice(&mut self) -> &mut [u8] {
        &mut self.mmap
    }

    #[cfg(target_os = "linux")]
    fn fill_zero(&mut self) {
        use libc::{madvise, MADV_DONTNEED};

        let mmap = &mut self.mmap;
        // SAFETY: our mmap is a memory-backed (not file-backed) anonymous private mapping.
        // When we madvise MADV_DONTNEED, according to https://man7.org/linux/man-pages/man2/madvise.2.html
        // > subsequent accesses of pages in the range will succeed, but
        // > will result in either repopulating the memory contents from
        // > the up-to-date contents of the underlying mapped file (for
        // > shared file mappings, shared anonymous mappings, and shmem-
        // > based techniques such as System V shared memory segments)
        // > or zero-fill-on-demand pages for anonymous private
        // > mappings.
        unsafe {
            let ret = madvise(
                mmap.as_ptr() as *mut libc::c_void,
                mmap.len(),
                MADV_DONTNEED,
            );
            if ret != 0 {
                // Fallback to write_bytes if madvise fails
                std::ptr::write_bytes(mmap.as_mut_ptr(), 0, mmap.len());
            }
        }
    }

    #[inline(always)]
    unsafe fn read<BLOCK: Copy>(&self, from: usize) -> BLOCK {
        self.check_bounds(from, size_of::<BLOCK>());
        let src = self.as_ptr().add(from) as *const BLOCK;
        // SAFETY:
        // - Bounds checked above (unless unprotected feature enabled)
        // - We assume `src` is aligned to `BLOCK`
        // - We assume `BLOCK` is "plain old data" so the underlying `src` bytes are valid to read
        //   as an initialized value of `BLOCK`
        core::ptr::read(src)
    }

    #[inline(always)]
    unsafe fn read_unaligned<BLOCK: Copy>(&self, from: usize) -> BLOCK {
        self.check_bounds(from, size_of::<BLOCK>());
        let src = self.as_ptr().add(from) as *const BLOCK;
        // SAFETY:
        // - Bounds checked above (unless unprotected feature enabled)
        // - We assume `BLOCK` is "plain old data" so the underlying `src` bytes are valid to read
        //   as an initialized value of `BLOCK`
        core::ptr::read_unaligned(src)
    }

    #[inline(always)]
    unsafe fn write<BLOCK: Copy>(&mut self, start: usize, values: BLOCK) {
        self.check_bounds(start, size_of::<BLOCK>());
        let dst = self.as_mut_ptr().add(start) as *mut BLOCK;
        // SAFETY:
        // - Bounds checked above (unless unprotected feature enabled)
        // - We assume `dst` is aligned to `BLOCK`
        core::ptr::write(dst, values);
    }

    #[inline(always)]
    unsafe fn write_unaligned<BLOCK: Copy>(&mut self, start: usize, values: BLOCK) {
        self.check_bounds(start, size_of::<BLOCK>());
        let dst = self.as_mut_ptr().add(start) as *mut BLOCK;
        // SAFETY:
        // - Bounds checked above (unless unprotected feature enabled)
        core::ptr::write_unaligned(dst, values);
    }

    #[inline(always)]
    unsafe fn swap<BLOCK: Copy>(&mut self, start: usize, values: &mut BLOCK) {
        self.check_bounds(start, size_of::<BLOCK>());
        // SAFETY:
        // - Bounds checked above (unless unprotected feature enabled)
        // - We assume the pointer at offset `start` is aligned to `BLOCK`
        core::ptr::swap(
            self.as_mut_ptr().add(start) as *mut BLOCK,
            values as *mut BLOCK,
        );
    }

    #[inline(always)]
    unsafe fn copy_nonoverlapping<T: Copy>(&mut self, to: usize, data: &[T]) {
        self.check_bounds(to, size_of_val(data));
        debug_assert_eq!(PAGE_SIZE % align_of::<T>(), 0);
        let src = data.as_ptr();
        let dst = self.as_mut_ptr().add(to) as *mut T;
        // SAFETY:
        // - Bounds checked above (unless unprotected feature enabled)
        // - Assumes `to` is aligned to `T` and `self.as_mut_ptr()` is aligned to `T`, which implies
        //   the same for `dst`.
        core::ptr::copy_nonoverlapping::<T>(src, dst, data.len());
    }

    #[inline(always)]
    unsafe fn get_aligned_slice<T: Copy>(&self, start: usize, len: usize) -> &[T] {
        self.check_bounds(start, len * size_of::<T>());
        let data = self.as_ptr().add(start) as *const T;
        // SAFETY:
        // - Bounds checked above (unless unprotected feature enabled)
        // - Assumes `data` is aligned to `T`
        // - `T` is "plain old data" (POD), so conversion from underlying bytes is properly
        //   initialized
        // - `self` will not be mutated while borrowed
        core::slice::from_raw_parts(data, len)
    }
}

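// `panic_oob` is marked #[cold] and #[inline(never)] so the panic formatting
// stays out of the callers' hot path and `check_bounds` remains cheap to inline.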
#[cold]
#[inline(never)]
fn panic_oob(start: usize, size: usize, memory_size: usize) -> ! {
    panic!(
        "Memory access out of bounds: start={} size={} memory_size={}",
        start, size, memory_size
    );
}
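
// The tests below are an illustrative sketch of the behavior documented above
// (size rounding in `new`, zero-initialized pages, aligned read/write round
// trips, deep `clone`, and `fill_zero`). They assume only the items defined or
// imported in this module (`MmapMemory`, `LinearMemory`, `PAGE_SIZE`); the test
// names are illustrative and not part of any existing suite.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn new_rounds_size_up_to_page_size() {
        let mem = MmapMemory::new(1);
        // `new` rounds the requested size up to a multiple of the mmap page size.
        assert_eq!(mem.size(), PAGE_SIZE);
        assert_eq!(mem.size() % PAGE_SIZE, 0);
    }

    #[test]
    fn fresh_memory_reads_as_zero() {
        let mem = MmapMemory::new(PAGE_SIZE);
        // Anonymous mappings are zero-initialized on first use.
        unsafe {
            assert_eq!(mem.read::<u32>(0), 0);
            assert_eq!(mem.read::<u8>(PAGE_SIZE - 1), 0);
        }
    }

    #[test]
    fn aligned_write_then_read_round_trips() {
        let mut mem = MmapMemory::new(PAGE_SIZE);
        let value: u64 = 0xdead_beef_cafe_f00d;
        // Offset 8 is 8-byte aligned, so the aligned read/write pair applies.
        unsafe {
            mem.write::<u64>(8, value);
            assert_eq!(mem.read::<u64>(8), value);
        }
    }

    #[test]
    fn clone_is_an_independent_copy() {
        let mut original = MmapMemory::new(PAGE_SIZE);
        unsafe {
            original.write::<u8>(0, 42);
            let mut copy = original.clone();
            copy.write::<u8>(0, 99);
            // Mutating the clone must not affect the original mapping.
            assert_eq!(original.read::<u8>(0), 42);
            assert_eq!(copy.read::<u8>(0), 99);
        }
    }

    #[cfg(target_os = "linux")]
    #[test]
    fn fill_zero_resets_contents() {
        let mut mem = MmapMemory::new(PAGE_SIZE);
        unsafe {
            mem.write::<u32>(0, 7);
            mem.fill_zero();
            assert_eq!(mem.read::<u32>(0), 0);
        }
    }
}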