wasmer_vm/mmap.rs

// This file contains code from external sources.
// Attributions: https://github.com/wasmerio/wasmer/blob/main/docs/ATTRIBUTIONS.md

//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.

use more_asserts::assert_le;
use std::io;
use std::ptr;
use std::slice;

/// Round `size` up to the nearest multiple of `page_size`.
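///
/// `page_size` must be a power of two. For example, with a 4096-byte page
/// size, a `size` of 1 rounds up to 4096 and 4097 rounds up to 8192 (see the
/// tests at the bottom of this file).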
fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
    (size + (page_size - 1)) & !(page_size - 1)
}

/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
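///
/// A mapping may reserve more address space than is initially accessible;
/// additional pages can be committed later with [`Mmap::make_accessible`].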
#[derive(Debug)]
pub struct Mmap {
    // Note that this is stored as a `usize` instead of a `*const` or `*mut`
    // pointer to allow this structure to be natively `Send` and `Sync` without
    // `unsafe impl`. This type is sendable across threads and shareable since
    // the coordination all happens at the OS layer.
    ptr: usize,
    total_size: usize,
    accessible_size: usize,
    #[cfg_attr(target_os = "windows", allow(dead_code))]
    sync_on_drop: bool,
}

/// The type of mmap to create.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum MmapType {
    /// The memory is private to the process and not shared with other processes.
    Private,
    /// The memory is shared with other processes. This is only supported on Unix.
    /// When the memory is flushed it will update the file data.
    Shared,
}

impl Mmap {
    /// Construct a new empty instance of `Mmap`.
    pub fn new() -> Self {
        // Rust's slices require non-null pointers, even when empty. `Vec`
        // contains code to create a non-null dangling pointer value when
        // constructed empty, so we reuse that here.
        let empty = Vec::<u8>::new();
        Self {
            ptr: empty.as_ptr() as usize,
            total_size: 0,
            accessible_size: 0,
            sync_on_drop: false,
        }
    }

    /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
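    ///
    /// `size` is rounded up to the host page size, so the returned mapping may
    /// be slightly larger than requested; all of it is readable and writable.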
    pub fn with_at_least(size: usize) -> Result<Self, String> {
        let page_size = region::page::size();
        let rounded_size = round_up_to_page_size(size, page_size);
        Self::accessible_reserved(rounded_size, rounded_size, None, MmapType::Private)
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
    #[cfg(not(target_os = "windows"))]
    pub fn accessible_reserved(
        mut accessible_size: usize,
        mapping_size: usize,
        mut backing_file: Option<std::path::PathBuf>,
        memory_type: MmapType,
    ) -> Result<Self, String> {
        use std::os::fd::IntoRawFd;

        let page_size = region::page::size();
        assert_le!(accessible_size, mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
        assert_eq!(accessible_size & (page_size - 1), 0);

        // Mmap may return EINVAL if the size is zero, so just
        // special-case that.
        if mapping_size == 0 {
            return Ok(Self::new());
        }

        // If there is a backing file, resize the file so that it is at least
        // `mapping_size` bytes.
        let mut memory_fd = -1;
        if let Some(backing_file_path) = &mut backing_file {
            let file = std::fs::OpenOptions::new()
                .read(true)
                .write(true)
                .open(&backing_file_path)
                .map_err(|e| e.to_string())?;

            let mut backing_file_accessible = backing_file_path.clone();
            backing_file_accessible.set_extension("accessible");

            let len = file.metadata().map_err(|e| e.to_string())?.len() as usize;
            if len < mapping_size {
                std::fs::write(&backing_file_accessible, format!("{len}").as_bytes()).ok();

                file.set_len(mapping_size as u64)
                    .map_err(|e| e.to_string())?;
            }

            if backing_file_accessible.exists() {
                let accessible = std::fs::read_to_string(&backing_file_accessible)
                    .map_err(|e| e.to_string())?
                    .parse::<usize>()
                    .map_err(|e| e.to_string())?;
                accessible_size = accessible_size.max(accessible);
            } else {
                accessible_size = accessible_size.max(len);
            }

            accessible_size = accessible_size.min(mapping_size);
            memory_fd = file.into_raw_fd();
        }

        // Compute the flags
        let mut flags = match memory_fd {
            fd if fd < 0 => libc::MAP_ANON,
            _ => libc::MAP_FILE,
        };
        flags |= match memory_type {
            MmapType::Private => libc::MAP_PRIVATE,
            MmapType::Shared => libc::MAP_SHARED,
        };

        Ok(if accessible_size == mapping_size {
            // Allocate a single read-write region at once.
            let ptr = unsafe {
                libc::mmap(
                    ptr::null_mut(),
                    mapping_size,
                    libc::PROT_READ | libc::PROT_WRITE,
                    flags,
                    memory_fd,
                    0,
                )
            };
            if ptr as isize == -1_isize {
                return Err(io::Error::last_os_error().to_string());
            }

            Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: memory_fd != -1 && memory_type == MmapType::Shared,
            }
        } else {
            // Reserve the mapping size.
            let ptr = unsafe {
                libc::mmap(
                    ptr::null_mut(),
                    mapping_size,
                    libc::PROT_NONE,
                    flags,
                    memory_fd,
                    0,
                )
            };
            if ptr as isize == -1_isize {
                return Err(io::Error::last_os_error().to_string());
            }

            let mut result = Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: memory_fd != -1 && memory_type == MmapType::Shared,
            };

            if accessible_size != 0 {
                // Commit the accessible size.
                result.make_accessible(0, accessible_size)?;
            }

            result
        })
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
    #[cfg(target_os = "windows")]
    pub fn accessible_reserved(
        accessible_size: usize,
        mapping_size: usize,
        _backing_file: Option<std::path::PathBuf>,
        _memory_type: MmapType,
    ) -> Result<Self, String> {
        use windows_sys::Win32::System::Memory::{
            MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE, VirtualAlloc,
        };

        let page_size = region::page::size();
        assert_le!(accessible_size, mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
        assert_eq!(accessible_size & (page_size - 1), 0);

        // VirtualAlloc may return ERROR_INVALID_PARAMETER if the size is zero,
        // so just special-case that.
        if mapping_size == 0 {
            return Ok(Self::new());
        }

        Ok(if accessible_size == mapping_size {
            // Allocate a single read-write region at once.
            let ptr = unsafe {
                VirtualAlloc(
                    ptr::null_mut(),
                    mapping_size,
                    MEM_RESERVE | MEM_COMMIT,
                    PAGE_READWRITE,
                )
            };
            if ptr.is_null() {
                return Err(io::Error::last_os_error().to_string());
            }

            Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: false,
            }
        } else {
            // Reserve the mapping size.
            let ptr =
                unsafe { VirtualAlloc(ptr::null_mut(), mapping_size, MEM_RESERVE, PAGE_NOACCESS) };
            if ptr.is_null() {
                return Err(io::Error::last_os_error().to_string());
            }

            let mut result = Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: false,
            };

            if accessible_size != 0 {
                // Commit the accessible size.
                result.make_accessible(0, accessible_size)?;
            }

            result
        })
    }

    /// Make the memory starting at `start` and extending for `len` bytes accessible.
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
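    ///
    /// This is typically used to commit pages lazily after a larger mapping
    /// has been reserved up front with [`Mmap::accessible_reserved`].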
    #[cfg(not(target_os = "windows"))]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
        let page_size = region::page::size();
        assert_eq!(start & (page_size - 1), 0);
        assert_eq!(len & (page_size - 1), 0);
        assert_le!(len, self.total_size);
        assert_le!(start, self.total_size - len);

        // Commit the accessible size.
        let ptr = self.ptr as *const u8;
        unsafe { region::protect(ptr.add(start), len, region::Protection::READ_WRITE) }
            .map_err(|e| e.to_string())
    }

    /// Make the memory starting at `start` and extending for `len` bytes accessible.
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
    #[cfg(target_os = "windows")]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
        use std::ffi::c_void;
        use windows_sys::Win32::System::Memory::{MEM_COMMIT, PAGE_READWRITE, VirtualAlloc};
        let page_size = region::page::size();
        assert_eq!(start & (page_size - 1), 0);
        assert_eq!(len & (page_size - 1), 0);
        assert_le!(len, self.len());
        assert_le!(start, self.len() - len);

        // Commit the accessible size.
        let ptr = self.ptr as *const u8;
        if unsafe {
            VirtualAlloc(
                ptr.add(start) as *mut c_void,
                len,
                MEM_COMMIT,
                PAGE_READWRITE,
            )
        }
        .is_null()
        {
            return Err(io::Error::last_os_error().to_string());
        }

        Ok(())
    }

    /// Return the entire mapped region as a slice of u8.
    pub fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr as *const u8, self.total_size) }
    }

    /// Return the accessible portion of the mapped region as a slice of u8.
    pub fn as_slice_accessible(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr as *const u8, self.accessible_size) }
    }

    /// Return the first `size` bytes of the mapped region as a slice of u8,
    /// capped at the total mapping size.
    pub fn as_slice_arbitary(&self, size: usize) -> &[u8] {
        let size = usize::min(size, self.total_size);
        unsafe { slice::from_raw_parts(self.ptr as *const u8, size) }
    }

    /// Return the entire mapped region as a mutable slice of u8.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, self.total_size) }
    }

    /// Return the accessible portion of the mapped region as a mutable slice of u8.
    pub fn as_mut_slice_accessible(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, self.accessible_size) }
    }

    /// Return the first `size` bytes of the mapped region as a mutable slice of u8,
    /// capped at the total mapping size.
    pub fn as_mut_slice_arbitary(&mut self, size: usize) -> &mut [u8] {
        let size = usize::min(size, self.total_size);
        unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, size) }
    }

    /// Return the allocated memory as a pointer to u8.
    pub fn as_ptr(&self) -> *const u8 {
        self.ptr as *const u8
    }

    /// Return the allocated memory as a mutable pointer to u8.
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.ptr as *mut u8
    }

    /// Return the total length of the mapping, in bytes.
    pub fn len(&self) -> usize {
        self.total_size
    }

    /// Return whether any memory has been allocated.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Duplicate this mapping in a new memory mapping.
    #[deprecated = "use `copy` instead"]
    pub fn duplicate(&mut self, size_hint: Option<usize>) -> Result<Self, String> {
        self.copy(size_hint)
    }

    /// Duplicate this mapping in a new memory mapping.
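    ///
    /// The new mapping is private, has the same total size as `self`, and at
    /// least the accessible portion (or `size_hint` bytes, if larger) is
    /// copied into it.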
    pub fn copy(&mut self, size_hint: Option<usize>) -> Result<Self, String> {
        // NOTE: `accessible_size` is not the same as the used size, since it is
        //       not automatically updated when the pre-provisioned space is used.
        let mut copy_size = self.accessible_size;
        if let Some(size_hint) = size_hint {
            copy_size = usize::max(copy_size, size_hint);
        }

        let mut new =
            Self::accessible_reserved(copy_size, self.total_size, None, MmapType::Private)?;
        new.as_mut_slice_arbitary(copy_size)
            .copy_from_slice(self.as_slice_arbitary(copy_size));
        Ok(new)
    }
}

impl Drop for Mmap {
    #[cfg(not(target_os = "windows"))]
    fn drop(&mut self) {
        if self.total_size != 0 {
            if self.sync_on_drop {
                let r = unsafe {
                    libc::msync(
                        self.ptr as *mut libc::c_void,
                        self.total_size,
                        libc::MS_SYNC | libc::MS_INVALIDATE,
                    )
                };
                assert_eq!(r, 0, "msync failed: {}", io::Error::last_os_error());
            }
            let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.total_size) };
            assert_eq!(r, 0, "munmap failed: {}", io::Error::last_os_error());
        }
    }

    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        if !self.is_empty() {
            use std::ffi::c_void;
            use windows_sys::Win32::System::Memory::{MEM_RELEASE, VirtualFree};
            let r = unsafe { VirtualFree(self.ptr as *mut c_void, 0, MEM_RELEASE) };
            assert_ne!(r, 0);
        }
    }
}

fn _assert() {
    fn _assert_send_sync<T: Send + Sync>() {}
    _assert_send_sync::<Mmap>();
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_up_to_page_size() {
        assert_eq!(round_up_to_page_size(0, 4096), 0);
        assert_eq!(round_up_to_page_size(1, 4096), 4096);
        assert_eq!(round_up_to_page_size(4096, 4096), 4096);
        assert_eq!(round_up_to_page_size(4097, 4096), 8192);
    }
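
    // Sketches of typical usage of the public `Mmap` API; these tests rely
    // only on items defined in this module.
    #[test]
    fn test_with_at_least_is_writable() {
        let page_size = region::page::size();
        let mut map = Mmap::with_at_least(1).expect("mmap failed");
        // The requested size is rounded up to a whole number of pages.
        assert!(map.len() >= 1);
        assert_eq!(map.len() % page_size, 0);
        // The mapping is fully accessible and initially zero-filled.
        assert_eq!(map.as_slice()[0], 0);
        map.as_mut_slice()[0] = 0xAA;
        assert_eq!(map.as_slice()[0], 0xAA);
    }

    #[test]
    fn test_make_accessible_commits_reserved_pages() {
        let page_size = region::page::size();
        // Reserve two pages with nothing accessible yet...
        let mut map = Mmap::accessible_reserved(0, 2 * page_size, None, MmapType::Private)
            .expect("mmap failed");
        assert_eq!(map.len(), 2 * page_size);
        // ...then commit the first page and write to it.
        map.make_accessible(0, page_size)
            .expect("make_accessible failed");
        map.as_mut_slice_arbitary(page_size)[0] = 1;
        assert_eq!(map.as_slice_arbitary(page_size)[0], 1);
    }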
}