wasmer_vm/mmap.rs

// This file contains code from external sources.
// Attributions: https://github.com/wasmerio/wasmer/blob/main/docs/ATTRIBUTIONS.md

//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.

use more_asserts::assert_le;
use std::io;
use std::ptr;
use std::slice;

/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
#[derive(Debug)]
pub struct Mmap {
    // Note that this is stored as a `usize` instead of a `*const` or `*mut`
    // pointer to allow this structure to be natively `Send` and `Sync` without
    // `unsafe impl`. This type is sendable across threads and shareable since
    // the coordination all happens at the OS layer.
    ptr: usize,
    total_size: usize,
    accessible_size: usize,
    #[cfg_attr(target_os = "windows", allow(dead_code))]
    sync_on_drop: bool,
}

/// The type of mmap to create.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum MmapType {
    /// The memory is private to the process and not shared with other processes.
    Private,
    /// The memory is shared with other processes. This is only supported on Unix.
    /// When the memory is flushed it will update the file data.
    Shared,
}

impl Mmap {
    /// Construct a new empty instance of `Mmap`.
    pub fn new() -> Self {
        // Rust's slices require non-null pointers, even when empty. `Vec`
        // contains code to create a non-null dangling pointer value when
        // constructed empty, so we reuse that here.
        let empty = Vec::<u8>::new();
        Self {
            ptr: empty.as_ptr() as usize,
            total_size: 0,
            accessible_size: 0,
            sync_on_drop: false,
        }
    }

    /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
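    ///
    /// A minimal usage sketch (illustrative only; marked `ignore` so it is not
    /// compiled as a doctest):
    ///
    /// ```ignore
    /// // Asking for a single byte still yields at least one whole page.
    /// let mut map = Mmap::with_at_least(1).unwrap();
    /// assert!(map.len() >= region::page::size());
    /// // The accessible region is zero-filled and writable.
    /// map.as_mut_slice_accessible()[0] = 0xAB;
    /// ```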
    pub fn with_at_least(size: usize) -> Result<Self, String> {
        let page_size = region::page::size();
        let rounded_size = size.next_multiple_of(page_size);
        Self::accessible_reserved(rounded_size, rounded_size, None, MmapType::Private)
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
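    ///
    /// Passing a backing file together with [`MmapType::Shared`] causes the mapping to be
    /// flushed back to that file when it is dropped. A hedged sketch of the reserve-then-commit
    /// pattern this enables (illustrative only; marked `ignore` so it is not compiled as a
    /// doctest):
    ///
    /// ```ignore
    /// // Reserve 1 MiB of address space but make only the first 64 KiB usable now.
    /// let map = Mmap::accessible_reserved(64 * 1024, 1024 * 1024, None, MmapType::Private)?;
    /// assert_eq!(map.len(), 1024 * 1024);
    /// ```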
    #[cfg(not(target_os = "windows"))]
    pub fn accessible_reserved(
        mut accessible_size: usize,
        mapping_size: usize,
        mut backing_file: Option<std::path::PathBuf>,
        memory_type: MmapType,
    ) -> Result<Self, String> {
        use std::os::fd::IntoRawFd;

        let page_size = region::page::size();
        assert_le!(accessible_size, mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
        assert_eq!(accessible_size & (page_size - 1), 0);

        // Mmap may return EINVAL if the size is zero, so just
        // special-case that.
        if mapping_size == 0 {
            return Ok(Self::new());
        }

        // If there is a backing file, resize the file so that it's at least
        // `mapping_size` bytes.
        let mut memory_fd = -1;
        if let Some(backing_file_path) = &mut backing_file {
            let file = std::fs::OpenOptions::new()
                .read(true)
                .write(true)
                .open(&backing_file_path)
                .map_err(|e| e.to_string())?;

            // A sidecar file (the backing path with an `.accessible` extension) records how
            // many bytes of the backing file held data before it was grown to `mapping_size`.
            let mut backing_file_accessible = backing_file_path.clone();
            backing_file_accessible.set_extension("accessible");

            let len = file.metadata().map_err(|e| e.to_string())?.len() as usize;
            if len < mapping_size {
                std::fs::write(&backing_file_accessible, format!("{len}").as_bytes()).ok();

                file.set_len(mapping_size as u64)
                    .map_err(|e| e.to_string())?;
            }

            if backing_file_accessible.exists() {
                let accessible = std::fs::read_to_string(&backing_file_accessible)
                    .map_err(|e| e.to_string())?
                    .parse::<usize>()
                    .map_err(|e| e.to_string())?;
                accessible_size = accessible_size.max(accessible);
            } else {
                accessible_size = accessible_size.max(len);
            }

            accessible_size = accessible_size.min(mapping_size);
            memory_fd = file.into_raw_fd();
        }

        // Compute the flags
        let mut flags = match memory_fd {
            fd if fd < 0 => libc::MAP_ANON,
            _ => libc::MAP_FILE,
        };
        flags |= match memory_type {
            MmapType::Private => libc::MAP_PRIVATE,
            MmapType::Shared => libc::MAP_SHARED,
        };

        Ok(if accessible_size == mapping_size {
            // Allocate a single read-write region at once.
            let ptr = unsafe {
                libc::mmap(
                    ptr::null_mut(),
                    mapping_size,
                    libc::PROT_READ | libc::PROT_WRITE,
                    flags,
                    memory_fd,
                    0,
                )
            };
            if ptr as isize == -1_isize {
                return Err(io::Error::last_os_error().to_string());
            }

            Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: memory_fd != -1 && memory_type == MmapType::Shared,
            }
        } else {
            // Reserve the mapping size.
            let ptr = unsafe {
                libc::mmap(
                    ptr::null_mut(),
                    mapping_size,
                    libc::PROT_NONE,
                    flags,
                    memory_fd,
                    0,
                )
            };
            if ptr as isize == -1_isize {
                return Err(io::Error::last_os_error().to_string());
            }

            let mut result = Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: memory_fd != -1 && memory_type == MmapType::Shared,
            };

            if accessible_size != 0 {
                // Commit the accessible size.
                result.make_accessible(0, accessible_size)?;
            }

            result
        })
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
    #[cfg(target_os = "windows")]
    pub fn accessible_reserved(
        accessible_size: usize,
        mapping_size: usize,
        _backing_file: Option<std::path::PathBuf>,
        _memory_type: MmapType,
    ) -> Result<Self, String> {
        use windows_sys::Win32::System::Memory::{
            MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE, VirtualAlloc,
        };

        let page_size = region::page::size();
        assert_le!(accessible_size, mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
        assert_eq!(accessible_size & (page_size - 1), 0);

        // VirtualAlloc may return ERROR_INVALID_PARAMETER if the size is zero,
        // so just special-case that.
        if mapping_size == 0 {
            return Ok(Self::new());
        }

        Ok(if accessible_size == mapping_size {
            // Allocate a single read-write region at once.
            let ptr = unsafe {
                VirtualAlloc(
                    ptr::null_mut(),
                    mapping_size,
                    MEM_RESERVE | MEM_COMMIT,
                    PAGE_READWRITE,
                )
            };
            if ptr.is_null() {
                return Err(io::Error::last_os_error().to_string());
            }

            Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: false,
            }
        } else {
            // Reserve the mapping size.
            let ptr =
                unsafe { VirtualAlloc(ptr::null_mut(), mapping_size, MEM_RESERVE, PAGE_NOACCESS) };
            if ptr.is_null() {
                return Err(io::Error::last_os_error().to_string());
            }

            let mut result = Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: false,
            };

            if accessible_size != 0 {
                // Commit the accessible size.
                result.make_accessible(0, accessible_size)?;
            }

            result
        })
    }

    /// Make the memory starting at `start` and extending for `len` bytes accessible.
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
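    ///
    /// A small sketch of committing a previously reserved page on demand (illustrative
    /// only; marked `ignore` so it is not compiled as a doctest):
    ///
    /// ```ignore
    /// let page = region::page::size();
    /// // Reserve 16 pages with nothing committed, then commit just the first page.
    /// let mut map = Mmap::accessible_reserved(0, 16 * page, None, MmapType::Private)?;
    /// map.make_accessible(0, page)?;
    /// ```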
    #[cfg(not(target_os = "windows"))]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
        let page_size = region::page::size();
        assert_eq!(start & (page_size - 1), 0);
        assert_eq!(len & (page_size - 1), 0);
        assert_le!(len, self.total_size);
        assert_le!(start, self.total_size - len);

        // Commit the accessible size.
        let ptr = self.ptr as *const u8;
        unsafe { region::protect(ptr.add(start), len, region::Protection::READ_WRITE) }
            .map_err(|e| e.to_string())
    }

    /// Make the memory starting at `start` and extending for `len` bytes accessible.
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
    #[cfg(target_os = "windows")]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
        use std::ffi::c_void;
        use windows_sys::Win32::System::Memory::{MEM_COMMIT, PAGE_READWRITE, VirtualAlloc};
        let page_size = region::page::size();
        assert_eq!(start & (page_size - 1), 0);
        assert_eq!(len & (page_size - 1), 0);
        assert_le!(len, self.len());
        assert_le!(start, self.len() - len);

        // Commit the accessible size.
        let ptr = self.ptr as *const u8;
        if unsafe {
            VirtualAlloc(
                ptr.add(start) as *mut c_void,
                len,
                MEM_COMMIT,
                PAGE_READWRITE,
            )
        }
        .is_null()
        {
            return Err(io::Error::last_os_error().to_string());
        }

        Ok(())
    }

    /// Return the entire reserved mapping as a slice of u8.
    pub fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr as *const u8, self.total_size) }
    }

    /// Return only the accessible portion of the memory as a slice of u8.
    pub fn as_slice_accessible(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr as *const u8, self.accessible_size) }
    }

    /// Return the first `size` bytes of the memory as a slice of u8,
    /// capped at the total mapping size.
    pub fn as_slice_arbitary(&self, size: usize) -> &[u8] {
        let size = usize::min(size, self.total_size);
        unsafe { slice::from_raw_parts(self.ptr as *const u8, size) }
    }

    /// Return the entire reserved mapping as a mutable slice of u8.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, self.total_size) }
    }

    /// Return only the accessible portion of the memory as a mutable slice of u8.
    pub fn as_mut_slice_accessible(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, self.accessible_size) }
    }

    /// Return the first `size` bytes of the memory as a mutable slice of u8,
    /// capped at the total mapping size.
    pub fn as_mut_slice_arbitary(&mut self, size: usize) -> &mut [u8] {
        let size = usize::min(size, self.total_size);
        unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, size) }
    }

    /// Return the allocated memory as a pointer to u8.
    pub fn as_ptr(&self) -> *const u8 {
        self.ptr as *const u8
    }

    /// Return the allocated memory as a mutable pointer to u8.
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.ptr as *mut u8
    }

    /// Return the total length of the reserved mapping, in bytes.
    pub fn len(&self) -> usize {
        self.total_size
    }

    /// Return whether any memory has been allocated.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Duplicate this mapping's contents in a new memory mapping.
    #[deprecated = "use `copy` instead"]
    pub fn duplicate(&mut self, size_hint: Option<usize>) -> Result<Self, String> {
        self.copy(size_hint)
    }

    /// Duplicate this mapping's contents in a new memory mapping.
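    ///
    /// A short usage sketch (illustrative only; marked `ignore` so it is not
    /// compiled as a doctest):
    ///
    /// ```ignore
    /// let mut original = Mmap::with_at_least(region::page::size())?;
    /// original.as_mut_slice_accessible()[0] = 7;
    /// let duplicate = original.copy(None)?;
    /// assert_eq!(duplicate.as_slice_accessible()[0], 7);
    /// ```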
    pub fn copy(&mut self, size_hint: Option<usize>) -> Result<Self, String> {
        // NOTE: accessible_size != used size as the value is not
        //       automatically updated when the pre-provisioned space is used
        let mut copy_size = self.accessible_size;
        if let Some(size_hint) = size_hint {
            copy_size = usize::max(copy_size, size_hint);
        }

        let mut new =
            Self::accessible_reserved(copy_size, self.total_size, None, MmapType::Private)?;
        new.as_mut_slice_arbitary(copy_size)
            .copy_from_slice(self.as_slice_arbitary(copy_size));
        Ok(new)
    }
}

impl Drop for Mmap {
    #[cfg(not(target_os = "windows"))]
    fn drop(&mut self) {
        if self.total_size != 0 {
            if self.sync_on_drop {
                let r = unsafe {
                    libc::msync(
                        self.ptr as *mut libc::c_void,
                        self.total_size,
                        libc::MS_SYNC | libc::MS_INVALIDATE,
                    )
                };
                assert_eq!(r, 0, "msync failed: {}", io::Error::last_os_error());
            }
            let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.total_size) };
            assert_eq!(r, 0, "munmap failed: {}", io::Error::last_os_error());
        }
    }

    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        if !self.is_empty() {
            use std::ffi::c_void;
            use windows_sys::Win32::System::Memory::{MEM_RELEASE, VirtualFree};
            let r = unsafe { VirtualFree(self.ptr as *mut c_void, 0, MEM_RELEASE) };
            assert_ne!(r, 0);
        }
    }
}

fn _assert() {
    fn _assert_send_sync<T: Send + Sync>() {}
    _assert_send_sync::<Mmap>();
}
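
// A minimal, hedged test sketch exercising the platform-independent parts of the API
// above (`with_at_least`, the accessible slices, and `copy`). It assumes the host allows
// anonymous mappings and is not part of the original file.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn with_at_least_rounds_up_and_is_writable() {
        let page = region::page::size();
        // Asking for a single byte still yields at least one whole page.
        let mut map = Mmap::with_at_least(1).unwrap();
        assert!(map.len() >= page);
        // The accessible region starts zero-filled and is writable.
        assert_eq!(map.as_slice_accessible()[0], 0);
        map.as_mut_slice_accessible()[0] = 0xAB;
        assert_eq!(map.as_slice_accessible()[0], 0xAB);
    }

    #[test]
    fn copy_preserves_accessible_contents() {
        let page = region::page::size();
        let mut original = Mmap::with_at_least(page).unwrap();
        original.as_mut_slice_accessible()[0] = 42;
        let duplicate = original.copy(None).unwrap();
        assert_eq!(duplicate.as_slice_accessible()[0], 42);
    }
}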