wasmer_sys_utils/memory/fd_memory/
memories.rs1use std::{cell::UnsafeCell, convert::TryInto, ptr::NonNull, rc::Rc, sync::RwLock};
8
9use wasmer::{Bytes, MemoryError, MemoryType, Pages};
10use wasmer_types::{MemoryStyle, WASM_PAGE_SIZE};
11use wasmer_vm::{
12 LinearMemory, MaybeInstanceOwned, ThreadConditions, Trap, VMMemoryDefinition, WaiterError,
13};
14
15use super::fd_mmap::FdMmap;
16
/// The backing storage for a wasm linear memory: an fd-backed mmap plus the
/// `VMMemoryDefinition` that generated code reads the base/length from.
#[derive(Debug)]
struct WasmMmap {
    // The underlying fd-backed mmap allocation (accessible + reserved bytes).
    alloc: FdMmap,
    // Cached size of the currently accessible region, in wasm pages.
    size: Pages,
    // The memory definition handed to generated code; either heap-owned here
    // or living inside a `VMContext` owned by an instance.
    vm_memory_definition: MaybeInstanceOwned<VMMemoryDefinition>,
}
37
impl WasmMmap {
    /// Raw pointer to the `VMMemoryDefinition` describing this memory.
    fn get_vm_memory_definition(&self) -> NonNull<VMMemoryDefinition> {
        self.vm_memory_definition.as_ptr()
    }

    /// Current size in wasm pages, derived from the live `current_length`
    /// field (the source of truth for generated code) rather than the cached
    /// `self.size`.
    fn size(&self) -> Pages {
        // SAFETY: the pointer returned by `get_vm_memory_definition` is valid
        // for the lifetime of `self` (it is either host-owned or
        // instance-owned per `MaybeInstanceOwned`).
        unsafe {
            let md_ptr = self.get_vm_memory_definition();
            let md = md_ptr.as_ref();
            Bytes::from(md.current_length).try_into().unwrap()
        }
    }

    /// Grows the accessible region by `delta` pages.
    ///
    /// Returns the previous size in pages on success, or
    /// `MemoryError::CouldNotGrow` when the growth would overflow, exceed the
    /// configured maximum, or reach the wasm page limit.
    fn grow(&mut self, delta: Pages, conf: VMMemoryConfig) -> Result<Pages, MemoryError> {
        // Growing by zero pages is a no-op that still reports the size.
        if delta.0 == 0 {
            return Ok(self.size);
        }

        let new_pages = self
            .size
            .checked_add(delta)
            .ok_or(MemoryError::CouldNotGrow {
                current: self.size,
                attempted_delta: delta,
            })?;
        let prev_pages = self.size;

        // Enforce the memory-type maximum, if one was declared.
        if let Some(maximum) = conf.maximum {
            if new_pages > maximum {
                return Err(MemoryError::CouldNotGrow {
                    current: self.size,
                    attempted_delta: delta,
                });
            }
        }

        // Never reach the absolute wasm32 page limit.
        if new_pages >= Pages::max_value() {
            return Err(MemoryError::CouldNotGrow {
                current: self.size,
                attempted_delta: delta,
            });
        }

        let delta_bytes = delta.bytes().0;
        let prev_bytes = prev_pages.bytes().0;
        let new_bytes = new_pages.bytes().0;

        // `alloc.len() - offset_guard_size` is the usable capacity of the
        // current mapping (assumes alloc.len() >= offset_guard_size — holds
        // for mappings created by `new_internal`/previous grows).
        if new_bytes > self.alloc.len() - conf.offset_guard_size {
            // The current mapping is too small: create a larger one
            // (new size plus guard region) and copy the old contents over.
            let guard_bytes = conf.offset_guard_size;
            let request_bytes =
                new_bytes
                    .checked_add(guard_bytes)
                    .ok_or_else(|| MemoryError::CouldNotGrow {
                        current: new_pages,
                        attempted_delta: Bytes(guard_bytes).try_into().unwrap(),
                    })?;

            let mut new_mmap = FdMmap::accessible_reserved(new_bytes, request_bytes)
                .map_err(MemoryError::Region)?;

            // Copy everything that was usable in the old mapping.
            let copy_len = self.alloc.len() - conf.offset_guard_size;
            new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.alloc.as_slice()[..copy_len]);

            self.alloc = new_mmap;
        } else if delta_bytes > 0 {
            // The reservation already covers the new size: just make the
            // additional pages accessible in place.
            self.alloc
                .make_accessible(prev_bytes, delta_bytes)
                .map_err(MemoryError::Region)?;
        }

        self.size = new_pages;

        // Publish the new base/length to generated code.
        // SAFETY: the definition pointer is valid for the lifetime of `self`;
        // `base` may have changed if the mapping was replaced above.
        unsafe {
            let mut md_ptr = self.vm_memory_definition.as_ptr();
            let md = md_ptr.as_mut();
            md.current_length = new_pages.bytes().0;
            md.base = self.alloc.as_mut_ptr() as _;
        }

        Ok(prev_pages)
    }

    /// Grows the memory so its byte size is at least `min_size`, rounding the
    /// needed growth up to whole wasm pages. No-op if already large enough.
    fn grow_at_least(&mut self, min_size: u64, conf: VMMemoryConfig) -> Result<(), MemoryError> {
        let cur_size = self.size.bytes().0 as u64;
        if cur_size < min_size {
            let growth = min_size - cur_size;
            // Ceiling division: round the byte growth up to whole pages
            // (growth > 0 here, so the `growth - 1` cannot underflow).
            let growth_pages = ((growth - 1) / WASM_PAGE_SIZE as u64) + 1;
            self.grow(Pages(growth_pages as u32), conf)?;
        }

        Ok(())
    }

    /// Resets the tracked size back to zero pages.
    ///
    /// NOTE(review): this only zeroes the cached `size`; it does not update
    /// `vm_memory_definition.current_length` nor decommit the mapping —
    /// confirm callers re-grow (or reinitialize) before reuse.
    fn reset(&mut self) -> Result<(), MemoryError> {
        self.size.0 = 0;
        Ok(())
    }

    /// Deep-copies this mmap: duplicates only the currently accessible
    /// `size` bytes into a fresh mapping with its own host-owned
    /// `VMMemoryDefinition`.
    pub fn copy(&mut self) -> Result<Self, MemoryError> {
        let mem_length = self.size.bytes().0;
        let mut alloc = self
            .alloc
            .duplicate(Some(mem_length))
            .map_err(MemoryError::Generic)?;
        let base_ptr = alloc.as_mut_ptr();
        Ok(Self {
            vm_memory_definition: MaybeInstanceOwned::Host(Box::new(UnsafeCell::new(
                VMMemoryDefinition {
                    base: base_ptr,
                    current_length: mem_length,
                },
            ))),
            alloc,
            size: self.size,
        })
    }
}
168
/// Immutable configuration shared by owned and shared memories; captured at
/// creation time in `VMOwnedMemory::new_internal`.
#[derive(Debug, Clone)]
struct VMMemoryConfig {
    // Optional maximum size, in wasm pages, enforced on growth.
    maximum: Option<Pages>,
    // The wasm memory type this memory was created from.
    memory: MemoryType,
    // The memory style (static bound vs. dynamic) used for allocation.
    style: MemoryStyle,
    // Size in bytes of the trailing guard region kept inaccessible.
    offset_guard_size: usize,
}
182
183impl VMMemoryConfig {
184 fn ty(&self, minimum: Pages) -> MemoryType {
185 let mut out = self.memory;
186 out.minimum = minimum;
187
188 out
189 }
190
191 fn style(&self) -> MemoryStyle {
192 self.style
193 }
194}
195
/// A linear memory with exclusive ownership of its backing mmap.
#[derive(Debug)]
pub struct VMOwnedMemory {
    // The backing mmap and memory definition.
    mmap: WasmMmap,
    // Configuration captured at creation (maximum, style, guard size).
    config: VMMemoryConfig,
}

// SAFETY: NOTE(review) — asserts the raw pointers inside `WasmMmap` may be
// moved to / referenced from other threads. Sound only if growth and access
// are externally synchronized; confirm against the embedder's usage.
unsafe impl Send for VMOwnedMemory {}
unsafe impl Sync for VMOwnedMemory {}
207
impl VMOwnedMemory {
    /// Creates a new host memory for the provided [`MemoryType`] and
    /// [`MemoryStyle`], allocating a fresh fd-backed mapping with a
    /// heap-owned `VMMemoryDefinition`.
    pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, None) }
    }

    /// Creates a new host memory whose `VMMemoryDefinition` lives at a
    /// caller-provided location (typically inside a `VMContext`) instead of
    /// being heap-allocated here.
    ///
    /// # Safety
    /// `vm_memory_location` must point to a valid, writable
    /// `VMMemoryDefinition` that outlives the returned memory.
    pub unsafe fn from_definition(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
    ) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, Some(vm_memory_location)) }
    }

    /// Shared constructor: validates the requested type, reserves and maps
    /// the backing region, then wires up the `VMMemoryDefinition`.
    ///
    /// # Safety
    /// If `vm_memory_location` is `Some`, it must satisfy the contract
    /// documented on [`VMOwnedMemory::from_definition`].
    unsafe fn new_internal(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: Option<NonNull<VMMemoryDefinition>>,
    ) -> Result<Self, MemoryError> {
        // Reject types that exceed the wasm32 addressable page count.
        if memory.minimum > Pages::max_value() {
            return Err(MemoryError::MinimumMemoryTooLarge {
                min_requested: memory.minimum,
                max_allowed: Pages::max_value(),
            });
        }
        if let Some(max) = memory.maximum {
            if max > Pages::max_value() {
                return Err(MemoryError::MaximumMemoryTooLarge {
                    max_requested: max,
                    max_allowed: Pages::max_value(),
                });
            }
            if max < memory.minimum {
                return Err(MemoryError::InvalidMemory {
                    reason: format!(
                        "the maximum ({} pages) is less than the minimum ({} pages)",
                        max.0, memory.minimum.0
                    ),
                });
            }
        }

        let offset_guard_bytes = style.offset_guard_size() as usize;

        // Static memories reserve address space for the full bound up front;
        // dynamic memories reserve only the minimum and remap on growth.
        let minimum_pages = match style {
            MemoryStyle::Dynamic { .. } => memory.minimum,
            MemoryStyle::Static { bound, .. } => {
                assert!(*bound >= memory.minimum);
                *bound
            }
        };
        let minimum_bytes = minimum_pages.bytes().0;
        let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
        let mapped_pages = memory.minimum;
        let mapped_bytes = mapped_pages.bytes();

        // Only the minimum is made accessible; the remainder of the
        // reservation (up to the bound plus the guard region) stays reserved.
        let mut alloc = FdMmap::accessible_reserved(mapped_bytes.0, request_bytes)
            .map_err(MemoryError::Region)?;
        let base_ptr = alloc.as_mut_ptr();
        let mem_length = memory.minimum.bytes().0;
        let mmap = WasmMmap {
            vm_memory_definition: if let Some(mem_loc) = vm_memory_location {
                {
                    let mut ptr = mem_loc;
                    // SAFETY: the caller guarantees `mem_loc` is valid and
                    // writable (see `from_definition`'s contract).
                    let md = unsafe { ptr.as_mut() };
                    md.base = base_ptr;
                    md.current_length = mem_length;
                }
                MaybeInstanceOwned::Instance(mem_loc)
            } else {
                MaybeInstanceOwned::Host(Box::new(UnsafeCell::new(VMMemoryDefinition {
                    base: base_ptr,
                    current_length: mem_length,
                })))
            },
            alloc,
            size: memory.minimum,
        };

        Ok(Self {
            mmap,
            config: VMMemoryConfig {
                maximum: memory.maximum,
                offset_guard_size: offset_guard_bytes,
                memory: *memory,
                style: *style,
            },
        })
    }

    /// Converts this owned memory into a [`VMSharedMemory`], wrapping the
    /// mapping in a lock and adding fresh wait/notify state.
    pub fn to_shared(self) -> VMSharedMemory {
        VMSharedMemory {
            mmap: Rc::new(RwLock::new(self.mmap)),
            config: self.config,
            conditions: ThreadConditions::new(),
        }
    }

    /// Deep-copies this memory into an independent instance with its own
    /// mapping and memory definition.
    pub fn copy(&mut self) -> Result<Self, MemoryError> {
        Ok(Self {
            mmap: self.mmap.copy()?,
            config: self.config.clone(),
        })
    }
}
327
328impl LinearMemory for VMOwnedMemory {
329 fn ty(&self) -> MemoryType {
331 let minimum = self.mmap.size();
332 self.config.ty(minimum)
333 }
334
335 fn size(&self) -> Pages {
337 self.mmap.size()
338 }
339
340 fn style(&self) -> MemoryStyle {
342 self.config.style()
343 }
344
345 fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
350 self.mmap.grow(delta, self.config.clone())
351 }
352
353 fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
356 self.mmap.grow_at_least(min_size, self.config.clone())
357 }
358
359 fn reset(&mut self) -> Result<(), MemoryError> {
360 self.mmap.reset()?;
361 Ok(())
362 }
363
364 fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
366 self.mmap.vm_memory_definition.as_ptr()
367 }
368
369 fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
371 Err(MemoryError::MemoryNotShared)
372 }
373
374 fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
376 let forked = Self::copy(self)?;
377 Ok(Box::new(forked))
378 }
379}
380
/// A linear memory that can be shared between handles; clones alias the same
/// underlying mapping via the reference-counted lock.
#[derive(Debug, Clone)]
pub struct VMSharedMemory {
    // The backing mmap, behind a lock so growth and access can coexist.
    mmap: Rc<RwLock<WasmMmap>>,
    // Configuration captured at creation (maximum, style, guard size).
    config: VMMemoryConfig,
    // Wait/notify state for `memory.atomic.wait`/`notify`.
    conditions: ThreadConditions,
}

// SAFETY: NOTE(review) — `Rc`'s reference count is NOT atomic; if clones of
// this type are actually sent across threads (as these impls permit), the
// refcount updates race. Upstream uses `Arc` here — confirm whether this
// fd-memory variant is guaranteed single-threaded, or switch to `Arc`.
unsafe impl Send for VMSharedMemory {}
unsafe impl Sync for VMSharedMemory {}
393
394impl VMSharedMemory {
395 pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
400 Ok(VMOwnedMemory::new(memory, style)?.to_shared())
401 }
402
403 pub unsafe fn from_definition(
411 memory: &MemoryType,
412 style: &MemoryStyle,
413 vm_memory_location: NonNull<VMMemoryDefinition>,
414 ) -> Result<Self, MemoryError> {
415 let owned = unsafe { VMOwnedMemory::from_definition(memory, style, vm_memory_location)? };
416 Ok(owned.to_shared())
417 }
418
419 pub fn copy(&mut self) -> Result<Self, MemoryError> {
421 let mut guard = self.mmap.write().unwrap();
422 Ok(Self {
423 mmap: Rc::new(RwLock::new(guard.copy()?)),
424 config: self.config.clone(),
425 conditions: ThreadConditions::new(),
426 })
427 }
428}
429
430impl LinearMemory for VMSharedMemory {
431 fn ty(&self) -> MemoryType {
433 let minimum = {
434 let guard = self.mmap.read().unwrap();
435 guard.size()
436 };
437 self.config.ty(minimum)
438 }
439
440 fn size(&self) -> Pages {
442 let guard = self.mmap.read().unwrap();
443 guard.size()
444 }
445
446 fn reset(&mut self) -> Result<(), MemoryError> {
448 let mut guard = self.mmap.write().unwrap();
449 guard.reset()?;
450 Ok(())
451 }
452
453 fn style(&self) -> MemoryStyle {
455 self.config.style()
456 }
457
458 fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
463 let mut guard = self.mmap.write().unwrap();
464 guard.grow(delta, self.config.clone())
465 }
466
467 fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
470 let mut guard = self.mmap.write().unwrap();
471 guard.grow_at_least(min_size, self.config.clone())
472 }
473
474 fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
476 let guard = self.mmap.read().unwrap();
477 guard.vm_memory_definition.as_ptr()
478 }
479
480 fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
482 Ok(Box::new(self.clone()))
483 }
484
485 fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
487 let forked = Self::copy(self)?;
488 Ok(Box::new(forked))
489 }
490
491 fn do_wait(
492 &mut self,
493 dst: wasmer_vm::NotifyLocation,
494 timeout: Option<std::time::Duration>,
495 ) -> Result<u32, WaiterError> {
496 self.conditions.do_wait(dst, timeout)
497 }
498
499 fn do_notify(&mut self, dst: wasmer_vm::NotifyLocation, count: u32) -> u32 {
500 self.conditions.do_notify(dst, count)
501 }
502}
503
504impl From<VMOwnedMemory> for VMMemory {
505 fn from(mem: VMOwnedMemory) -> Self {
506 Self(Box::new(mem))
507 }
508}
509
510impl From<VMSharedMemory> for VMMemory {
511 fn from(mem: VMSharedMemory) -> Self {
512 Self(Box::new(mem))
513 }
514}
515
/// A type-erased linear memory: any `LinearMemory` implementation boxed
/// behind a single concrete type usable by the runtime.
#[derive(Debug)]
pub struct VMMemory(pub Box<dyn LinearMemory + 'static>);
519
520impl From<Box<dyn LinearMemory + 'static>> for VMMemory {
521 fn from(mem: Box<dyn LinearMemory + 'static>) -> Self {
522 Self(mem)
523 }
524}
525
526impl LinearMemory for VMMemory {
527 fn ty(&self) -> MemoryType {
529 self.0.ty()
530 }
531
532 fn size(&self) -> Pages {
534 self.0.size()
535 }
536
537 fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
542 self.0.grow(delta)
543 }
544
545 fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
548 self.0.grow_at_least(min_size)
549 }
550
551 fn reset(&mut self) -> Result<(), MemoryError> {
553 self.0.reset()?;
554 Ok(())
555 }
556
557 fn style(&self) -> MemoryStyle {
559 self.0.style()
560 }
561
562 fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
564 self.0.vmmemory()
565 }
566
567 fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
569 self.0.try_clone()
570 }
571
572 unsafe fn initialize_with_data(&self, start: usize, data: &[u8]) -> Result<(), Trap> {
574 unsafe { self.0.initialize_with_data(start, data) }
575 }
576
577 fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
579 self.0.copy()
580 }
581}
582
583impl VMMemory {
584 pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
590 Ok(if memory.shared {
591 Self(Box::new(VMSharedMemory::new(memory, style)?))
592 } else {
593 Self(Box::new(VMOwnedMemory::new(memory, style)?))
594 })
595 }
596
597 pub fn get_runtime_size(&self) -> u32 {
599 self.0.size().0
600 }
601
602 pub unsafe fn from_definition(
610 memory: &MemoryType,
611 style: &MemoryStyle,
612 vm_memory_location: NonNull<VMMemoryDefinition>,
613 ) -> Result<Self, MemoryError> {
614 Ok(if memory.shared {
615 let shared =
616 unsafe { VMSharedMemory::from_definition(memory, style, vm_memory_location)? };
617 Self(Box::new(shared))
618 } else {
619 let owned =
620 unsafe { VMOwnedMemory::from_definition(memory, style, vm_memory_location)? };
621 Self(Box::new(owned))
622 })
623 }
624
625 pub fn from_custom<IntoVMMemory>(memory: IntoVMMemory) -> Self
630 where
631 IntoVMMemory: Into<Self>,
632 {
633 memory.into()
634 }
635
636 pub fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
638 LinearMemory::copy(self)
639 }
640}
641
/// Copies `data` into `memory` starting at byte offset `start`.
///
/// # Safety
/// - `memory.base` must point to a valid, exclusively accessible allocation
///   of at least `memory.current_length` bytes.
/// - `start + data.len()` must not overflow and must not exceed
///   `memory.current_length`: the slice indexing below panics (rather than
///   returning a `Trap`) on out-of-bounds ranges. Callers are expected to
///   have validated the data segment beforehand — TODO confirm all call
///   sites do so.
#[doc(hidden)]
pub unsafe fn initialize_memory_with_data(
    memory: &VMMemoryDefinition,
    start: usize,
    data: &[u8],
) -> Result<(), Trap> {
    // SAFETY: per the function contract, `base`/`current_length` describe a
    // valid, non-aliased byte region for the duration of this call.
    let mem_slice = unsafe { std::slice::from_raw_parts_mut(memory.base, memory.current_length) };
    let end = start + data.len();
    let to_init = &mut mem_slice[start..end];
    to_init.copy_from_slice(data);

    Ok(())
}