use crate::threadconditions::ThreadConditions;
pub use crate::threadconditions::{NotifyLocation, WaiterError};
use crate::trap::Trap;
use crate::{
    mmap::{Mmap, MmapType},
    store::MaybeInstanceOwned,
    threadconditions::ExpectedValue,
    vmcontext::VMMemoryDefinition,
};
use more_asserts::assert_ge;
use std::cell::UnsafeCell;
use std::convert::TryInto;
use std::ptr::NonNull;
use std::slice;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use wasmer_types::{Bytes, MemoryError, MemoryStyle, MemoryType, Pages, WASM_PAGE_SIZE};

/// An mmap-backed linear memory, together with its current size and the
/// `VMMemoryDefinition` that is exposed to generated code.
#[derive(Debug)]
struct WasmMmap {
    /// Our OS allocation of mmap'd memory.
    alloc: Mmap,
    /// The current logical size of this linear memory, in wasm pages.
    size: Pages,
    /// The owned memory definition used by the generated code.
    vm_memory_definition: MaybeInstanceOwned<VMMemoryDefinition>,
}

impl WasmMmap {
    fn get_vm_memory_definition(&self) -> NonNull<VMMemoryDefinition> {
        self.vm_memory_definition.as_ptr()
    }

    fn size(&self) -> Pages {
        unsafe {
            let md_ptr = self.get_vm_memory_definition();
            let md = md_ptr.as_ref();
            Bytes::from(md.current_length).try_into().unwrap()
        }
    }

    fn grow(&mut self, delta: Pages, conf: VMMemoryConfig) -> Result<Pages, MemoryError> {
        // Optimization for `memory.grow 0` calls.
        if delta.0 == 0 {
            return Ok(self.size);
        }

        let new_pages = self
            .size
            .checked_add(delta)
            .ok_or(MemoryError::CouldNotGrow {
                current: self.size,
                attempted_delta: delta,
            })?;
        let prev_pages = self.size;

        if let Some(maximum) = conf.maximum
            && new_pages > maximum
        {
            return Err(MemoryError::CouldNotGrow {
                current: self.size,
                attempted_delta: delta,
            });
        }

        // Even with no declared maximum, the memory can never grow beyond
        // what a 32-bit wasm address space can index.
        if new_pages > Pages::max_value() {
            return Err(MemoryError::CouldNotGrow {
                current: self.size,
                attempted_delta: delta,
            });
        }

        let delta_bytes = delta.bytes().0;
        let prev_bytes = prev_pages.bytes().0;
        let new_bytes = new_pages.bytes().0;

        if new_bytes > self.alloc.len() - conf.offset_guard_size {
            // The new size does not fit in the current allocation: reserve a
            // larger mapping (plus guard region) and copy the old contents over.
            let guard_bytes = conf.offset_guard_size;
            let request_bytes =
                new_bytes
                    .checked_add(guard_bytes)
                    .ok_or_else(|| MemoryError::CouldNotGrow {
                        current: new_pages,
                        attempted_delta: Bytes(guard_bytes).try_into().unwrap(),
                    })?;

            let mut new_mmap =
                Mmap::accessible_reserved(new_bytes, request_bytes, None, MmapType::Private)
                    .map_err(MemoryError::Region)?;

            let copy_len = self.alloc.len() - conf.offset_guard_size;
            new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.alloc.as_slice()[..copy_len]);

            self.alloc = new_mmap;
        } else if delta_bytes > 0 {
            // The reservation is already large enough: just make the newly
            // grown pages accessible.
            self.alloc
                .make_accessible(prev_bytes, delta_bytes)
                .map_err(MemoryError::Region)?;
        }

        self.size = new_pages;

        // Update the `VMMemoryDefinition` seen by generated code.
        unsafe {
            let mut md_ptr = self.vm_memory_definition.as_ptr();
            let md = md_ptr.as_mut();
            md.current_length = new_pages.bytes().0;
            md.base = self.alloc.as_mut_ptr() as _;
        }

        Ok(prev_pages)
    }

    /// Grows the memory to at least `min_size` bytes, rounding the growth up
    /// to a whole number of wasm pages. Does nothing if the memory is already
    /// large enough.
    fn grow_at_least(&mut self, min_size: u64, conf: VMMemoryConfig) -> Result<(), MemoryError> {
        let cur_size = self.size.bytes().0 as u64;
        if cur_size < min_size {
            let growth = min_size - cur_size;
            let growth_pages = ((growth - 1) / WASM_PAGE_SIZE as u64) + 1;
            self.grow(Pages(growth_pages as u32), conf)?;
        }

        Ok(())
    }

    /// Resets the memory back to zero length.
    fn reset(&mut self) -> Result<(), MemoryError> {
        self.size.0 = 0;
        unsafe {
            let mut md_ptr = self.vm_memory_definition.as_ptr();
            let md = md_ptr.as_mut();
            md.current_length = 0;
        }
        Ok(())
    }

    /// Copies this memory into a freshly allocated one of the same size.
    pub fn copy(&mut self) -> Result<Self, MemoryError> {
        let mem_length = self.size.bytes().0;
        let mut alloc = self
            .alloc
            .copy(Some(mem_length))
            .map_err(MemoryError::Generic)?;
        let base_ptr = alloc.as_mut_ptr();
        Ok(Self {
            vm_memory_definition: MaybeInstanceOwned::Host(Box::new(UnsafeCell::new(
                VMMemoryDefinition {
                    base: base_ptr,
                    current_length: mem_length,
                },
            ))),
            alloc,
            size: self.size,
        })
    }
}

/// Configuration shared by the owned and shared linear memory implementations.
#[derive(Debug, Clone)]
struct VMMemoryConfig {
    /// The optional maximum size, in wasm pages, of this linear memory.
    maximum: Option<Pages>,
    /// The WebAssembly linear memory description from the module.
    memory: MemoryType,
    /// Our chosen implementation style.
    style: MemoryStyle,
    /// Size, in bytes, of the unmapped guard region reserved after the memory
    /// so that out-of-bounds accesses trap.
    offset_guard_size: usize,
}

impl VMMemoryConfig {
    fn ty(&self, minimum: Pages) -> MemoryType {
        let mut out = self.memory;
        out.minimum = minimum;

        out
    }

    fn style(&self) -> MemoryStyle {
        self.style
    }
}

/// A linear memory instance that owns its allocation.
#[derive(Debug)]
pub struct VMOwnedMemory {
    /// The underlying allocation.
    mmap: WasmMmap,
    /// Configuration of this memory.
    config: VMMemoryConfig,
}

unsafe impl Send for VMOwnedMemory {}
unsafe impl Sync for VMOwnedMemory {}

impl VMOwnedMemory {
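    /// Creates a new owned linear memory of the requested type and style, with
    /// its contents initialized to all zeros.
    ///
    /// A minimal usage sketch (not compiled as a doc-test; it assumes the
    /// `MemoryType::new` constructor and the `MemoryStyle::Dynamic` variant
    /// from `wasmer_types`, with the `LinearMemory` trait in scope):
    ///
    /// ```ignore
    /// use wasmer_types::{MemoryStyle, MemoryType, Pages};
    ///
    /// // One initial page, growable up to four pages, not shared.
    /// let ty = MemoryType::new(Pages(1), Some(Pages(4)), false);
    /// let style = MemoryStyle::Dynamic { offset_guard_size: 0 };
    ///
    /// let mut memory = VMOwnedMemory::new(&ty, &style)?;
    /// assert_eq!(memory.size(), Pages(1));
    /// memory.grow(Pages(1))?;
    /// assert_eq!(memory.size(), Pages(2));
    /// ```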
    pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, None, None, MmapType::Private) }
    }

    /// Creates a new owned linear memory whose pages are backed by the given
    /// file instead of an anonymous mapping.
    pub fn new_with_file(
        memory: &MemoryType,
        style: &MemoryStyle,
        backing_file: std::path::PathBuf,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, None, Some(backing_file), memory_type) }
    }

    /// Creates a new owned linear memory whose `VMMemoryDefinition` lives at
    /// the given location (typically inside a `VMContext`).
    ///
    /// # Safety
    ///
    /// `vm_memory_location` must point to a valid `VMMemoryDefinition` that
    /// outlives the returned memory.
    pub unsafe fn from_definition(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Self::new_internal(
                memory,
                style,
                Some(vm_memory_location),
                None,
                MmapType::Private,
            )
        }
    }

    /// Creates a new owned linear memory whose `VMMemoryDefinition` lives at
    /// the given location and whose pages are optionally backed by a file.
    ///
    /// # Safety
    ///
    /// `vm_memory_location` must point to a valid `VMMemoryDefinition` that
    /// outlives the returned memory.
    pub unsafe fn from_definition_with_file(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
        backing_file: Option<std::path::PathBuf>,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Self::new_internal(
                memory,
                style,
                Some(vm_memory_location),
                backing_file,
                memory_type,
            )
        }
    }

    /// Builds a linear memory, optionally attaching it to an existing
    /// `VMMemoryDefinition` and/or a backing file.
    unsafe fn new_internal(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: Option<NonNull<VMMemoryDefinition>>,
        backing_file: Option<std::path::PathBuf>,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        unsafe {
            if memory.minimum > Pages::max_value() {
                return Err(MemoryError::MinimumMemoryTooLarge {
                    min_requested: memory.minimum,
                    max_allowed: Pages::max_value(),
                });
            }
            if let Some(max) = memory.maximum {
                if max > Pages::max_value() {
                    return Err(MemoryError::MaximumMemoryTooLarge {
                        max_requested: max,
                        max_allowed: Pages::max_value(),
                    });
                }
                if max < memory.minimum {
                    return Err(MemoryError::InvalidMemory {
                        reason: format!(
                            "the maximum ({} pages) is less than the minimum ({} pages)",
                            max.0, memory.minimum.0
                        ),
                    });
                }
            }

            let offset_guard_bytes = style.offset_guard_size() as usize;

            // For a static style, reserve the full bound up front; for a
            // dynamic style, reserve only the minimum and grow later.
            let minimum_pages = match style {
                MemoryStyle::Dynamic { .. } => memory.minimum,
                MemoryStyle::Static { bound, .. } => {
                    assert_ge!(*bound, memory.minimum);
                    *bound
                }
            };
            let minimum_bytes = minimum_pages.bytes().0;
            let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
            let mapped_pages = memory.minimum;
            let mapped_bytes = mapped_pages.bytes();

            let mut alloc =
                Mmap::accessible_reserved(mapped_bytes.0, request_bytes, backing_file, memory_type)
                    .map_err(MemoryError::Region)?;

            let base_ptr = alloc.as_mut_ptr();
            // A backing file may already be larger than the declared minimum.
            let mem_length = memory
                .minimum
                .bytes()
                .0
                .max(alloc.as_slice_accessible().len());
            let mmap = WasmMmap {
                vm_memory_definition: if let Some(mem_loc) = vm_memory_location {
                    {
                        let mut ptr = mem_loc;
                        let md = ptr.as_mut();
                        md.base = base_ptr;
                        md.current_length = mem_length;
                    }
                    MaybeInstanceOwned::Instance(mem_loc)
                } else {
                    MaybeInstanceOwned::Host(Box::new(UnsafeCell::new(VMMemoryDefinition {
                        base: base_ptr,
                        current_length: mem_length,
                    })))
                },
                alloc,
                size: Bytes::from(mem_length).try_into().unwrap(),
            };

            Ok(Self {
                mmap,
                config: VMMemoryConfig {
                    maximum: memory.maximum,
                    offset_guard_size: offset_guard_bytes,
                    memory: *memory,
                    style: *style,
                },
            })
        }
    }

    /// Converts this owned memory into a shared memory.
    pub fn to_shared(self) -> VMSharedMemory {
        VMSharedMemory {
            mmap: Arc::new(RwLock::new(self.mmap)),
            config: self.config,
            conditions: ThreadConditions::new(),
        }
    }

    /// Copies this memory into a new, independent memory.
    pub fn copy(&mut self) -> Result<Self, MemoryError> {
        Ok(Self {
            mmap: self.mmap.copy()?,
            config: self.config.clone(),
        })
    }
}

impl LinearMemory for VMOwnedMemory {
    fn ty(&self) -> MemoryType {
        let minimum = self.mmap.size();
        self.config.ty(minimum)
    }

    fn size(&self) -> Pages {
        self.mmap.size()
    }

    fn style(&self) -> MemoryStyle {
        self.config.style()
    }

    fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
        self.mmap.grow(delta, self.config.clone())
    }

    fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
        self.mmap.grow_at_least(min_size, self.config.clone())
    }

    fn reset(&mut self) -> Result<(), MemoryError> {
        self.mmap.reset()?;
        Ok(())
    }

    fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
        self.mmap.vm_memory_definition.as_ptr()
    }

    fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        Err(MemoryError::MemoryNotShared)
    }

    fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        let forked = Self::copy(self)?;
        Ok(Box::new(forked))
    }
}

/// A shared linear memory instance.
#[derive(Debug, Clone)]
pub struct VMSharedMemory {
    /// The underlying allocation, behind a lock so that concurrent users can
    /// read and grow the memory safely.
    mmap: Arc<RwLock<WasmMmap>>,
    /// Configuration of this memory.
    config: VMMemoryConfig,
    /// Support for the atomic wait/notify instructions.
    conditions: ThreadConditions,
}

unsafe impl Send for VMSharedMemory {}
unsafe impl Sync for VMSharedMemory {}

impl VMSharedMemory {
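    /// Creates a new shared linear memory of the requested type and style,
    /// with its contents initialized to all zeros.
    ///
    /// A minimal usage sketch (not compiled as a doc-test; it assumes the
    /// `MemoryType::new` constructor and the `MemoryStyle::Dynamic` variant
    /// from `wasmer_types`, with the `LinearMemory` trait in scope):
    ///
    /// ```ignore
    /// use wasmer_types::{MemoryStyle, MemoryType, Pages};
    ///
    /// // A shared wasm memory must declare a maximum size.
    /// let ty = MemoryType::new(Pages(1), Some(Pages(16)), true);
    /// let style = MemoryStyle::Dynamic { offset_guard_size: 0 };
    ///
    /// let memory = VMSharedMemory::new(&ty, &style)?;
    /// // Cloning is cheap: both handles refer to the same underlying pages.
    /// let alias = memory.clone();
    /// assert_eq!(memory.size(), alias.size());
    /// ```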
    pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
        Ok(VMOwnedMemory::new(memory, style)?.to_shared())
    }

    /// Creates a new shared linear memory whose pages are backed by the given
    /// file instead of an anonymous mapping.
    pub fn new_with_file(
        memory: &MemoryType,
        style: &MemoryStyle,
        backing_file: std::path::PathBuf,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        Ok(VMOwnedMemory::new_with_file(memory, style, backing_file, memory_type)?.to_shared())
    }

    /// Creates a new shared linear memory whose `VMMemoryDefinition` lives at
    /// the given location (typically inside a `VMContext`).
    ///
    /// # Safety
    ///
    /// `vm_memory_location` must point to a valid `VMMemoryDefinition` that
    /// outlives the returned memory.
    pub unsafe fn from_definition(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Ok(VMOwnedMemory::from_definition(memory, style, vm_memory_location)?.to_shared())
        }
    }

    /// Creates a new shared linear memory whose `VMMemoryDefinition` lives at
    /// the given location and whose pages are optionally backed by a file.
    ///
    /// # Safety
    ///
    /// `vm_memory_location` must point to a valid `VMMemoryDefinition` that
    /// outlives the returned memory.
    pub unsafe fn from_definition_with_file(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
        backing_file: Option<std::path::PathBuf>,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Ok(VMOwnedMemory::from_definition_with_file(
                memory,
                style,
                vm_memory_location,
                backing_file,
                memory_type,
            )?
            .to_shared())
        }
    }

    /// Copies this memory into a new, independent shared memory.
    pub fn copy(&mut self) -> Result<Self, MemoryError> {
        let mut guard = self.mmap.write().unwrap();
        Ok(Self {
            mmap: Arc::new(RwLock::new(guard.copy()?)),
            config: self.config.clone(),
            conditions: ThreadConditions::new(),
        })
    }
}

impl LinearMemory for VMSharedMemory {
    fn ty(&self) -> MemoryType {
        let minimum = {
            let guard = self.mmap.read().unwrap();
            guard.size()
        };
        self.config.ty(minimum)
    }

    fn size(&self) -> Pages {
        let guard = self.mmap.read().unwrap();
        guard.size()
    }

    fn style(&self) -> MemoryStyle {
        self.config.style()
    }

    fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
        let mut guard = self.mmap.write().unwrap();
        guard.grow(delta, self.config.clone())
    }

    fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
        let mut guard = self.mmap.write().unwrap();
        guard.grow_at_least(min_size, self.config.clone())
    }

    fn reset(&mut self) -> Result<(), MemoryError> {
        let mut guard = self.mmap.write().unwrap();
        guard.reset()?;
        Ok(())
    }

    fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
        let guard = self.mmap.read().unwrap();
        guard.vm_memory_definition.as_ptr()
    }

    fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        Ok(Box::new(self.clone()))
    }

    fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        let forked = Self::copy(self)?;
        Ok(Box::new(forked))
    }

    unsafe fn do_wait(
        &mut self,
        dst: u32,
        expected: ExpectedValue,
        timeout: Option<Duration>,
    ) -> Result<u32, WaiterError> {
        let dst = NotifyLocation {
            address: dst,
            memory_base: self.mmap.read().unwrap().alloc.as_ptr() as *mut _,
        };
        unsafe { self.conditions.do_wait(dst, expected, timeout) }
    }

    fn do_notify(&mut self, dst: u32, count: u32) -> u32 {
        self.conditions.do_notify(dst, count)
    }

    fn thread_conditions(&self) -> Option<&ThreadConditions> {
        Some(&self.conditions)
    }
}

impl From<VMOwnedMemory> for VMMemory {
    fn from(mem: VMOwnedMemory) -> Self {
        Self(Box::new(mem))
    }
}

impl From<VMSharedMemory> for VMMemory {
    fn from(mem: VMSharedMemory) -> Self {
        Self(Box::new(mem))
    }
}

/// A linear memory, owned or shared, hidden behind the `LinearMemory` trait so
/// that custom implementations can be plugged in as well.
#[derive(Debug)]
pub struct VMMemory(pub Box<dyn LinearMemory + 'static>);

impl From<Box<dyn LinearMemory + 'static>> for VMMemory {
    fn from(mem: Box<dyn LinearMemory + 'static>) -> Self {
        Self(mem)
    }
}

impl LinearMemory for VMMemory {
    fn ty(&self) -> MemoryType {
        self.0.ty()
    }

    fn size(&self) -> Pages {
        self.0.size()
    }

    fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
        self.0.grow(delta)
    }

    fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
        self.0.grow_at_least(min_size)
    }

    fn reset(&mut self) -> Result<(), MemoryError> {
        self.0.reset()?;
        Ok(())
    }

    fn style(&self) -> MemoryStyle {
        self.0.style()
    }

    fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
        self.0.vmmemory()
    }

    fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        self.0.try_clone()
    }

    unsafe fn initialize_with_data(&self, start: usize, data: &[u8]) -> Result<(), Trap> {
        unsafe { self.0.initialize_with_data(start, data) }
    }

    fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        self.0.copy()
    }

    unsafe fn do_wait(
        &mut self,
        dst: u32,
        expected: ExpectedValue,
        timeout: Option<Duration>,
    ) -> Result<u32, WaiterError> {
        unsafe { self.0.do_wait(dst, expected, timeout) }
    }

    fn do_notify(&mut self, dst: u32, count: u32) -> u32 {
        self.0.do_notify(dst, count)
    }

    fn thread_conditions(&self) -> Option<&ThreadConditions> {
        self.0.thread_conditions()
    }
}

impl VMMemory {
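    /// Creates a `VMMemory` of the correct flavour for the given memory type:
    /// shared if `memory.shared` is set, owned otherwise.
    ///
    /// A minimal usage sketch (not compiled as a doc-test; it assumes the
    /// `MemoryType::new` constructor and the `MemoryStyle::Dynamic` variant
    /// from `wasmer_types`, with the `LinearMemory` trait in scope):
    ///
    /// ```ignore
    /// use wasmer_types::{MemoryStyle, MemoryType, Pages};
    ///
    /// let style = MemoryStyle::Dynamic { offset_guard_size: 0 };
    ///
    /// // `shared: false` dispatches to `VMOwnedMemory` under the hood...
    /// let owned_ty = MemoryType::new(Pages(1), Some(Pages(4)), false);
    /// let owned = VMMemory::new(&owned_ty, &style)?;
    ///
    /// // ...while `shared: true` dispatches to `VMSharedMemory`.
    /// let shared_ty = MemoryType::new(Pages(1), Some(Pages(16)), true);
    /// let shared = VMMemory::new(&shared_ty, &style)?;
    ///
    /// assert_eq!(owned.size(), Pages(1));
    /// assert_eq!(shared.size(), Pages(1));
    /// ```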
    pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
        Ok(if memory.shared {
            Self(Box::new(VMSharedMemory::new(memory, style)?))
        } else {
            Self(Box::new(VMOwnedMemory::new(memory, style)?))
        })
    }

    /// Returns the current runtime size of this memory, in wasm pages.
    pub fn get_runtime_size(&self) -> u32 {
        self.0.size().0
    }

    /// Creates a `VMMemory` of the correct flavour for the given memory type,
    /// using an existing `VMMemoryDefinition` at the given location.
    ///
    /// # Safety
    ///
    /// `vm_memory_location` must point to a valid `VMMemoryDefinition` that
    /// outlives the returned memory.
    pub unsafe fn from_definition(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Ok(if memory.shared {
                Self(Box::new(VMSharedMemory::from_definition(
                    memory,
                    style,
                    vm_memory_location,
                )?))
            } else {
                Self(Box::new(VMOwnedMemory::from_definition(
                    memory,
                    style,
                    vm_memory_location,
                )?))
            })
        }
    }
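
    /// Wraps a custom `LinearMemory` implementation (anything convertible into
    /// a `VMMemory`) in a `VMMemory`.
    ///
    /// A minimal sketch, reusing the owned memory from this module (the
    /// `ty`/`style` values are assumed to be built as in the examples above):
    ///
    /// ```ignore
    /// let owned = VMOwnedMemory::new(&ty, &style)?;
    /// let memory: VMMemory = VMMemory::from_custom(owned);
    /// ```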
    pub fn from_custom<IntoVMMemory>(memory: IntoVMMemory) -> Self
    where
        IntoVMMemory: Into<Self>,
    {
        memory.into()
    }

    /// Copies this memory into a new, independent memory.
    pub fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        LinearMemory::copy(self)
    }
}

/// Initializes the host memory described by `memory` with `data`, starting at
/// byte offset `start`.
///
/// # Safety
///
/// `memory.base` must point to at least `memory.current_length` bytes of
/// writable memory; offsets outside of the memory cause a panic.
#[doc(hidden)]
pub unsafe fn initialize_memory_with_data(
    memory: &VMMemoryDefinition,
    start: usize,
    data: &[u8],
) -> Result<(), Trap> {
    unsafe {
        let mem_slice = slice::from_raw_parts_mut(memory.base, memory.current_length);
        let end = start + data.len();
        let to_init = &mut mem_slice[start..end];
        to_init.copy_from_slice(data);

        Ok(())
    }
}

/// Represents a linear memory that can be used by a WebAssembly instance.
pub trait LinearMemory
where
    Self: std::fmt::Debug + Send,
{
    /// Returns the type of this memory.
    fn ty(&self) -> MemoryType;

    /// Returns the current size of this memory, in wasm pages.
    fn size(&self) -> Pages;

    /// Returns the memory style of this memory.
    fn style(&self) -> MemoryStyle;

    /// Grows the memory by the given number of wasm pages.
    ///
    /// Returns the previous size, in pages, on success, or an error if the
    /// memory cannot be grown by that amount.
    fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError>;

    /// Grows the memory to at least `min_size` bytes. Does nothing if the
    /// memory is already large enough.
    fn grow_at_least(&mut self, _min_size: u64) -> Result<(), MemoryError> {
        Err(MemoryError::UnsupportedOperation {
            message: "grow_at_least() is not supported".to_string(),
        })
    }

    /// Resets the memory back to zero length.
    fn reset(&mut self) -> Result<(), MemoryError> {
        Err(MemoryError::UnsupportedOperation {
            message: "reset() is not supported".to_string(),
        })
    }

    /// Returns a pointer to the `VMMemoryDefinition` used by generated code.
    fn vmmemory(&self) -> NonNull<VMMemoryDefinition>;

    /// Attempts to clone this memory; implementations that cannot share their
    /// allocation return an error.
    fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError>;

    /// Initializes this memory with `data` at byte offset `start`.
    #[doc(hidden)]
    unsafe fn initialize_with_data(&self, start: usize, data: &[u8]) -> Result<(), Trap> {
        unsafe {
            let memory = self.vmmemory().as_ref();

            initialize_memory_with_data(memory, start, data)
        }
    }

    /// Copies this memory into a new, independent memory.
    fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError>;

    /// Waits on the given memory location until it is notified, until the
    /// check against `_expected` fails, or until the optional timeout expires,
    /// depending on the implementation.
    ///
    /// The default implementation reports that waiting is unsupported.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `_dst` is a valid offset into this memory.
    unsafe fn do_wait(
        &mut self,
        _dst: u32,
        _expected: ExpectedValue,
        _timeout: Option<Duration>,
    ) -> Result<u32, WaiterError> {
        Err(WaiterError::Unimplemented)
    }

    /// Notifies up to `_count` waiters waiting on the given memory location,
    /// returning the number of waiters that were woken. The default
    /// implementation wakes no one and returns 0.
    fn do_notify(&mut self, _dst: u32, _count: u32) -> u32 {
        0
    }

    /// Returns this memory's `ThreadConditions` (the wait/notify machinery),
    /// if it has any.
    fn thread_conditions(&self) -> Option<&ThreadConditions> {
        None
    }
}