1use crate::threadconditions::ThreadConditions;
9pub use crate::threadconditions::{NotifyLocation, WaiterError};
10use crate::trap::Trap;
11use crate::{
12 mmap::{Mmap, MmapType},
13 store::MaybeInstanceOwned,
14 threadconditions::ExpectedValue,
15 vmcontext::VMMemoryDefinition,
16};
17use more_asserts::assert_ge;
18use std::cell::UnsafeCell;
19use std::convert::TryInto;
20use std::ptr::NonNull;
21use std::rc::Rc;
22use std::slice;
23use std::sync::RwLock;
24use std::time::Duration;
25use wasmer_types::{Bytes, MemoryError, MemoryStyle, MemoryType, Pages, WASM_PAGE_SIZE};
26
/// The mmap-backed storage of a linear memory, paired with the
/// `VMMemoryDefinition` that publishes its base pointer and length.
#[derive(Debug)]
struct WasmMmap {
    /// The underlying allocation (accessible bytes followed by a guard
    /// region of `offset_guard_size` bytes).
    alloc: Mmap,
    /// Current logical size in wasm pages; kept in sync with
    /// `vm_memory_definition.current_length`.
    size: Pages,
    /// The definition record, either host-owned or instance-owned.
    vm_memory_definition: MaybeInstanceOwned<VMMemoryDefinition>,
}
37
38impl WasmMmap {
39 fn get_vm_memory_definition(&self) -> NonNull<VMMemoryDefinition> {
40 self.vm_memory_definition.as_ptr()
41 }
42
43 fn size(&self) -> Pages {
44 unsafe {
45 let md_ptr = self.get_vm_memory_definition();
46 let md = md_ptr.as_ref();
47 Bytes::from(md.current_length).try_into().unwrap()
48 }
49 }
50
51 fn grow(&mut self, delta: Pages, conf: VMMemoryConfig) -> Result<Pages, MemoryError> {
52 if delta.0 == 0 {
54 return Ok(self.size);
55 }
56
57 let new_pages = self
58 .size
59 .checked_add(delta)
60 .ok_or(MemoryError::CouldNotGrow {
61 current: self.size,
62 attempted_delta: delta,
63 })?;
64 let prev_pages = self.size;
65
66 if let Some(maximum) = conf.maximum
67 && new_pages > maximum
68 {
69 return Err(MemoryError::CouldNotGrow {
70 current: self.size,
71 attempted_delta: delta,
72 });
73 }
74
75 if new_pages > Pages::max_value() {
79 return Err(MemoryError::CouldNotGrow {
81 current: self.size,
82 attempted_delta: delta,
83 });
84 }
85
86 let delta_bytes = delta.bytes().0;
87 let prev_bytes = prev_pages.bytes().0;
88 let new_bytes = new_pages.bytes().0;
89
90 if new_bytes > self.alloc.len() - conf.offset_guard_size {
91 let guard_bytes = conf.offset_guard_size;
94 let request_bytes =
95 new_bytes
96 .checked_add(guard_bytes)
97 .ok_or_else(|| MemoryError::CouldNotGrow {
98 current: new_pages,
99 attempted_delta: Bytes(guard_bytes).try_into().unwrap(),
100 })?;
101
102 let mut new_mmap =
103 Mmap::accessible_reserved(new_bytes, request_bytes, None, MmapType::Private)
104 .map_err(MemoryError::Region)?;
105
106 let copy_len = self.alloc.len() - conf.offset_guard_size;
107 new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.alloc.as_slice()[..copy_len]);
108
109 self.alloc = new_mmap;
110 } else if delta_bytes > 0 {
111 self.alloc
113 .make_accessible(prev_bytes, delta_bytes)
114 .map_err(MemoryError::Region)?;
115 }
116
117 self.size = new_pages;
118
119 unsafe {
121 let mut md_ptr = self.vm_memory_definition.as_ptr();
122 let md = md_ptr.as_mut();
123 md.current_length = new_pages.bytes().0;
124 md.base = self.alloc.as_mut_ptr() as _;
125 }
126
127 Ok(prev_pages)
128 }
129
130 fn grow_at_least(&mut self, min_size: u64, conf: VMMemoryConfig) -> Result<(), MemoryError> {
133 let cur_size = self.size.bytes().0 as u64;
134 if cur_size < min_size {
135 let growth = min_size - cur_size;
136 let growth_pages = ((growth - 1) / WASM_PAGE_SIZE as u64) + 1;
137 self.grow(Pages(growth_pages as u32), conf)?;
138 }
139
140 Ok(())
141 }
142
143 fn reset(&mut self) -> Result<(), MemoryError> {
145 self.size.0 = 0;
146 unsafe {
148 let mut md_ptr = self.vm_memory_definition.as_ptr();
149 let md = md_ptr.as_mut();
150 md.current_length = 0;
151 }
152 Ok(())
153 }
154
155 pub fn copy(&mut self) -> Result<Self, MemoryError> {
158 let mem_length = self.size.bytes().0;
159 let mut alloc = self
160 .alloc
161 .copy(Some(mem_length))
162 .map_err(MemoryError::Generic)?;
163 let base_ptr = alloc.as_mut_ptr();
164 Ok(Self {
165 vm_memory_definition: MaybeInstanceOwned::Host(Box::new(UnsafeCell::new(
166 VMMemoryDefinition {
167 base: base_ptr,
168 current_length: mem_length,
169 },
170 ))),
171 alloc,
172 size: self.size,
173 })
174 }
175}
176
/// Immutable configuration shared by owned and shared linear memories.
#[derive(Debug, Clone)]
struct VMMemoryConfig {
    /// Optional upper bound, in pages, that `grow` may not exceed.
    maximum: Option<Pages>,
    /// The memory type as declared by the module.
    memory: MemoryType,
    /// Static or dynamic memory style.
    style: MemoryStyle,
    /// Size in bytes of the trailing guard region kept after the
    /// accessible bytes of the allocation.
    offset_guard_size: usize,
}
190
191impl VMMemoryConfig {
192 fn ty(&self, minimum: Pages) -> MemoryType {
193 let mut out = self.memory;
194 out.minimum = minimum;
195
196 out
197 }
198
199 fn style(&self) -> MemoryStyle {
200 self.style
201 }
202}
203
/// A linear memory with exclusive (single-owner) backing storage.
#[derive(Debug)]
pub struct VMOwnedMemory {
    /// The backing allocation plus its `VMMemoryDefinition`.
    mmap: WasmMmap,
    /// Configuration: type, style, maximum, guard size.
    config: VMMemoryConfig,
}
212
// SAFETY: the raw base/definition pointers always refer to allocations
// owned by this value, so moving it to another thread keeps them valid.
unsafe impl Send for VMOwnedMemory {}
// SAFETY: NOTE(review) — `&self` methods read the `VMMemoryDefinition`
// through a raw pointer without synchronization; `Sync` relies on callers
// not mutating concurrently. Confirm this invariant at the call sites.
unsafe impl Sync for VMOwnedMemory {}
215
impl VMOwnedMemory {
    /// Creates a new anonymous, private linear memory for the given type
    /// and style.
    pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, None, None, MmapType::Private) }
    }

    /// Creates a new linear memory backed by the file at `backing_file`,
    /// mapped with `memory_type`.
    pub fn new_with_file(
        memory: &MemoryType,
        style: &MemoryStyle,
        backing_file: std::path::PathBuf,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, None, Some(backing_file), memory_type) }
    }

    /// Creates a memory whose `VMMemoryDefinition` lives at an
    /// instance-provided location instead of being host-owned.
    ///
    /// # Safety
    /// `vm_memory_location` must point to a writable `VMMemoryDefinition`
    /// that outlives the returned memory.
    pub unsafe fn from_definition(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Self::new_internal(
                memory,
                style,
                Some(vm_memory_location),
                None,
                MmapType::Private,
            )
        }
    }

    /// Like [`Self::from_definition`], but optionally backed by a file.
    ///
    /// # Safety
    /// Same contract as [`Self::from_definition`].
    pub unsafe fn from_definition_with_file(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
        backing_file: Option<std::path::PathBuf>,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Self::new_internal(
                memory,
                style,
                Some(vm_memory_location),
                backing_file,
                memory_type,
            )
        }
    }

    /// Shared constructor: validates the memory type, reserves/maps the
    /// backing storage, and wires up the `VMMemoryDefinition`.
    ///
    /// # Safety
    /// If `vm_memory_location` is `Some`, it must point to a writable
    /// `VMMemoryDefinition` that outlives the returned memory.
    unsafe fn new_internal(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: Option<NonNull<VMMemoryDefinition>>,
        backing_file: Option<std::path::PathBuf>,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        unsafe {
            // Validate min/max against the wasm page-count limit and
            // against each other.
            if memory.minimum > Pages::max_value() {
                return Err(MemoryError::MinimumMemoryTooLarge {
                    min_requested: memory.minimum,
                    max_allowed: Pages::max_value(),
                });
            }
            if let Some(max) = memory.maximum {
                if max > Pages::max_value() {
                    return Err(MemoryError::MaximumMemoryTooLarge {
                        max_requested: max,
                        max_allowed: Pages::max_value(),
                    });
                }
                if max < memory.minimum {
                    return Err(MemoryError::InvalidMemory {
                        reason: format!(
                            "the maximum ({} pages) is less than the minimum ({} pages)",
                            max.0, memory.minimum.0
                        ),
                    });
                }
            }

            let offset_guard_bytes = style.offset_guard_size() as usize;

            // Static style reserves the full bound up front; dynamic style
            // reserves only the declared minimum.
            let minimum_pages = match style {
                MemoryStyle::Dynamic { .. } => memory.minimum,
                MemoryStyle::Static { bound, .. } => {
                    assert_ge!(*bound, memory.minimum);
                    *bound
                }
            };
            let minimum_bytes = minimum_pages.bytes().0;
            let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
            // Only the declared minimum is made accessible initially.
            let mapped_pages = memory.minimum;
            let mapped_bytes = mapped_pages.bytes();

            let mut alloc =
                Mmap::accessible_reserved(mapped_bytes.0, request_bytes, backing_file, memory_type)
                    .map_err(MemoryError::Region)?;

            let base_ptr = alloc.as_mut_ptr();
            // A file-backed mapping may expose more accessible bytes than
            // the declared minimum; take the larger of the two.
            let mem_length = memory
                .minimum
                .bytes()
                .0
                .max(alloc.as_slice_accessible().len());
            let mmap = WasmMmap {
                // Either write into the instance-provided definition or
                // create a host-owned one.
                vm_memory_definition: if let Some(mem_loc) = vm_memory_location {
                    {
                        let mut ptr = mem_loc;
                        let md = ptr.as_mut();
                        md.base = base_ptr;
                        md.current_length = mem_length;
                    }
                    MaybeInstanceOwned::Instance(mem_loc)
                } else {
                    MaybeInstanceOwned::Host(Box::new(UnsafeCell::new(VMMemoryDefinition {
                        base: base_ptr,
                        current_length: mem_length,
                    })))
                },
                alloc,
                size: Bytes::from(mem_length).try_into().unwrap(),
            };

            Ok(Self {
                mmap,
                config: VMMemoryConfig {
                    maximum: memory.maximum,
                    offset_guard_size: offset_guard_bytes,
                    memory: *memory,
                    style: *style,
                },
            })
        }
    }

    /// Converts this owned memory into a shareable `VMSharedMemory`,
    /// consuming `self`.
    pub fn to_shared(self) -> VMSharedMemory {
        VMSharedMemory {
            mmap: Rc::new(RwLock::new(self.mmap)),
            config: self.config,
            conditions: ThreadConditions::new(),
        }
    }

    /// Deep-copies this memory into a new, independent owned memory.
    pub fn copy(&mut self) -> Result<Self, MemoryError> {
        Ok(Self {
            mmap: self.mmap.copy()?,
            config: self.config.clone(),
        })
    }
}
395
396impl LinearMemory for VMOwnedMemory {
398 fn ty(&self) -> MemoryType {
400 let minimum = self.mmap.size();
401 self.config.ty(minimum)
402 }
403
404 fn size(&self) -> Pages {
406 self.mmap.size()
407 }
408
409 fn style(&self) -> MemoryStyle {
411 self.config.style()
412 }
413
414 fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
419 self.mmap.grow(delta, self.config.clone())
420 }
421
422 fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
425 self.mmap.grow_at_least(min_size, self.config.clone())
426 }
427
428 fn reset(&mut self) -> Result<(), MemoryError> {
430 self.mmap.reset()?;
431 Ok(())
432 }
433
434 fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
436 self.mmap.vm_memory_definition.as_ptr()
437 }
438
439 fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
441 Err(MemoryError::MemoryNotShared)
442 }
443
444 fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
446 let forked = Self::copy(self)?;
447 Ok(Box::new(forked))
448 }
449}
450
/// A linear memory whose backing storage is shared between handles: every
/// `clone` refers to the same underlying `WasmMmap`.
#[derive(Debug, Clone)]
pub struct VMSharedMemory {
    /// The shared allocation, locked for grow/reset/copy.
    /// NOTE(review): `Rc` uses non-atomic reference counts, yet this type
    /// is force-marked `Send`/`Sync` below — cloning or dropping handles
    /// from different threads races on the count. This likely should be
    /// `Arc`; confirm and fix across all constructors.
    mmap: Rc<RwLock<WasmMmap>>,
    /// Configuration: type, style, maximum, guard size.
    config: VMMemoryConfig,
    /// Wait/notify state used by `do_wait` / `do_notify`.
    conditions: ThreadConditions,
}
461
// SAFETY: NOTE(review) — these impls assert thread-safety, but the `mmap`
// field is an `Rc`, whose reference counting is not atomic; sending or
// cloning handles across threads is a data race on the refcount. `Arc`
// would make these impls sound (or unnecessary); confirm.
unsafe impl Send for VMSharedMemory {}
unsafe impl Sync for VMSharedMemory {}
464
465impl VMSharedMemory {
466 pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
471 Ok(VMOwnedMemory::new(memory, style)?.to_shared())
472 }
473
474 pub fn new_with_file(
481 memory: &MemoryType,
482 style: &MemoryStyle,
483 backing_file: std::path::PathBuf,
484 memory_type: MmapType,
485 ) -> Result<Self, MemoryError> {
486 Ok(VMOwnedMemory::new_with_file(memory, style, backing_file, memory_type)?.to_shared())
487 }
488
489 pub unsafe fn from_definition(
497 memory: &MemoryType,
498 style: &MemoryStyle,
499 vm_memory_location: NonNull<VMMemoryDefinition>,
500 ) -> Result<Self, MemoryError> {
501 unsafe {
502 Ok(VMOwnedMemory::from_definition(memory, style, vm_memory_location)?.to_shared())
503 }
504 }
505
506 pub unsafe fn from_definition_with_file(
516 memory: &MemoryType,
517 style: &MemoryStyle,
518 vm_memory_location: NonNull<VMMemoryDefinition>,
519 backing_file: Option<std::path::PathBuf>,
520 memory_type: MmapType,
521 ) -> Result<Self, MemoryError> {
522 unsafe {
523 Ok(VMOwnedMemory::from_definition_with_file(
524 memory,
525 style,
526 vm_memory_location,
527 backing_file,
528 memory_type,
529 )?
530 .to_shared())
531 }
532 }
533
534 pub fn copy(&mut self) -> Result<Self, MemoryError> {
536 let mut guard = self.mmap.write().unwrap();
537 Ok(Self {
538 mmap: Rc::new(RwLock::new(guard.copy()?)),
539 config: self.config.clone(),
540 conditions: ThreadConditions::new(),
541 })
542 }
543}
544
impl LinearMemory for VMSharedMemory {
    /// Memory type, with the minimum refreshed to the current page count.
    fn ty(&self) -> MemoryType {
        let minimum = {
            let guard = self.mmap.read().unwrap();
            guard.size()
        };
        self.config.ty(minimum)
    }

    /// Current size in wasm pages (takes the read lock).
    fn size(&self) -> Pages {
        let guard = self.mmap.read().unwrap();
        guard.size()
    }

    /// The configured memory style.
    fn style(&self) -> MemoryStyle {
        self.config.style()
    }

    /// Grows by `delta` pages under the write lock; returns the old size.
    fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
        let mut guard = self.mmap.write().unwrap();
        guard.grow(delta, self.config.clone())
    }

    /// Grows until at least `min_size` bytes, under the write lock.
    fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
        let mut guard = self.mmap.write().unwrap();
        guard.grow_at_least(min_size, self.config.clone())
    }

    /// Resets the logical length to zero, under the write lock.
    fn reset(&mut self) -> Result<(), MemoryError> {
        let mut guard = self.mmap.write().unwrap();
        guard.reset()?;
        Ok(())
    }

    /// Pointer to the shared `VMMemoryDefinition`.
    fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
        let guard = self.mmap.read().unwrap();
        guard.vm_memory_definition.as_ptr()
    }

    /// Shared memories clone cheaply: the new handle shares this storage.
    fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        Ok(Box::new(self.clone()))
    }

    /// Deep copy into an independent boxed shared memory.
    fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        let forked = Self::copy(self)?;
        Ok(Box::new(forked))
    }

    /// Blocks until notified at `dst` or until `timeout` elapses.
    ///
    /// # Safety
    /// See the trait; `dst` is interpreted relative to the current base.
    unsafe fn do_wait(
        &mut self,
        dst: u32,
        expected: ExpectedValue,
        timeout: Option<Duration>,
    ) -> Result<u32, WaiterError> {
        // The read lock only guards fetching the base pointer; the guard
        // is a temporary dropped at the end of this statement, so the
        // lock is NOT held while waiting.
        let dst = NotifyLocation {
            address: dst,
            memory_base: self.mmap.read().unwrap().alloc.as_ptr() as *mut _,
        };
        unsafe { self.conditions.do_wait(dst, expected, timeout) }
    }

    /// Wakes up to `count` waiters registered at `dst`; returns the
    /// number woken.
    fn do_notify(&mut self, dst: u32, count: u32) -> u32 {
        self.conditions.do_notify(dst, count)
    }

    /// Shared memories expose their wait/notify state.
    fn thread_conditions(&self) -> Option<&ThreadConditions> {
        Some(&self.conditions)
    }
}
629
impl From<VMOwnedMemory> for VMMemory {
    /// Wraps an owned memory as a boxed, type-erased `VMMemory`.
    fn from(mem: VMOwnedMemory) -> Self {
        Self(Box::new(mem))
    }
}
635
impl From<VMSharedMemory> for VMMemory {
    /// Wraps a shared memory as a boxed, type-erased `VMMemory`.
    fn from(mem: VMSharedMemory) -> Self {
        Self(Box::new(mem))
    }
}
641
/// Type-erased linear memory: any `LinearMemory` implementation, boxed.
#[derive(Debug)]
pub struct VMMemory(pub Box<dyn LinearMemory + 'static>);
645
impl From<Box<dyn LinearMemory + 'static>> for VMMemory {
    /// Wraps an already-boxed implementation without re-boxing.
    fn from(mem: Box<dyn LinearMemory + 'static>) -> Self {
        Self(mem)
    }
}
651
652impl LinearMemory for VMMemory {
653 fn ty(&self) -> MemoryType {
655 self.0.ty()
656 }
657
658 fn size(&self) -> Pages {
660 self.0.size()
661 }
662
663 fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
668 self.0.grow(delta)
669 }
670
671 fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
674 self.0.grow_at_least(min_size)
675 }
676
677 fn reset(&mut self) -> Result<(), MemoryError> {
679 self.0.reset()?;
680 Ok(())
681 }
682
683 fn style(&self) -> MemoryStyle {
685 self.0.style()
686 }
687
688 fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
690 self.0.vmmemory()
691 }
692
693 fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
695 self.0.try_clone()
696 }
697
698 unsafe fn initialize_with_data(&self, start: usize, data: &[u8]) -> Result<(), Trap> {
700 unsafe { self.0.initialize_with_data(start, data) }
701 }
702
703 fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
705 self.0.copy()
706 }
707
708 unsafe fn do_wait(
710 &mut self,
711 dst: u32,
712 expected: ExpectedValue,
713 timeout: Option<Duration>,
714 ) -> Result<u32, WaiterError> {
715 unsafe { self.0.do_wait(dst, expected, timeout) }
716 }
717
718 fn do_notify(&mut self, dst: u32, count: u32) -> u32 {
720 self.0.do_notify(dst, count)
721 }
722
723 fn thread_conditions(&self) -> Option<&ThreadConditions> {
724 self.0.thread_conditions()
725 }
726}
727
728impl VMMemory {
729 pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
735 Ok(if memory.shared {
736 Self(Box::new(VMSharedMemory::new(memory, style)?))
737 } else {
738 Self(Box::new(VMOwnedMemory::new(memory, style)?))
739 })
740 }
741
742 pub fn get_runtime_size(&self) -> u32 {
744 self.0.size().0
745 }
746
747 pub unsafe fn from_definition(
755 memory: &MemoryType,
756 style: &MemoryStyle,
757 vm_memory_location: NonNull<VMMemoryDefinition>,
758 ) -> Result<Self, MemoryError> {
759 unsafe {
760 Ok(if memory.shared {
761 Self(Box::new(VMSharedMemory::from_definition(
762 memory,
763 style,
764 vm_memory_location,
765 )?))
766 } else {
767 Self(Box::new(VMOwnedMemory::from_definition(
768 memory,
769 style,
770 vm_memory_location,
771 )?))
772 })
773 }
774 }
775
776 pub fn from_custom<IntoVMMemory>(memory: IntoVMMemory) -> Self
781 where
782 IntoVMMemory: Into<Self>,
783 {
784 memory.into()
785 }
786
787 pub fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
789 LinearMemory::copy(self)
790 }
791}
792
/// Initializes `memory` by copying `data` to byte offset `start`.
///
/// # Safety
/// `memory.base` must be valid for `memory.current_length` bytes and must
/// not be concurrently mutated during the copy.
///
/// NOTE(review): `start + data.len()` can overflow, and an out-of-range
/// slice below panics rather than returning `Err(Trap)` — callers appear
/// expected to pre-validate the range; confirm.
#[doc(hidden)]
pub unsafe fn initialize_memory_with_data(
    memory: &VMMemoryDefinition,
    start: usize,
    data: &[u8],
) -> Result<(), Trap> {
    unsafe {
        let mem_slice = slice::from_raw_parts_mut(memory.base, memory.current_length);
        let end = start + data.len();
        let to_init = &mut mem_slice[start..end];
        to_init.copy_from_slice(data);

        Ok(())
    }
}
809
/// The contract every concrete linear-memory implementation must satisfy.
pub trait LinearMemory
where
    Self: std::fmt::Debug + Send,
{
    /// Returns the memory type (implementations here report the current
    /// size as the minimum).
    fn ty(&self) -> MemoryType;

    /// Returns the current size, in wasm pages.
    fn size(&self) -> Pages;

    /// Returns the memory style (static or dynamic).
    fn style(&self) -> MemoryStyle;

    /// Grows the memory by `delta` pages, returning the previous page
    /// count on success.
    fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError>;

    /// Grows the memory until at least `min_size` bytes are available.
    /// Defaults to unsupported; implementations opt in.
    fn grow_at_least(&mut self, _min_size: u64) -> Result<(), MemoryError> {
        Err(MemoryError::UnsupportedOperation {
            message: "grow_at_least() is not supported".to_string(),
        })
    }

    /// Resets the memory to zero length. Defaults to unsupported.
    fn reset(&mut self) -> Result<(), MemoryError> {
        Err(MemoryError::UnsupportedOperation {
            message: "reset() is not supported".to_string(),
        })
    }

    /// Returns a pointer to the `VMMemoryDefinition` for this memory.
    fn vmmemory(&self) -> NonNull<VMMemoryDefinition>;

    /// Clones this memory into a new handle sharing the same storage;
    /// errors for memories that cannot be shared.
    fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError>;

    /// Copies `data` into the memory at byte offset `start`.
    ///
    /// # Safety
    /// The destination range must lie within the memory; see
    /// `initialize_memory_with_data` for the exact contract.
    #[doc(hidden)]
    unsafe fn initialize_with_data(&self, start: usize, data: &[u8]) -> Result<(), Trap> {
        unsafe {
            let memory = self.vmmemory().as_ref();

            initialize_memory_with_data(memory, start, data)
        }
    }

    /// Deep-copies this memory into an independent boxed memory.
    fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError>;

    /// Waits at `dst` until notified or timed out. Defaults to
    /// unimplemented for memories without wait/notify support.
    ///
    /// # Safety
    /// `dst` must be a valid wait location within this memory (contract
    /// defined by the implementing type).
    unsafe fn do_wait(
        &mut self,
        _dst: u32,
        _expected: ExpectedValue,
        _timeout: Option<Duration>,
    ) -> Result<u32, WaiterError> {
        Err(WaiterError::Unimplemented)
    }

    /// Notifies up to `count` waiters at `dst`; returns the number woken.
    /// Defaults to zero (no waiters).
    fn do_notify(&mut self, _dst: u32, _count: u32) -> u32 {
        0
    }

    /// Returns the wait/notify state, if this memory supports it.
    fn thread_conditions(&self) -> Option<&ThreadConditions> {
        None
    }
}