use crate::threadconditions::ThreadConditions;
pub use crate::threadconditions::{NotifyLocation, WaiterError};
use crate::trap::Trap;
use crate::{
    mmap::{Mmap, MmapType},
    store::MaybeInstanceOwned,
    threadconditions::ExpectedValue,
    vmcontext::VMMemoryDefinition,
};
use more_asserts::assert_ge;
use std::cell::UnsafeCell;
use std::convert::TryInto;
use std::ptr::NonNull;
use std::slice;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use wasmer_types::{Bytes, MemoryError, MemoryStyle, MemoryType, Pages, WASM_PAGE_SIZE};

/// A linear memory allocation backed by an [`Mmap`], together with its current
/// size and the `VMMemoryDefinition` handed out to generated code.
#[derive(Debug)]
struct WasmMmap {
    /// Our OS allocation of mmap'd memory.
    alloc: Mmap,
    /// The current logical size of this linear memory, in wasm pages.
    size: Pages,
    /// The owned memory definition used by the generated code.
    vm_memory_definition: MaybeInstanceOwned<VMMemoryDefinition>,
}

impl WasmMmap {
    fn get_vm_memory_definition(&self) -> NonNull<VMMemoryDefinition> {
        self.vm_memory_definition.as_ptr()
    }

    fn size(&self) -> Pages {
        unsafe {
            let md_ptr = self.get_vm_memory_definition();
            let md = md_ptr.as_ref();
            Bytes::from(md.current_length).try_into().unwrap()
        }
    }

    fn grow(&mut self, delta: Pages, conf: VMMemoryConfig) -> Result<Pages, MemoryError> {
        // `memory.grow` of 0 pages is a no-op; report the current size.
        if delta.0 == 0 {
            return Ok(self.size);
        }

        let new_pages = self
            .size
            .checked_add(delta)
            .ok_or(MemoryError::CouldNotGrow {
                current: self.size,
                attempted_delta: delta,
            })?;
        let prev_pages = self.size;

        if let Some(maximum) = conf.maximum
            && new_pages > maximum
        {
            return Err(MemoryError::CouldNotGrow {
                current: self.size,
                attempted_delta: delta,
            });
        }

        // Even without a declared maximum, a linear memory may never grow
        // beyond the limit enforced by `Pages::max_value()`.
        if new_pages > Pages::max_value() {
            return Err(MemoryError::CouldNotGrow {
                current: self.size,
                attempted_delta: delta,
            });
        }

        let delta_bytes = delta.bytes().0;
        let prev_bytes = prev_pages.bytes().0;
        let new_bytes = new_pages.bytes().0;

        if new_bytes > self.alloc.len() - conf.offset_guard_size {
            // The new size needs more memory than is currently reserved:
            // allocate a larger mapping and move the heap there.
            let guard_bytes = conf.offset_guard_size;
            let request_bytes =
                new_bytes
                    .checked_add(guard_bytes)
                    .ok_or_else(|| MemoryError::CouldNotGrow {
                        current: new_pages,
                        attempted_delta: Bytes(guard_bytes).try_into().unwrap(),
                    })?;

            let mut new_mmap =
                Mmap::accessible_reserved(new_bytes, request_bytes, None, MmapType::Private)
                    .map_err(MemoryError::Region)?;

            let copy_len = self.alloc.len() - conf.offset_guard_size;
            new_mmap.as_mut_slice()[..copy_len]
                .copy_from_slice(&self.alloc.as_slice()[..copy_len]);

            self.alloc = new_mmap;
        } else if delta_bytes > 0 {
            // The reservation is already large enough: just make the newly
            // added pages accessible.
            self.alloc
                .make_accessible(prev_bytes, delta_bytes)
                .map_err(MemoryError::Region)?;
        }

        self.size = new_pages;

        // Update the memory definition seen by generated code.
        unsafe {
            let mut md_ptr = self.vm_memory_definition.as_ptr();
            let md = md_ptr.as_mut();
            md.current_length = new_pages.bytes().0;
            md.base = self.alloc.as_mut_ptr() as _;
        }

        Ok(prev_pages)
    }

    /// Grows the memory so that it holds at least `min_size` bytes, rounding
    /// the growth up to a whole number of wasm pages.
    fn grow_at_least(&mut self, min_size: u64, conf: VMMemoryConfig) -> Result<(), MemoryError> {
        let cur_size = self.size.bytes().0 as u64;
        if cur_size < min_size {
            let growth = min_size - cur_size;
            // Ceiling division: enough whole pages to cover `growth` bytes.
            let growth_pages = ((growth - 1) / WASM_PAGE_SIZE as u64) + 1;
            self.grow(Pages(growth_pages as u32), conf)?;
        }

        Ok(())
    }

    /// Resets the logical size of this memory back to zero pages.
    fn reset(&mut self) -> Result<(), MemoryError> {
        self.size.0 = 0;
        Ok(())
    }

    /// Copies this memory into a freshly allocated `WasmMmap` with its own
    /// `VMMemoryDefinition`.
    pub fn copy(&mut self) -> Result<Self, MemoryError> {
        let mem_length = self.size.bytes().0;
        let mut alloc = self
            .alloc
            .copy(Some(mem_length))
            .map_err(MemoryError::Generic)?;
        let base_ptr = alloc.as_mut_ptr();
        Ok(Self {
            vm_memory_definition: MaybeInstanceOwned::Host(Box::new(UnsafeCell::new(
                VMMemoryDefinition {
                    base: base_ptr,
                    current_length: mem_length,
                },
            ))),
            alloc,
            size: self.size,
        })
    }
}

/// Configuration shared by the memory wrappers in this module.
#[derive(Debug, Clone)]
struct VMMemoryConfig {
    /// The optional maximum size, in wasm pages, this memory may grow to.
    maximum: Option<Pages>,
    /// The wasm-level type of this memory.
    memory: MemoryType,
    /// The style (static or dynamic) used to implement this memory.
    style: MemoryStyle,
    /// The size, in bytes, of the guard region placed after the memory.
    offset_guard_size: usize,
}

impl VMMemoryConfig {
    fn ty(&self, minimum: Pages) -> MemoryType {
        let mut out = self.memory;
        out.minimum = minimum;

        out
    }

    fn style(&self) -> MemoryStyle {
        self.style
    }
}

/// An owned linear memory instance. It does not implement `Clone`, but it can
/// be converted into a [`VMSharedMemory`] via [`VMOwnedMemory::to_shared`].
#[derive(Debug)]
pub struct VMOwnedMemory {
    /// The underlying allocation.
    mmap: WasmMmap,
    /// Configuration of this memory.
    config: VMMemoryConfig,
}

unsafe impl Send for VMOwnedMemory {}
unsafe impl Sync for VMOwnedMemory {}

impl VMOwnedMemory {
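    /// Creates a new linear memory of the requested type, backed by a
    /// private, anonymous mmap that is immediately accessible up to the
    /// memory's minimum size.
    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled here); it assumes this crate is
    /// available as `wasmer_vm` and that `MemoryType`, `MemoryStyle` and
    /// `Pages` are the `wasmer_types` items imported at the top of this file.
    ///
    /// ```ignore
    /// use wasmer_types::{MemoryStyle, MemoryType, Pages};
    /// use wasmer_vm::{LinearMemory, VMOwnedMemory};
    ///
    /// // One initial page, at most sixteen, not shared.
    /// let ty = MemoryType::new(Pages(1), Some(Pages(16)), false);
    /// let style = MemoryStyle::Dynamic {
    ///     offset_guard_size: 0x1_0000,
    /// };
    ///
    /// let memory = VMOwnedMemory::new(&ty, &style).unwrap();
    /// assert_eq!(memory.size(), Pages(1));
    /// ```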
    pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, None, None, MmapType::Private) }
    }

    /// Creates a new linear memory whose pages are backed by the given file.
    pub fn new_with_file(
        memory: &MemoryType,
        style: &MemoryStyle,
        backing_file: std::path::PathBuf,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, None, Some(backing_file), memory_type) }
    }

    /// Creates a new linear memory whose `VMMemoryDefinition` lives at the
    /// given location (for example, inside an instance's `VMContext`).
    ///
    /// # Safety
    ///
    /// `vm_memory_location` must point to a valid, writable
    /// `VMMemoryDefinition` that outlives the returned memory.
    pub unsafe fn from_definition(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Self::new_internal(
                memory,
                style,
                Some(vm_memory_location),
                None,
                MmapType::Private,
            )
        }
    }

    /// Like [`VMOwnedMemory::from_definition`], but optionally backed by a file.
    ///
    /// # Safety
    ///
    /// `vm_memory_location` must point to a valid, writable
    /// `VMMemoryDefinition` that outlives the returned memory.
    pub unsafe fn from_definition_with_file(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
        backing_file: Option<std::path::PathBuf>,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Self::new_internal(
                memory,
                style,
                Some(vm_memory_location),
                backing_file,
                memory_type,
            )
        }
    }

    unsafe fn new_internal(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: Option<NonNull<VMMemoryDefinition>>,
        backing_file: Option<std::path::PathBuf>,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        unsafe {
            if memory.minimum > Pages::max_value() {
                return Err(MemoryError::MinimumMemoryTooLarge {
                    min_requested: memory.minimum,
                    max_allowed: Pages::max_value(),
                });
            }
            if let Some(max) = memory.maximum {
                if max > Pages::max_value() {
                    return Err(MemoryError::MaximumMemoryTooLarge {
                        max_requested: max,
                        max_allowed: Pages::max_value(),
                    });
                }
                if max < memory.minimum {
                    return Err(MemoryError::InvalidMemory {
                        reason: format!(
                            "the maximum ({} pages) is less than the minimum ({} pages)",
                            max.0, memory.minimum.0
                        ),
                    });
                }
            }

            let offset_guard_bytes = style.offset_guard_size() as usize;

            let minimum_pages = match style {
                MemoryStyle::Dynamic { .. } => memory.minimum,
                MemoryStyle::Static { bound, .. } => {
                    assert_ge!(*bound, memory.minimum);
                    *bound
                }
            };
            let minimum_bytes = minimum_pages.bytes().0;
            let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
            let mapped_pages = memory.minimum;
            let mapped_bytes = mapped_pages.bytes();

            let mut alloc =
                Mmap::accessible_reserved(mapped_bytes.0, request_bytes, backing_file, memory_type)
                    .map_err(MemoryError::Region)?;

            let base_ptr = alloc.as_mut_ptr();
            let mem_length = memory
                .minimum
                .bytes()
                .0
                .max(alloc.as_slice_accessible().len());
            let mmap = WasmMmap {
                vm_memory_definition: if let Some(mem_loc) = vm_memory_location {
                    {
                        let mut ptr = mem_loc;
                        let md = ptr.as_mut();
                        md.base = base_ptr;
                        md.current_length = mem_length;
                    }
                    MaybeInstanceOwned::Instance(mem_loc)
                } else {
                    MaybeInstanceOwned::Host(Box::new(UnsafeCell::new(VMMemoryDefinition {
                        base: base_ptr,
                        current_length: mem_length,
                    })))
                },
                alloc,
                size: Bytes::from(mem_length).try_into().unwrap(),
            };

            Ok(Self {
                mmap,
                config: VMMemoryConfig {
                    maximum: memory.maximum,
                    offset_guard_size: offset_guard_bytes,
                    memory: *memory,
                    style: *style,
                },
            })
        }
    }

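    /// Converts this owned memory into a [`VMSharedMemory`] so that it can be
    /// cloned and used through several handles at once.
    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled here); it assumes this crate is
    /// available as `wasmer_vm`.
    ///
    /// ```ignore
    /// use wasmer_types::{MemoryStyle, MemoryType, Pages};
    /// use wasmer_vm::{LinearMemory, VMOwnedMemory};
    ///
    /// let ty = MemoryType::new(Pages(1), Some(Pages(16)), true);
    /// let style = MemoryStyle::Dynamic { offset_guard_size: 0x1_0000 };
    ///
    /// let shared = VMOwnedMemory::new(&ty, &style).unwrap().to_shared();
    /// let mut handle = shared.clone();
    ///
    /// // Both handles are backed by the same allocation.
    /// handle.grow(Pages(1)).unwrap();
    /// assert_eq!(shared.size(), Pages(2));
    /// ```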
    pub fn to_shared(self) -> VMSharedMemory {
        VMSharedMemory {
            mmap: Arc::new(RwLock::new(self.mmap)),
            config: self.config,
            conditions: ThreadConditions::new(),
        }
    }

    /// Copies this memory into a new, completely independent `VMOwnedMemory`.
    pub fn copy(&mut self) -> Result<Self, MemoryError> {
        Ok(Self {
            mmap: self.mmap.copy()?,
            config: self.config.clone(),
        })
    }
}

impl LinearMemory for VMOwnedMemory {
    fn ty(&self) -> MemoryType {
        let minimum = self.mmap.size();
        self.config.ty(minimum)
    }

    fn size(&self) -> Pages {
        self.mmap.size()
    }

    fn style(&self) -> MemoryStyle {
        self.config.style()
    }

    fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
        self.mmap.grow(delta, self.config.clone())
    }

    fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
        self.mmap.grow_at_least(min_size, self.config.clone())
    }

    fn reset(&mut self) -> Result<(), MemoryError> {
        self.mmap.reset()?;
        Ok(())
    }

    fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
        self.mmap.vm_memory_definition.as_ptr()
    }

    fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        Err(MemoryError::MemoryNotShared)
    }

    fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        let forked = Self::copy(self)?;
        Ok(Box::new(forked))
    }
}

/// A shared linear memory instance: cheap to clone, with all clones backed by
/// the same allocation.
#[derive(Debug, Clone)]
pub struct VMSharedMemory {
    /// The underlying allocation, shared between all clones of this memory.
    mmap: Arc<RwLock<WasmMmap>>,
    /// Configuration of this memory.
    config: VMMemoryConfig,
    /// Conditions used to implement atomic wait/notify on this memory.
    conditions: ThreadConditions,
}

unsafe impl Send for VMSharedMemory {}
unsafe impl Sync for VMSharedMemory {}

impl VMSharedMemory {
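    /// Creates a new shared linear memory of the requested type.
    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled here); it assumes this crate is
    /// available as `wasmer_vm`.
    ///
    /// ```ignore
    /// use wasmer_types::{MemoryStyle, MemoryType, Pages};
    /// use wasmer_vm::{LinearMemory, VMSharedMemory};
    ///
    /// let ty = MemoryType::new(Pages(1), Some(Pages(4)), true);
    /// let style = MemoryStyle::Static {
    ///     bound: Pages(4),
    ///     offset_guard_size: 0x1_0000,
    /// };
    ///
    /// let memory = VMSharedMemory::new(&ty, &style).unwrap();
    /// assert_eq!(memory.ty().maximum, Some(Pages(4)));
    /// ```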
    pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
        Ok(VMOwnedMemory::new(memory, style)?.to_shared())
    }

    /// Creates a new shared linear memory whose pages are backed by the given file.
    pub fn new_with_file(
        memory: &MemoryType,
        style: &MemoryStyle,
        backing_file: std::path::PathBuf,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        Ok(VMOwnedMemory::new_with_file(memory, style, backing_file, memory_type)?.to_shared())
    }

    /// Creates a new shared linear memory whose `VMMemoryDefinition` lives at
    /// the given location.
    ///
    /// # Safety
    ///
    /// `vm_memory_location` must point to a valid, writable
    /// `VMMemoryDefinition` that outlives the returned memory.
    pub unsafe fn from_definition(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Ok(VMOwnedMemory::from_definition(memory, style, vm_memory_location)?.to_shared())
        }
    }

    /// Like [`VMSharedMemory::from_definition`], but optionally backed by a file.
    ///
    /// # Safety
    ///
    /// `vm_memory_location` must point to a valid, writable
    /// `VMMemoryDefinition` that outlives the returned memory.
    pub unsafe fn from_definition_with_file(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
        backing_file: Option<std::path::PathBuf>,
        memory_type: MmapType,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Ok(VMOwnedMemory::from_definition_with_file(
                memory,
                style,
                vm_memory_location,
                backing_file,
                memory_type,
            )?
            .to_shared())
        }
    }

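    /// Copies this shared memory into a new, independent [`VMSharedMemory`]
    /// with its own allocation.
    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled here); it assumes this crate is
    /// available as `wasmer_vm`.
    ///
    /// ```ignore
    /// use wasmer_types::{MemoryStyle, MemoryType, Pages};
    /// use wasmer_vm::{LinearMemory, VMSharedMemory};
    ///
    /// let ty = MemoryType::new(Pages(1), Some(Pages(8)), true);
    /// let style = MemoryStyle::Dynamic { offset_guard_size: 0x1_0000 };
    ///
    /// let mut original = VMSharedMemory::new(&ty, &style).unwrap();
    /// let mut forked = original.copy().unwrap();
    ///
    /// // Growing the fork does not affect the original.
    /// forked.grow(Pages(1)).unwrap();
    /// assert_eq!(original.size(), Pages(1));
    /// assert_eq!(forked.size(), Pages(2));
    /// ```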
    pub fn copy(&mut self) -> Result<Self, MemoryError> {
        let mut guard = self.mmap.write().unwrap();
        Ok(Self {
            mmap: Arc::new(RwLock::new(guard.copy()?)),
            config: self.config.clone(),
            conditions: ThreadConditions::new(),
        })
    }
}

impl LinearMemory for VMSharedMemory {
    fn ty(&self) -> MemoryType {
        let minimum = {
            let guard = self.mmap.read().unwrap();
            guard.size()
        };
        self.config.ty(minimum)
    }

    fn size(&self) -> Pages {
        let guard = self.mmap.read().unwrap();
        guard.size()
    }

    fn style(&self) -> MemoryStyle {
        self.config.style()
    }

    fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
        let mut guard = self.mmap.write().unwrap();
        guard.grow(delta, self.config.clone())
    }

    fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
        let mut guard = self.mmap.write().unwrap();
        guard.grow_at_least(min_size, self.config.clone())
    }

    fn reset(&mut self) -> Result<(), MemoryError> {
        let mut guard = self.mmap.write().unwrap();
        guard.reset()?;
        Ok(())
    }

    fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
        let guard = self.mmap.read().unwrap();
        guard.vm_memory_definition.as_ptr()
    }

    fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        Ok(Box::new(self.clone()))
    }

    fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        let forked = Self::copy(self)?;
        Ok(Box::new(forked))
    }

    unsafe fn do_wait(
        &mut self,
        dst: u32,
        expected: ExpectedValue,
        timeout: Option<Duration>,
    ) -> Result<u32, WaiterError> {
        let dst = NotifyLocation {
            address: dst,
            memory_base: self.mmap.read().unwrap().alloc.as_ptr() as *mut _,
        };
        unsafe { self.conditions.do_wait(dst, expected, timeout) }
    }

    fn do_notify(&mut self, dst: u32, count: u32) -> u32 {
        self.conditions.do_notify(dst, count)
    }

    fn thread_conditions(&self) -> Option<&ThreadConditions> {
        Some(&self.conditions)
    }
}

impl From<VMOwnedMemory> for VMMemory {
    fn from(mem: VMOwnedMemory) -> Self {
        Self(Box::new(mem))
    }
}

impl From<VMSharedMemory> for VMMemory {
    fn from(mem: VMSharedMemory) -> Self {
        Self(Box::new(mem))
    }
}

/// A type-erased linear memory: any implementation of [`LinearMemory`] boxed
/// behind a single wrapper type.
#[derive(Debug)]
pub struct VMMemory(pub Box<dyn LinearMemory + 'static>);

impl From<Box<dyn LinearMemory + 'static>> for VMMemory {
    fn from(mem: Box<dyn LinearMemory + 'static>) -> Self {
        Self(mem)
    }
}

impl LinearMemory for VMMemory {
    fn ty(&self) -> MemoryType {
        self.0.ty()
    }

    fn size(&self) -> Pages {
        self.0.size()
    }

    fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
        self.0.grow(delta)
    }

    fn grow_at_least(&mut self, min_size: u64) -> Result<(), MemoryError> {
        self.0.grow_at_least(min_size)
    }

    fn reset(&mut self) -> Result<(), MemoryError> {
        self.0.reset()?;
        Ok(())
    }

    fn style(&self) -> MemoryStyle {
        self.0.style()
    }

    fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
        self.0.vmmemory()
    }

    fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        self.0.try_clone()
    }

    unsafe fn initialize_with_data(&self, start: usize, data: &[u8]) -> Result<(), Trap> {
        unsafe { self.0.initialize_with_data(start, data) }
    }

    fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        self.0.copy()
    }

    unsafe fn do_wait(
        &mut self,
        dst: u32,
        expected: ExpectedValue,
        timeout: Option<Duration>,
    ) -> Result<u32, WaiterError> {
        unsafe { self.0.do_wait(dst, expected, timeout) }
    }

    fn do_notify(&mut self, dst: u32, count: u32) -> u32 {
        self.0.do_notify(dst, count)
    }

    fn thread_conditions(&self) -> Option<&ThreadConditions> {
        self.0.thread_conditions()
    }
}

impl VMMemory {
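    /// Creates a new linear memory, choosing [`VMSharedMemory`] when the type
    /// is declared `shared` and [`VMOwnedMemory`] otherwise.
    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled here); it assumes this crate is
    /// available as `wasmer_vm`.
    ///
    /// ```ignore
    /// use wasmer_types::{MemoryStyle, MemoryType, Pages};
    /// use wasmer_vm::{LinearMemory, VMMemory};
    ///
    /// let ty = MemoryType::new(Pages(1), Some(Pages(8)), false);
    /// let style = MemoryStyle::Dynamic { offset_guard_size: 0x1_0000 };
    ///
    /// let mut memory = VMMemory::new(&ty, &style).unwrap();
    ///
    /// // `grow` returns the size before growing, in wasm pages.
    /// let before = memory.grow(Pages(2)).unwrap();
    /// assert_eq!(before, Pages(1));
    /// assert_eq!(memory.size(), Pages(3));
    /// ```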
    pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
        Ok(if memory.shared {
            Self(Box::new(VMSharedMemory::new(memory, style)?))
        } else {
            Self(Box::new(VMOwnedMemory::new(memory, style)?))
        })
    }

    /// Returns the current size of this memory, in wasm pages.
    pub fn get_runtime_size(&self) -> u32 {
        self.0.size().0
    }

    /// Creates a new linear memory whose `VMMemoryDefinition` lives at the
    /// given location, choosing a shared or owned implementation based on the
    /// memory type.
    ///
    /// # Safety
    ///
    /// `vm_memory_location` must point to a valid, writable
    /// `VMMemoryDefinition` that outlives the returned memory.
    pub unsafe fn from_definition(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
    ) -> Result<Self, MemoryError> {
        unsafe {
            Ok(if memory.shared {
                Self(Box::new(VMSharedMemory::from_definition(
                    memory,
                    style,
                    vm_memory_location,
                )?))
            } else {
                Self(Box::new(VMOwnedMemory::from_definition(
                    memory,
                    style,
                    vm_memory_location,
                )?))
            })
        }
    }

    /// Wraps a custom [`LinearMemory`] implementation (anything convertible
    /// into a `VMMemory`) so it can be used wherever a `VMMemory` is expected.
    pub fn from_custom<IntoVMMemory>(memory: IntoVMMemory) -> Self
    where
        IntoVMMemory: Into<Self>,
    {
        memory.into()
    }

    /// Copies this memory into a new, independent memory behind a boxed
    /// [`LinearMemory`].
    pub fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
        LinearMemory::copy(self)
    }
}

/// Initializes a linear memory by copying `data` into it at byte offset `start`.
///
/// # Safety
///
/// `memory.base` must point to at least `memory.current_length` writable bytes,
/// and `start + data.len()` must not exceed `memory.current_length`.
#[doc(hidden)]
pub unsafe fn initialize_memory_with_data(
    memory: &VMMemoryDefinition,
    start: usize,
    data: &[u8],
) -> Result<(), Trap> {
    unsafe {
        let mem_slice = slice::from_raw_parts_mut(memory.base, memory.current_length);
        let end = start + data.len();
        let to_init = &mut mem_slice[start..end];
        to_init.copy_from_slice(data);

        Ok(())
    }
}

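/// The trait implemented by every linear-memory backend in this module
/// ([`VMOwnedMemory`], [`VMSharedMemory`], [`VMMemory`]), and the extension
/// point for plugging in a custom memory via [`VMMemory::from_custom`].
///
/// # Example
///
/// A minimal sketch of a custom implementation that simply delegates to a
/// [`VMOwnedMemory`] (not compiled here; it assumes this crate is available
/// as `wasmer_vm`). Only the methods without default bodies are shown.
///
/// ```ignore
/// use std::ptr::NonNull;
/// use wasmer_types::{MemoryError, MemoryStyle, MemoryType, Pages};
/// use wasmer_vm::{LinearMemory, VMMemory, VMMemoryDefinition, VMOwnedMemory};
///
/// #[derive(Debug)]
/// struct InstrumentedMemory {
///     inner: VMOwnedMemory,
/// }
///
/// impl LinearMemory for InstrumentedMemory {
///     fn ty(&self) -> MemoryType {
///         self.inner.ty()
///     }
///
///     fn size(&self) -> Pages {
///         self.inner.size()
///     }
///
///     fn style(&self) -> MemoryStyle {
///         self.inner.style()
///     }
///
///     fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError> {
///         // A custom backend could log, meter or veto growth here.
///         self.inner.grow(delta)
///     }
///
///     fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
///         self.inner.vmmemory()
///     }
///
///     fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
///         self.inner.try_clone()
///     }
///
///     fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError> {
///         self.inner.copy()
///     }
/// }
///
/// impl From<InstrumentedMemory> for VMMemory {
///     fn from(mem: InstrumentedMemory) -> Self {
///         VMMemory(Box::new(mem))
///     }
/// }
///
/// // let custom = VMMemory::from_custom(InstrumentedMemory { inner });
/// ```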
pub trait LinearMemory
where
    Self: std::fmt::Debug + Send,
{
    /// Returns the type of this memory, with `minimum` reflecting its current size.
    fn ty(&self) -> MemoryType;

    /// Returns the current size of this memory, in wasm pages.
    fn size(&self) -> Pages;

    /// Returns the style used to implement this memory.
    fn style(&self) -> MemoryStyle;

    /// Grows the memory by `delta` wasm pages, returning the previous size in
    /// pages on success.
    fn grow(&mut self, delta: Pages) -> Result<Pages, MemoryError>;

    /// Grows the memory so that it holds at least `min_size` bytes. The
    /// default implementation reports the operation as unsupported.
    fn grow_at_least(&mut self, _min_size: u64) -> Result<(), MemoryError> {
        Err(MemoryError::UnsupportedOperation {
            message: "grow_at_least() is not supported".to_string(),
        })
    }

    /// Resets the memory back to zero length. The default implementation
    /// reports the operation as unsupported.
    fn reset(&mut self) -> Result<(), MemoryError> {
        Err(MemoryError::UnsupportedOperation {
            message: "reset() is not supported".to_string(),
        })
    }

    /// Returns a pointer to the `VMMemoryDefinition` used by generated code.
    fn vmmemory(&self) -> NonNull<VMMemoryDefinition>;

    /// Attempts to create a new handle to the same underlying memory, if the
    /// implementation supports it.
    fn try_clone(&self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError>;

    /// Initializes this memory with the given data at byte offset `start`.
    ///
    /// # Safety
    ///
    /// The definition returned by `vmmemory()` must be valid, and
    /// `start + data.len()` must not exceed its current length.
    #[doc(hidden)]
    unsafe fn initialize_with_data(&self, start: usize, data: &[u8]) -> Result<(), Trap> {
        unsafe {
            let memory = self.vmmemory().as_ref();

            initialize_memory_with_data(memory, start, data)
        }
    }

    /// Copies this memory into a new, independent memory.
    fn copy(&mut self) -> Result<Box<dyn LinearMemory + 'static>, MemoryError>;

    /// Waits on the location at byte offset `dst`, comparing against
    /// `expected`, with an optional `timeout`. The default implementation
    /// reports waiting as unimplemented.
    ///
    /// # Safety
    ///
    /// The caller must ensure `dst` is a valid offset into this memory.
    unsafe fn do_wait(
        &mut self,
        _dst: u32,
        _expected: ExpectedValue,
        _timeout: Option<Duration>,
    ) -> Result<u32, WaiterError> {
        Err(WaiterError::Unimplemented)
    }

    /// Notifies waiters parked on byte offset `dst`, waking at most `count`
    /// of them and returning the number woken. The default implementation
    /// wakes nobody.
    fn do_notify(&mut self, _dst: u32, _count: u32) -> u32 {
        0
    }

    /// Returns the [`ThreadConditions`] backing wait/notify, if any.
    fn thread_conditions(&self) -> Option<&ThreadConditions> {
        None
    }
}