wasmer_vm/
vmcontext.rs

1// This file contains code from external sources.
2// Attributions: https://github.com/wasmerio/wasmer/blob/main/docs/ATTRIBUTIONS.md
3
4//! This file declares `VMContext` and several related structs which contain
5//! fields that compiled wasm code accesses directly.
6
7use crate::VMFunctionBody;
8use crate::VMTable;
9use crate::global::VMGlobal;
10use crate::instance::Instance;
11use crate::memory::VMMemory;
12use crate::store::InternalStoreHandle;
13use crate::trap::{Trap, TrapCode};
14use crate::{VMBuiltinFunctionIndex, VMFunction};
15use std::convert::TryFrom;
16use std::hash::{Hash, Hasher};
17use std::ptr::{self, NonNull};
18use std::sync::atomic::{AtomicPtr, Ordering};
19use wasmer_types::RawValue;
20
21/// Union representing the first parameter passed when calling a function.
22///
23/// It may either be a pointer to the [`VMContext`] if it's a Wasm function
24/// or a pointer to arbitrary data controlled by the host if it's a host function.
25#[derive(Copy, Clone, Eq)]
26#[repr(C)]
27pub union VMFunctionContext {
28    /// Wasm functions take a pointer to [`VMContext`].
29    pub vmctx: *mut VMContext,
30    /// Host functions can have custom environments.
31    pub host_env: *mut std::ffi::c_void,
32}
33
34impl VMFunctionContext {
35    /// Check whether the pointer stored is null or not.
36    pub fn is_null(&self) -> bool {
37        unsafe { self.host_env.is_null() }
38    }
39}
40
41impl std::fmt::Debug for VMFunctionContext {
42    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
43        f.debug_struct("VMFunctionContext")
44            .field("vmctx_or_hostenv", unsafe { &self.host_env })
45            .finish()
46    }
47}
48
49impl std::cmp::PartialEq for VMFunctionContext {
50    fn eq(&self, rhs: &Self) -> bool {
51        unsafe { std::ptr::eq(self.host_env, rhs.host_env) }
52    }
53}
54
55impl std::hash::Hash for VMFunctionContext {
56    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
57        unsafe {
58            self.vmctx.hash(state);
59        }
60    }
61}
62
63/// An imported function.
64#[derive(Debug, Copy, Clone)]
65#[repr(C)]
66pub struct VMFunctionImport {
67    /// A pointer to the imported function body.
68    pub body: *const VMFunctionBody,
69
70    /// A pointer to the `VMContext` that owns the function or host env data.
71    pub environment: VMFunctionContext,
72
73    /// Handle to the `VMFunction` in the context.
74    pub handle: InternalStoreHandle<VMFunction>,
75
76    /// Flag if the function requires extra the m0 argument (used for m0 optimization dispatch).
77    pub include_m0_param: bool,
78}
79
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    use wasmer_types::VMOffsets;

    // Verifies that the Rust layout of `VMFunctionImport` matches the
    // size/offsets described by `VMOffsets`, which compiled code relies on.
    #[test]
    fn check_vmfunction_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, body),
            usize::from(offsets.vmfunction_import_body())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, environment),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}
106
/// The `VMDynamicFunctionContext` is the context that dynamic
/// functions will receive when called (rather than `vmctx`).
/// A dynamic function is a function for which we don't know the signature
/// until runtime.
///
/// As such, we need to expose the dynamic function `context`
/// containing the relevant context for running the function indicated
/// in `address`.
///
/// `#[repr(C)]` because compiled code reads `address`/`ctx` at fixed offsets
/// (checked by the offset test module in this file).
#[repr(C)]
pub struct VMDynamicFunctionContext<T> {
    /// The address of the inner dynamic function.
    ///
    /// Note: The function must be on the form of
    /// `(*mut T, SignatureIndex, *mut i128)`.
    pub address: *const VMFunctionBody,

    /// The context that the inner dynamic function will receive.
    pub ctx: T,
}

// The `ctx` itself must be `Send`, `address` can be passed between
// threads because all usage is `unsafe` and synchronized.
unsafe impl<T: Sized + Send + Sync> Send for VMDynamicFunctionContext<T> {}
// The `ctx` itself must be `Sync`, `address` can be shared between
// threads because all usage is `unsafe` and synchronized.
unsafe impl<T: Sized + Send + Sync> Sync for VMDynamicFunctionContext<T> {}
133
134impl<T: Sized + Clone + Send + Sync> Clone for VMDynamicFunctionContext<T> {
135    fn clone(&self) -> Self {
136        Self {
137            address: self.address,
138            ctx: self.ctx.clone(),
139        }
140    }
141}
142
#[cfg(test)]
mod test_vmdynamicfunction_import_context {
    use super::VMDynamicFunctionContext;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Verifies that `VMDynamicFunctionContext`'s layout matches `VMOffsets`.
    #[test]
    fn check_vmdynamicfunction_import_context_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMDynamicFunctionContext<usize>>(),
            usize::from(offsets.size_of_vmdynamicfunction_import_context())
        );
        assert_eq!(
            offset_of!(VMDynamicFunctionContext<usize>, address),
            usize::from(offsets.vmdynamicfunction_import_context_address())
        );
        assert_eq!(
            offset_of!(VMDynamicFunctionContext<usize>, ctx),
            usize::from(offsets.vmdynamicfunction_import_context_ctx())
        );
    }
}
169
/// A function kind is a calling convention into and out of wasm code.
///
/// `#[repr(C)]` so the discriminant layout is stable across the FFI boundary.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(C)]
pub enum VMFunctionKind {
    /// A static function has the native signature:
    /// `extern "C" (vmctx, arg1, arg2...) -> (result1, result2, ...)`.
    ///
    /// This is the default for functions that are defined:
    /// 1. In the Host, natively
    /// 2. In the WebAssembly file
    Static,

    /// A dynamic function has the native signature:
    /// `extern "C" (ctx, &[Value]) -> Vec<Value>`.
    ///
    /// This is the default for functions that are defined:
    /// 1. In the Host, dynamically
    Dynamic,
}
189
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    ///
    /// Points into the exporting instance's state (see doc above).
    pub definition: NonNull<VMTableDefinition>,

    /// Handle to the `VMTable` in the context.
    pub handle: InternalStoreHandle<VMTable>,
}
201
#[cfg(test)]
mod test_vmtable_import {
    use super::VMTableImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Verifies that `VMTableImport`'s layout matches `VMOffsets`.
    #[test]
    fn check_vmtable_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, definition),
            usize::from(offsets.vmtable_import_definition())
        );
    }
}
224
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    ///
    /// Points into the exporting instance's state (see doc above).
    pub definition: NonNull<VMMemoryDefinition>,

    /// A handle to the `Memory` that owns the memory description.
    pub handle: InternalStoreHandle<VMMemory>,
}
236
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Verifies that `VMMemoryImport`'s layout matches `VMOffsets`.
    #[test]
    fn check_vmmemory_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, definition),
            usize::from(offsets.vmmemory_import_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, handle),
            usize::from(offsets.vmmemory_import_handle())
        );
    }
}
263
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
#[derive(Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    ///
    /// Points into the exporting instance's state (see doc above).
    pub definition: NonNull<VMGlobalDefinition>,

    /// A handle to the `Global` that owns the global description.
    pub handle: InternalStoreHandle<VMGlobal>,
}

/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. Additionally, all operations
/// on `from` are thread-safe through the use of a mutex in [`VMGlobal`].
unsafe impl Send for VMGlobalImport {}
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. And because it's `Clone`, there's
/// really no difference between passing it by reference or by value as far as
/// correctness in a multi-threaded context is concerned.
unsafe impl Sync for VMGlobalImport {}
287
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Verifies that `VMGlobalImport`'s layout matches `VMOffsets`.
    #[test]
    fn check_vmglobal_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, definition),
            usize::from(offsets.vmglobal_import_definition())
        );
    }
}
310
311/// Do an unsynchronized, non-atomic `memory.copy` for the memory.
312///
313/// # Errors
314///
315/// Returns a `Trap` error when the source or destination ranges are out of
316/// bounds.
317///
318/// # Safety
319/// The memory is not copied atomically and is not synchronized: it's the
320/// caller's responsibility to synchronize.
321pub(crate) unsafe fn memory_copy(
322    mem: &VMMemoryDefinition,
323    dst: u32,
324    src: u32,
325    len: u32,
326) -> Result<(), Trap> {
327    unsafe {
328        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy
329        if src
330            .checked_add(len)
331            .is_none_or(|n| usize::try_from(n).unwrap() > mem.current_length)
332            || dst
333                .checked_add(len)
334                .is_none_or(|m| usize::try_from(m).unwrap() > mem.current_length)
335        {
336            return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
337        }
338
339        let dst = usize::try_from(dst).unwrap();
340        let src = usize::try_from(src).unwrap();
341
342        // Bounds and casts are checked above, by this point we know that
343        // everything is safe.
344        let dst = mem.base.add(dst);
345        let src = mem.base.add(src);
346        ptr::copy(src, dst, len as usize);
347
348        Ok(())
349    }
350}
351
352/// Perform the `memory.fill` operation for the memory in an unsynchronized,
353/// non-atomic way.
354///
355/// # Errors
356///
357/// Returns a `Trap` error if the memory range is out of bounds.
358///
359/// # Safety
360/// The memory is not filled atomically and is not synchronized: it's the
361/// caller's responsibility to synchronize.
362pub(crate) unsafe fn memory_fill(
363    mem: &VMMemoryDefinition,
364    dst: u32,
365    val: u32,
366    len: u32,
367) -> Result<(), Trap> {
368    unsafe {
369        if dst
370            .checked_add(len)
371            .is_none_or(|m| usize::try_from(m).unwrap() > mem.current_length)
372        {
373            return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
374        }
375
376        let dst = isize::try_from(dst).unwrap();
377        let val = val as u8;
378
379        // Bounds and casts are checked above, by this point we know that
380        // everything is safe.
381        let dst = mem.base.offset(dst);
382        ptr::write_bytes(dst, val, len as usize);
383
384        Ok(())
385    }
386}
387
/// Perform the `memory32.atomic.check32` operation for the memory. Return 0 if same, 1 if different
///
/// # Errors
///
/// Returns a `Trap` error if the memory range is out of bounds or 32bits unaligned.
///
/// # Safety
/// memory access is unsafe
pub(crate) unsafe fn memory32_atomic_check32(
    mem: &VMMemoryDefinition,
    dst: u32,
    val: u32,
) -> Result<u32, Trap> {
    unsafe {
        // NOTE(review): this bound check compares `dst` alone against
        // `current_length`, without adding the 4-byte access width — confirm
        // callers guarantee the tail bytes, or that this matches the intended
        // semantics.
        if usize::try_from(dst).unwrap() > mem.current_length {
            return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
        }

        let dst = isize::try_from(dst).unwrap();
        // The address must be 4-byte aligned for a 32-bit atomic access.
        if dst & 0b11 != 0 {
            return Err(Trap::lib(TrapCode::UnalignedAtomic));
        }

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        let dst = mem.base.offset(dst) as *mut u32;
        // NOTE(review): `AtomicPtr` makes the load of the *pointer value*
        // atomic; the dereference below is a plain (non-atomic) 32-bit read —
        // verify this is the intended synchronization.
        let atomic_dst = AtomicPtr::new(dst);
        let read_val = *atomic_dst.load(Ordering::Acquire);
        let ret = if read_val == val { 0 } else { 1 };
        Ok(ret)
    }
}
420
/// Perform the `memory32.atomic.check64` operation for the memory. Return 0 if same, 1 if different
///
/// # Errors
///
/// Returns a `Trap` error if the memory range is out of bounds or 64bits unaligned.
///
/// # Safety
/// memory access is unsafe
pub(crate) unsafe fn memory32_atomic_check64(
    mem: &VMMemoryDefinition,
    dst: u32,
    val: u64,
) -> Result<u32, Trap> {
    unsafe {
        // NOTE(review): as in `memory32_atomic_check32`, the 8-byte access
        // width is not added to `dst` before the bound check — confirm.
        if usize::try_from(dst).unwrap() > mem.current_length {
            return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
        }

        let dst = isize::try_from(dst).unwrap();
        // The address must be 8-byte aligned for a 64-bit atomic access.
        if dst & 0b111 != 0 {
            return Err(Trap::lib(TrapCode::UnalignedAtomic));
        }

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        let dst = mem.base.offset(dst) as *mut u64;
        // NOTE(review): the pointer load is atomic, the 64-bit dereference is
        // a plain read — verify intended semantics.
        let atomic_dst = AtomicPtr::new(dst);
        let read_val = *atomic_dst.load(Ordering::Acquire);
        let ret = if read_val == val { 0 } else { 1 };
        Ok(ret)
    }
}
453
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: *mut u8,

    /// The current number of elements in the table.
    pub current_elements: u32,
}
465
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Verifies that `VMTableDefinition`'s layout matches `VMOffsets`.
    #[test]
    fn check_vmtable_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}
492
/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
///
/// `align(16)` so globals can hold 128-bit values; the alignment is asserted
/// by the test module below.
#[derive(Debug, Clone)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    /// Raw value of the global.
    pub val: RawValue,
}
503
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use crate::{VMFuncRef, VMOffsets};
    use more_asserts::assert_ge;
    use std::mem::{align_of, size_of};
    use wasmer_types::ModuleInfo;

    // The global slot must be aligned enough for every value type it can hold.
    #[test]
    fn check_vmglobal_definition_alignment() {
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<i32>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<i64>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<f32>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<f64>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<VMFuncRef>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<[u8; 16]>());
    }

    // Locally-defined globals are stored behind a pointer in the vmctx.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<*const VMGlobalDefinition>(),
            usize::from(offsets.size_of_vmglobal_local())
        );
    }

    // The globals region of the vmctx must honor the struct's align(16).
    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }
}
539
540impl VMGlobalDefinition {
541    /// Construct a `VMGlobalDefinition`.
542    pub fn new() -> Self {
543        Self {
544            val: Default::default(),
545        }
546    }
547}
548
/// A tag index, unique within the Store in which the instance was created.
/// Usable for translating module-local tag indices to store-unique ones.
#[repr(C)]
#[cfg_attr(feature = "artifact-size", derive(loupe::MemoryUsage))]
pub struct VMSharedTagIndex(u32);

impl VMSharedTagIndex {
    /// Wrap a raw `u32` as a `VMSharedTagIndex`.
    pub fn new(value: u32) -> Self {
        VMSharedTagIndex(value)
    }

    /// Get the inner value.
    pub fn index(&self) -> u32 {
        let VMSharedTagIndex(raw) = self;
        *raw
    }
}
566
/// An index into the shared signature registry, usable for checking signatures
/// at indirect calls.
///
/// The default value (`u32::MAX`) is reserved — see the `Default` impl below.
#[repr(C)]
#[cfg_attr(feature = "artifact-size", derive(loupe::MemoryUsage))]
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)]
pub struct VMSharedSignatureIndex(u32);
573
#[cfg(test)]
mod test_vmshared_signature_index {
    use super::VMSharedSignatureIndex;
    use std::mem::size_of;
    use wasmer_types::{ModuleInfo, TargetSharedSignatureIndex, VMOffsets};

    // Verifies the signature-index size matches `VMOffsets`.
    #[test]
    fn check_vmshared_signature_index() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMSharedSignatureIndex>(),
            usize::from(offsets.size_of_vmshared_signature_index())
        );
    }

    // Host-side and target-side signature indices must have identical size.
    #[test]
    fn check_target_shared_signature_index() {
        assert_eq!(
            size_of::<VMSharedSignatureIndex>(),
            size_of::<TargetSharedSignatureIndex>()
        );
    }
}
598
599impl VMSharedSignatureIndex {
600    /// Create a new `VMSharedSignatureIndex`.
601    pub fn new(value: u32) -> Self {
602        Self(value)
603    }
604}
605
606impl Default for VMSharedSignatureIndex {
607    fn default() -> Self {
608        Self::new(u32::MAX)
609    }
610}
611
/// The VM caller-checked "anyfunc" record, for caller-side signature checking.
/// It consists of the actual function pointer and a signature id to be checked
/// by the caller.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct VMCallerCheckedAnyfunc {
    /// Function body.
    pub func_ptr: *const VMFunctionBody,
    /// Function signature id.
    pub type_index: VMSharedSignatureIndex,
    /// Function `VMContext` or host env.
    pub vmctx: VMFunctionContext,
    /// Address of the function call trampoline to invoke this function using
    /// a dynamic argument list.
    pub call_trampoline: VMTrampoline,
    // If more elements are added here, remember to add offset_of tests below!
}
629
impl PartialEq for VMCallerCheckedAnyfunc {
    // Field-wise equality; the trampoline fn pointer is compared by address
    // with `ptr::fn_addr_eq`.
    fn eq(&self, other: &Self) -> bool {
        self.func_ptr == other.func_ptr
            && self.type_index == other.type_index
            && self.vmctx == other.vmctx
            && ptr::fn_addr_eq(self.call_trampoline, other.call_trampoline)
    }
}

impl Eq for VMCallerCheckedAnyfunc {}

impl Hash for VMCallerCheckedAnyfunc {
    // Hashes the same fields `eq` compares, keeping Hash/Eq consistent.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.func_ptr.hash(state);
        self.type_index.hash(state);
        self.vmctx.hash(state);
        ptr::hash(self.call_trampoline as *const (), state);
    }
}
649
#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
    use super::VMCallerCheckedAnyfunc;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Verifies that `VMCallerCheckedAnyfunc`'s layout matches `VMOffsets`.
    #[test]
    fn check_vmcaller_checked_anyfunc_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMCallerCheckedAnyfunc>(),
            usize::from(offsets.size_of_vmcaller_checked_anyfunc())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, func_ptr),
            usize::from(offsets.vmcaller_checked_anyfunc_func_ptr())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, type_index),
            usize::from(offsets.vmcaller_checked_anyfunc_type_index())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, vmctx),
            usize::from(offsets.vmcaller_checked_anyfunc_vmctx())
        );
    }
}
680
/// An array that stores addresses of builtin functions. We translate code
/// to use indirect calls. This way, we don't have to patch the code.
#[repr(C)]
pub struct VMBuiltinFunctionsArray {
    // One address per builtin, indexed by `VMBuiltinFunctionIndex`.
    ptrs: [usize; Self::len()],
}
687
688impl VMBuiltinFunctionsArray {
689    pub const fn len() -> usize {
690        VMBuiltinFunctionIndex::builtin_functions_total_number() as usize
691    }
692
693    pub fn initialized() -> Self {
694        use crate::libcalls::*;
695
696        let mut ptrs = [0; Self::len()];
697
698        ptrs[VMBuiltinFunctionIndex::get_memory32_grow_index().index() as *const () as usize] =
699            wasmer_vm_memory32_grow as *const () as usize;
700        ptrs[VMBuiltinFunctionIndex::get_imported_memory32_grow_index().index() as *const ()
701            as usize] = wasmer_vm_imported_memory32_grow as *const () as usize;
702        ptrs[VMBuiltinFunctionIndex::get_memory32_size_index().index() as *const () as usize] =
703            wasmer_vm_memory32_size as *const () as usize;
704        ptrs[VMBuiltinFunctionIndex::get_imported_memory32_size_index().index() as *const ()
705            as usize] = wasmer_vm_imported_memory32_size as *const () as usize;
706        ptrs[VMBuiltinFunctionIndex::get_table_copy_index().index() as *const () as usize] =
707            wasmer_vm_table_copy as *const () as usize;
708        ptrs[VMBuiltinFunctionIndex::get_table_init_index().index() as *const () as usize] =
709            wasmer_vm_table_init as *const () as usize;
710        ptrs[VMBuiltinFunctionIndex::get_elem_drop_index().index() as *const () as usize] =
711            wasmer_vm_elem_drop as *const () as usize;
712        ptrs[VMBuiltinFunctionIndex::get_memory_copy_index().index() as *const () as usize] =
713            wasmer_vm_memory32_copy as *const () as usize;
714        ptrs[VMBuiltinFunctionIndex::get_imported_memory_copy_index().index() as *const ()
715            as usize] = wasmer_vm_imported_memory32_copy as *const () as usize;
716        ptrs[VMBuiltinFunctionIndex::get_memory_fill_index().index() as *const () as usize] =
717            wasmer_vm_memory32_fill as *const () as usize;
718        ptrs[VMBuiltinFunctionIndex::get_imported_memory_fill_index().index() as *const ()
719            as usize] = wasmer_vm_imported_memory32_fill as *const () as usize;
720        ptrs[VMBuiltinFunctionIndex::get_memory_init_index().index() as *const () as usize] =
721            wasmer_vm_memory32_init as *const () as usize;
722        ptrs[VMBuiltinFunctionIndex::get_data_drop_index().index() as *const () as usize] =
723            wasmer_vm_data_drop as *const () as usize;
724        ptrs[VMBuiltinFunctionIndex::get_raise_trap_index().index() as *const () as usize] =
725            wasmer_vm_raise_trap as *const () as usize;
726        ptrs[VMBuiltinFunctionIndex::get_table_size_index().index() as *const () as usize] =
727            wasmer_vm_table_size as *const () as usize;
728        ptrs[VMBuiltinFunctionIndex::get_imported_table_size_index().index() as *const ()
729            as usize] = wasmer_vm_imported_table_size as *const () as usize;
730        ptrs[VMBuiltinFunctionIndex::get_table_grow_index().index() as *const () as usize] =
731            wasmer_vm_table_grow as *const () as usize;
732        ptrs[VMBuiltinFunctionIndex::get_imported_table_grow_index().index() as *const ()
733            as usize] = wasmer_vm_imported_table_grow as *const () as usize;
734        ptrs[VMBuiltinFunctionIndex::get_table_get_index().index() as *const () as usize] =
735            wasmer_vm_table_get as *const () as usize;
736        ptrs[VMBuiltinFunctionIndex::get_imported_table_get_index().index() as *const ()
737            as usize] = wasmer_vm_imported_table_get as *const () as usize;
738        ptrs[VMBuiltinFunctionIndex::get_table_set_index().index() as usize] =
739            wasmer_vm_table_set as *const () as usize;
740        ptrs[VMBuiltinFunctionIndex::get_imported_table_set_index().index() as usize] =
741            wasmer_vm_imported_table_set as *const () as usize;
742        ptrs[VMBuiltinFunctionIndex::get_func_ref_index().index() as usize] =
743            wasmer_vm_func_ref as *const () as usize;
744        ptrs[VMBuiltinFunctionIndex::get_table_fill_index().index() as usize] =
745            wasmer_vm_table_fill as *const () as usize;
746        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_wait32_index().index() as usize] =
747            wasmer_vm_memory32_atomic_wait32 as *const () as usize;
748        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_wait32_index().index() as usize] =
749            wasmer_vm_imported_memory32_atomic_wait32 as *const () as usize;
750        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_wait64_index().index() as usize] =
751            wasmer_vm_memory32_atomic_wait64 as *const () as usize;
752        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_wait64_index().index() as usize] =
753            wasmer_vm_imported_memory32_atomic_wait64 as *const () as usize;
754        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_notify_index().index() as usize] =
755            wasmer_vm_memory32_atomic_notify as *const () as usize;
756        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_notify_index().index() as usize] =
757            wasmer_vm_imported_memory32_atomic_notify as *const () as usize;
758        ptrs[VMBuiltinFunctionIndex::get_imported_debug_usize_index().index() as usize] =
759            wasmer_vm_dbg_usize as *const () as usize;
760        ptrs[VMBuiltinFunctionIndex::get_imported_debug_str_index().index() as usize] =
761            wasmer_vm_dbg_str as *const () as usize;
762        ptrs[VMBuiltinFunctionIndex::get_imported_personality2_index().index() as usize] =
763            wasmer_eh_personality2 as *const () as usize;
764        ptrs[VMBuiltinFunctionIndex::get_imported_alloc_exception_index().index() as usize] =
765            wasmer_vm_alloc_exception as *const () as usize;
766        ptrs[VMBuiltinFunctionIndex::get_imported_throw_index().index() as usize] =
767            wasmer_vm_throw as *const () as usize;
768        ptrs[VMBuiltinFunctionIndex::get_imported_read_exnref_index().index() as usize] =
769            wasmer_vm_read_exnref as *const () as usize;
770        ptrs[VMBuiltinFunctionIndex::get_imported_exception_into_exnref_index().index() as usize] =
771            wasmer_vm_exception_into_exnref as *const () as usize;
772
773        debug_assert!(ptrs.iter().cloned().all(|p| p != 0));
774
775        Self { ptrs }
776    }
777}
778
/// The VM "context", which is pointed to by the `vmctx` arg in the compiler.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
///
/// TODO: We could move the globals into the `vmctx` allocation too.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {}
791
impl VMContext {
    /// Return a shared reference to the associated `Instance`.
    ///
    /// # Safety
    /// This is unsafe because it doesn't work on just any `VMContext`, it must
    /// be a `VMContext` allocated as part of an `Instance`.
    #[allow(clippy::cast_ptr_alignment)]
    #[inline]
    pub(crate) unsafe fn instance(&self) -> &Instance {
        // The `VMContext` lives at `vmctx_offset` inside its `Instance`
        // allocation, so walking backwards by that offset recovers the
        // owning `Instance`.
        unsafe {
            &*((self as *const Self as *mut u8).offset(-Instance::vmctx_offset())
                as *const Instance)
        }
    }

    /// Return a mutable reference to the associated `Instance`.
    ///
    /// # Safety
    /// Same contract as [`VMContext::instance`]: `self` must be the
    /// `VMContext` embedded in an `Instance` allocation.
    #[inline]
    pub(crate) unsafe fn instance_mut(&mut self) -> &mut Instance {
        unsafe {
            &mut *((self as *const Self as *mut u8).offset(-Instance::vmctx_offset())
                as *mut Instance)
        }
    }
}
815
/// The type for trampolines in the VM.
///
/// A trampoline adapts a buffer of raw argument/return values to a concrete
/// function signature, invoking the given function body with the callee's
/// `VMContext`.
pub type VMTrampoline = unsafe extern "C" fn(
    *mut VMContext,        // callee vmctx
    *const VMFunctionBody, // function we're actually calling
    *mut RawValue,         // space for arguments and return values
);
822
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address which is always valid, even if the memory grows.
    pub base: *mut u8,

    /// The current logical size of this linear memory in bytes.
    pub current_length: usize,
}

/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize.
unsafe impl Send for VMMemoryDefinition {}
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. And it's `Copy` so there's
/// really no difference between passing it by reference or by value as far as
/// correctness in a multi-threaded context is concerned.
unsafe impl Sync for VMMemoryDefinition {}
846
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Verifies that `VMMemoryDefinition`'s layout matches `VMOffsets`.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.vmmemory_definition_current_length())
        );
    }
}