wasmer_vm/
vmcontext.rs

1// This file contains code from external sources.
2// Attributions: https://github.com/wasmerio/wasmer/blob/main/docs/ATTRIBUTIONS.md
3
4//! This file declares `VMContext` and several related structs which contain
5//! fields that compiled wasm code accesses directly.
6
use crate::VMFunctionBody;
use crate::VMTable;
use crate::global::VMGlobal;
use crate::instance::Instance;
use crate::memory::VMMemory;
use crate::store::InternalStoreHandle;
use crate::trap::{Trap, TrapCode};
use crate::{VMBuiltinFunctionIndex, VMFunction};
use std::convert::TryFrom;
use std::hash::{Hash, Hasher};
use std::ptr::{self, NonNull};
use std::sync::atomic::{AtomicPtr, AtomicU32, AtomicU64, Ordering};
use wasmer_types::RawValue;
20
/// Union representing the first parameter passed when calling a function.
///
/// It may either be a pointer to the [`VMContext`] if it's a Wasm function
/// or a pointer to arbitrary data controlled by the host if it's a host function.
///
/// Both variants are a single pointer occupying the same storage, so reading
/// either field reinterprets the same bits; `repr(C)` keeps the layout stable
/// for compiled code that passes this as the first argument.
#[derive(Copy, Clone, Eq)]
#[repr(C)]
pub union VMFunctionContext {
    /// Wasm functions take a pointer to [`VMContext`].
    pub vmctx: *mut VMContext,
    /// Host functions can have custom environments.
    pub host_env: *mut std::ffi::c_void,
}
33
impl VMFunctionContext {
    /// Check whether the pointer stored is null or not.
    ///
    /// Reading `host_env` is sound regardless of which variant was written,
    /// because both union fields are raw pointers sharing the same storage.
    pub fn is_null(&self) -> bool {
        unsafe { self.host_env.is_null() }
    }
}

impl std::fmt::Debug for VMFunctionContext {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // The union cannot know which variant is active, so the raw pointer
        // is printed under a neutral field name.
        f.debug_struct("VMFunctionContext")
            .field("vmctx_or_hostenv", unsafe { &self.host_env })
            .finish()
    }
}

impl std::cmp::PartialEq for VMFunctionContext {
    fn eq(&self, rhs: &Self) -> bool {
        // Pointer identity: two contexts are equal iff they wrap the same
        // address, whichever union field was originally stored.
        unsafe { std::ptr::eq(self.host_env, rhs.host_env) }
    }
}

impl std::hash::Hash for VMFunctionContext {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        // Hashes the same pointer bits that `PartialEq` compares (`vmctx`
        // and `host_env` alias the same storage), keeping Eq/Hash consistent.
        unsafe {
            self.vmctx.hash(state);
        }
    }
}
62
/// An imported function.
///
/// `repr(C)` because compiled code reads `body`/`environment` directly at the
/// offsets computed by `VMOffsets`; the layout is asserted in the test module
/// below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// A pointer to the imported function body.
    pub body: *const VMFunctionBody,

    /// A pointer to the `VMContext` that owns the function or host env data.
    pub environment: VMFunctionContext,

    /// Handle to the `VMFunction` in the context.
    pub handle: InternalStoreHandle<VMFunction>,
}
76
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    use wasmer_types::VMOffsets;

    /// Keep the Rust layout of `VMFunctionImport` in sync with the offsets
    /// the compiler backends assume via `VMOffsets`.
    #[test]
    fn check_vmfunction_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, body),
            usize::from(offsets.vmfunction_import_body())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, environment),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}
103
/// The `VMDynamicFunctionContext` is the context that dynamic
/// functions will receive when called (rather than `vmctx`).
/// A dynamic function is a function for which we don't know the signature
/// until runtime.
///
/// As such, we need to expose the dynamic function `context`
/// containing the relevant context for running the function indicated
/// in `address`.
///
/// `repr(C)` so that compiled code can rely on the field offsets, which are
/// asserted against `VMOffsets` in the test module below.
#[repr(C)]
pub struct VMDynamicFunctionContext<T> {
    /// The address of the inner dynamic function.
    ///
    /// Note: The function must be on the form of
    /// `(*mut T, SignatureIndex, *mut i128)`.
    pub address: *const VMFunctionBody,

    /// The context that the inner dynamic function will receive.
    pub ctx: T,
}

// The `ctx` itself must be `Send`, `address` can be passed between
// threads because all usage is `unsafe` and synchronized.
unsafe impl<T: Sized + Send + Sync> Send for VMDynamicFunctionContext<T> {}
// The `ctx` itself must be `Sync`, `address` can be shared between
// threads because all usage is `unsafe` and synchronized.
unsafe impl<T: Sized + Send + Sync> Sync for VMDynamicFunctionContext<T> {}
130
131impl<T: Sized + Clone + Send + Sync> Clone for VMDynamicFunctionContext<T> {
132    fn clone(&self) -> Self {
133        Self {
134            address: self.address,
135            ctx: self.ctx.clone(),
136        }
137    }
138}
139
#[cfg(test)]
mod test_vmdynamicfunction_import_context {
    use super::VMDynamicFunctionContext;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Keep the Rust layout of `VMDynamicFunctionContext` in sync with the
    /// offsets the compiler backends assume via `VMOffsets`.
    #[test]
    fn check_vmdynamicfunction_import_context_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMDynamicFunctionContext<usize>>(),
            usize::from(offsets.size_of_vmdynamicfunction_import_context())
        );
        assert_eq!(
            offset_of!(VMDynamicFunctionContext<usize>, address),
            usize::from(offsets.vmdynamicfunction_import_context_address())
        );
        assert_eq!(
            offset_of!(VMDynamicFunctionContext<usize>, ctx),
            usize::from(offsets.vmdynamicfunction_import_context_ctx())
        );
    }
}
166
/// A function kind is a calling convention into and out of wasm code.
///
/// `repr(C)` gives the discriminant a stable, FFI-visible representation.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(C)]
pub enum VMFunctionKind {
    /// A static function has the native signature:
    /// `extern "C" (vmctx, arg1, arg2...) -> (result1, result2, ...)`.
    ///
    /// This is the default for functions that are defined:
    /// 1. In the Host, natively
    /// 2. In the WebAssembly file
    Static,

    /// A dynamic function has the native signature:
    /// `extern "C" (ctx, &[Value]) -> Vec<Value>`.
    ///
    /// This is the default for functions that are defined:
    /// 1. In the Host, dynamically
    Dynamic,
}
186
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
///
/// `repr(C)` because compiled code dereferences `definition` at the offset
/// computed by `VMOffsets` (asserted in the test module below).
#[derive(Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub definition: NonNull<VMTableDefinition>,

    /// Handle to the `VMTable` in the context.
    pub handle: InternalStoreHandle<VMTable>,
}
198
#[cfg(test)]
mod test_vmtable_import {
    use super::VMTableImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Keep the Rust layout of `VMTableImport` in sync with the offsets the
    /// compiler backends assume via `VMOffsets`.
    /// NOTE(review): only `definition` is offset-checked here; `handle` is
    /// not — presumably compiled code never reads it directly. Confirm.
    #[test]
    fn check_vmtable_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, definition),
            usize::from(offsets.vmtable_import_definition())
        );
    }
}
221
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
///
/// `repr(C)` because compiled code dereferences `definition` at the offset
/// computed by `VMOffsets` (asserted in the test module below).
#[derive(Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub definition: NonNull<VMMemoryDefinition>,

    /// A handle to the `Memory` that owns the memory description.
    pub handle: InternalStoreHandle<VMMemory>,
}
233
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Keep the Rust layout of `VMMemoryImport` in sync with the offsets the
    /// compiler backends assume via `VMOffsets`.
    #[test]
    fn check_vmmemory_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, definition),
            usize::from(offsets.vmmemory_import_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, handle),
            usize::from(offsets.vmmemory_import_handle())
        );
    }
}
260
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// `repr(C)` because compiled code dereferences `definition` at the offset
/// computed by `VMOffsets` (asserted in the test module below).
#[derive(Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub definition: NonNull<VMGlobalDefinition>,

    /// A handle to the `Global` that owns the global description.
    pub handle: InternalStoreHandle<VMGlobal>,
}

/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. Additionally, all operations
/// on `from` are thread-safe through the use of a mutex in [`VMGlobal`].
unsafe impl Send for VMGlobalImport {}
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. And because it's `Clone`, there's
/// really no difference between passing it by reference or by value as far as
/// correctness in a multi-threaded context is concerned.
unsafe impl Sync for VMGlobalImport {}
284
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Keep the Rust layout of `VMGlobalImport` in sync with the offsets the
    /// compiler backends assume via `VMOffsets`.
    #[test]
    fn check_vmglobal_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, definition),
            usize::from(offsets.vmglobal_import_definition())
        );
    }
}
307
308/// Do an unsynchronized, non-atomic `memory.copy` for the memory.
309///
310/// # Errors
311///
312/// Returns a `Trap` error when the source or destination ranges are out of
313/// bounds.
314///
315/// # Safety
316/// The memory is not copied atomically and is not synchronized: it's the
317/// caller's responsibility to synchronize.
318pub(crate) unsafe fn memory_copy(
319    mem: &VMMemoryDefinition,
320    dst: u32,
321    src: u32,
322    len: u32,
323) -> Result<(), Trap> {
324    unsafe {
325        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy
326        if src
327            .checked_add(len)
328            .is_none_or(|n| usize::try_from(n).unwrap() > mem.current_length)
329            || dst
330                .checked_add(len)
331                .is_none_or(|m| usize::try_from(m).unwrap() > mem.current_length)
332        {
333            return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
334        }
335
336        let dst = usize::try_from(dst).unwrap();
337        let src = usize::try_from(src).unwrap();
338
339        // Bounds and casts are checked above, by this point we know that
340        // everything is safe.
341        let dst = mem.base.add(dst);
342        let src = mem.base.add(src);
343        ptr::copy(src, dst, len as usize);
344
345        Ok(())
346    }
347}
348
349/// Perform the `memory.fill` operation for the memory in an unsynchronized,
350/// non-atomic way.
351///
352/// # Errors
353///
354/// Returns a `Trap` error if the memory range is out of bounds.
355///
356/// # Safety
357/// The memory is not filled atomically and is not synchronized: it's the
358/// caller's responsibility to synchronize.
359pub(crate) unsafe fn memory_fill(
360    mem: &VMMemoryDefinition,
361    dst: u32,
362    val: u32,
363    len: u32,
364) -> Result<(), Trap> {
365    unsafe {
366        if dst
367            .checked_add(len)
368            .is_none_or(|m| usize::try_from(m).unwrap() > mem.current_length)
369        {
370            return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
371        }
372
373        let dst = isize::try_from(dst).unwrap();
374        let val = val as u8;
375
376        // Bounds and casts are checked above, by this point we know that
377        // everything is safe.
378        let dst = mem.base.offset(dst);
379        ptr::write_bytes(dst, val, len as usize);
380
381        Ok(())
382    }
383}
384
385/// Perform the `memory32.atomic.check32` operation for the memory. Return 0 if same, 1 if different
386///
387/// # Errors
388///
389/// Returns a `Trap` error if the memory range is out of bounds or 32bits unligned.
390///
391/// # Safety
392/// memory access is unsafe
393pub(crate) unsafe fn memory32_atomic_check32(
394    mem: &VMMemoryDefinition,
395    dst: u32,
396    val: u32,
397) -> Result<u32, Trap> {
398    unsafe {
399        if usize::try_from(dst).unwrap() > mem.current_length {
400            return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
401        }
402
403        let dst = isize::try_from(dst).unwrap();
404        if dst & 0b11 != 0 {
405            return Err(Trap::lib(TrapCode::UnalignedAtomic));
406        }
407
408        // Bounds and casts are checked above, by this point we know that
409        // everything is safe.
410        let dst = mem.base.offset(dst) as *mut u32;
411        let atomic_dst = AtomicPtr::new(dst);
412        let read_val = *atomic_dst.load(Ordering::Acquire);
413        let ret = if read_val == val { 0 } else { 1 };
414        Ok(ret)
415    }
416}
417
418/// Perform the `memory32.atomic.check64` operation for the memory. Return 0 if same, 1 if different
419///
420/// # Errors
421///
422/// Returns a `Trap` error if the memory range is out of bounds or 64bits unaligned.
423///
424/// # Safety
425/// memory access is unsafe
426pub(crate) unsafe fn memory32_atomic_check64(
427    mem: &VMMemoryDefinition,
428    dst: u32,
429    val: u64,
430) -> Result<u32, Trap> {
431    unsafe {
432        if usize::try_from(dst).unwrap() > mem.current_length {
433            return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
434        }
435
436        let dst = isize::try_from(dst).unwrap();
437        if dst & 0b111 != 0 {
438            return Err(Trap::lib(TrapCode::UnalignedAtomic));
439        }
440
441        // Bounds and casts are checked above, by this point we know that
442        // everything is safe.
443        let dst = mem.base.offset(dst) as *mut u64;
444        let atomic_dst = AtomicPtr::new(dst);
445        let read_val = *atomic_dst.load(Ordering::Acquire);
446        let ret = if read_val == val { 0 } else { 1 };
447        Ok(ret)
448    }
449}
450
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
///
/// `repr(C)` because compiled code reads `base`/`current_elements` directly
/// at the offsets computed by `VMOffsets` (asserted in the test module below).
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: *mut u8,

    /// The current number of elements in the table.
    pub current_elements: u32,
}
462
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Keep the Rust layout of `VMTableDefinition` in sync with the offsets
    /// the compiler backends assume via `VMOffsets`.
    #[test]
    fn check_vmtable_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}
489
/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
///
/// Aligned to 16 bytes so the full 128-bit `RawValue` slot is naturally
/// aligned (the test module below asserts the alignment covers `[u8; 16]`).
#[derive(Debug, Clone)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    /// Raw value of the global.
    pub val: RawValue,
}
500
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use crate::{VMFuncRef, VMOffsets};
    use more_asserts::assert_ge;
    use std::mem::{align_of, size_of};
    use wasmer_types::ModuleInfo;

    /// The definition must be at least as aligned as every value type that
    /// can be stored in it, up to 128-bit values.
    #[test]
    fn check_vmglobal_definition_alignment() {
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<i32>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<i64>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<f32>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<f64>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<VMFuncRef>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<[u8; 16]>());
    }

    /// Locally-defined globals are stored behind a pointer; check the size
    /// `VMOffsets` assumes for that slot.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<*const VMGlobalDefinition>(),
            usize::from(offsets.size_of_vmglobal_local())
        );
    }

    /// The globals area inside the vmctx must start 16-byte aligned to match
    /// the struct's `align(16)`.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }
}
536
537impl VMGlobalDefinition {
538    /// Construct a `VMGlobalDefinition`.
539    pub fn new() -> Self {
540        Self {
541            val: Default::default(),
542        }
543    }
544}
545
/// A tag index, unique within the Store in which the instance was created.
/// Usable for translating module-local tag indices to store-unique ones.
#[repr(C)]
#[cfg_attr(feature = "artifact-size", derive(loupe::MemoryUsage))]
// Same derives as `VMSharedSignatureIndex` below, for consistency: both are
// plain `u32` newtypes used as store-unique indices.
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)]
pub struct VMSharedTagIndex(u32);

impl VMSharedTagIndex {
    /// Create a new `VMSharedTagIndex` wrapping the given raw index.
    pub fn new(value: u32) -> Self {
        Self(value)
    }

    /// Get the inner value.
    pub fn index(&self) -> u32 {
        self.0
    }
}
563
/// An index into the shared signature registry, usable for checking signatures
/// at indirect calls.
///
/// `repr(C)` so compiled code can compare the raw `u32` directly during
/// `call_indirect` type checks.
#[repr(C)]
#[cfg_attr(feature = "artifact-size", derive(loupe::MemoryUsage))]
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)]
pub struct VMSharedSignatureIndex(u32);
570
#[cfg(test)]
mod test_vmshared_signature_index {
    use super::VMSharedSignatureIndex;
    use std::mem::size_of;
    use wasmer_types::{ModuleInfo, TargetSharedSignatureIndex, VMOffsets};

    /// The size `VMOffsets` assumes for a shared signature index must match
    /// the Rust type.
    #[test]
    fn check_vmshared_signature_index() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMSharedSignatureIndex>(),
            usize::from(offsets.size_of_vmshared_signature_index())
        );
    }

    /// The runtime-side and compiler-side index types must be the same size.
    #[test]
    fn check_target_shared_signature_index() {
        assert_eq!(
            size_of::<VMSharedSignatureIndex>(),
            size_of::<TargetSharedSignatureIndex>()
        );
    }
}
595
impl VMSharedSignatureIndex {
    /// Create a new `VMSharedSignatureIndex` wrapping the given raw index.
    pub fn new(value: u32) -> Self {
        Self(value)
    }
}

impl Default for VMSharedSignatureIndex {
    fn default() -> Self {
        // u32::MAX acts as the "no signature" sentinel — presumably an index
        // the registry never hands out; confirm against the signature
        // registry before relying on it.
        Self::new(u32::MAX)
    }
}
608
/// The VM caller-checked "anyfunc" record, for caller-side signature checking.
/// It consists of the actual function pointer and a signature id to be checked
/// by the caller.
///
/// `repr(C)` because compiled code reads these fields directly at the offsets
/// computed by `VMOffsets` (asserted in the test module below).
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct VMCallerCheckedAnyfunc {
    /// Function body.
    pub func_ptr: *const VMFunctionBody,
    /// Function signature id.
    pub type_index: VMSharedSignatureIndex,
    /// Function `VMContext` or host env.
    pub vmctx: VMFunctionContext,
    /// Address of the function call trampoline to invoke this function using
    /// a dynamic argument list.
    pub call_trampoline: VMTrampoline,
    // If more elements are added here, remember to add offset_of tests below!
}
626
impl PartialEq for VMCallerCheckedAnyfunc {
    fn eq(&self, other: &Self) -> bool {
        // Field-by-field comparison; `fn_addr_eq` compares the trampoline
        // function pointers by address.
        self.func_ptr == other.func_ptr
            && self.type_index == other.type_index
            && self.vmctx == other.vmctx
            && ptr::fn_addr_eq(self.call_trampoline, other.call_trampoline)
    }
}

impl Eq for VMCallerCheckedAnyfunc {}

impl Hash for VMCallerCheckedAnyfunc {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash exactly the fields `eq` compares, keeping Eq/Hash consistent.
        self.func_ptr.hash(state);
        self.type_index.hash(state);
        self.vmctx.hash(state);
        ptr::hash(self.call_trampoline as *const (), state);
    }
}
646
#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
    use super::VMCallerCheckedAnyfunc;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Keep the Rust layout of `VMCallerCheckedAnyfunc` in sync with the
    /// offsets the compiler backends assume via `VMOffsets`.
    #[test]
    fn check_vmcaller_checked_anyfunc_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMCallerCheckedAnyfunc>(),
            usize::from(offsets.size_of_vmcaller_checked_anyfunc())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, func_ptr),
            usize::from(offsets.vmcaller_checked_anyfunc_func_ptr())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, type_index),
            usize::from(offsets.vmcaller_checked_anyfunc_type_index())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, vmctx),
            usize::from(offsets.vmcaller_checked_anyfunc_vmctx())
        );
    }
}
677
/// An array that stores addresses of builtin functions. We translate code
/// to use indirect calls. This way, we don't have to patch the code.
///
/// `repr(C)` because compiled code indexes into `ptrs` by
/// `VMBuiltinFunctionIndex` to load the callee address.
#[repr(C)]
pub struct VMBuiltinFunctionsArray {
    ptrs: [usize; Self::len()],
}
684
685impl VMBuiltinFunctionsArray {
686    pub const fn len() -> usize {
687        VMBuiltinFunctionIndex::builtin_functions_total_number() as usize
688    }
689
690    pub fn initialized() -> Self {
691        use crate::libcalls::*;
692
693        let mut ptrs = [0; Self::len()];
694
695        ptrs[VMBuiltinFunctionIndex::get_memory32_grow_index().index() as usize] =
696            wasmer_vm_memory32_grow as usize;
697        ptrs[VMBuiltinFunctionIndex::get_imported_memory32_grow_index().index() as usize] =
698            wasmer_vm_imported_memory32_grow as usize;
699
700        ptrs[VMBuiltinFunctionIndex::get_memory32_size_index().index() as usize] =
701            wasmer_vm_memory32_size as usize;
702        ptrs[VMBuiltinFunctionIndex::get_imported_memory32_size_index().index() as usize] =
703            wasmer_vm_imported_memory32_size as usize;
704
705        ptrs[VMBuiltinFunctionIndex::get_table_copy_index().index() as usize] =
706            wasmer_vm_table_copy as usize;
707
708        ptrs[VMBuiltinFunctionIndex::get_table_init_index().index() as usize] =
709            wasmer_vm_table_init as usize;
710        ptrs[VMBuiltinFunctionIndex::get_elem_drop_index().index() as usize] =
711            wasmer_vm_elem_drop as usize;
712
713        ptrs[VMBuiltinFunctionIndex::get_memory_copy_index().index() as usize] =
714            wasmer_vm_memory32_copy as usize;
715        ptrs[VMBuiltinFunctionIndex::get_imported_memory_copy_index().index() as usize] =
716            wasmer_vm_imported_memory32_copy as usize;
717        ptrs[VMBuiltinFunctionIndex::get_memory_fill_index().index() as usize] =
718            wasmer_vm_memory32_fill as usize;
719        ptrs[VMBuiltinFunctionIndex::get_imported_memory_fill_index().index() as usize] =
720            wasmer_vm_imported_memory32_fill as usize;
721        ptrs[VMBuiltinFunctionIndex::get_memory_init_index().index() as usize] =
722            wasmer_vm_memory32_init as usize;
723        ptrs[VMBuiltinFunctionIndex::get_data_drop_index().index() as usize] =
724            wasmer_vm_data_drop as usize;
725        ptrs[VMBuiltinFunctionIndex::get_raise_trap_index().index() as usize] =
726            wasmer_vm_raise_trap as usize;
727        ptrs[VMBuiltinFunctionIndex::get_table_size_index().index() as usize] =
728            wasmer_vm_table_size as usize;
729        ptrs[VMBuiltinFunctionIndex::get_imported_table_size_index().index() as usize] =
730            wasmer_vm_imported_table_size as usize;
731        ptrs[VMBuiltinFunctionIndex::get_table_grow_index().index() as usize] =
732            wasmer_vm_table_grow as usize;
733        ptrs[VMBuiltinFunctionIndex::get_imported_table_grow_index().index() as usize] =
734            wasmer_vm_imported_table_grow as usize;
735        ptrs[VMBuiltinFunctionIndex::get_table_get_index().index() as usize] =
736            wasmer_vm_table_get as usize;
737        ptrs[VMBuiltinFunctionIndex::get_imported_table_get_index().index() as usize] =
738            wasmer_vm_imported_table_get as usize;
739        ptrs[VMBuiltinFunctionIndex::get_table_set_index().index() as usize] =
740            wasmer_vm_table_set as usize;
741        ptrs[VMBuiltinFunctionIndex::get_imported_table_set_index().index() as usize] =
742            wasmer_vm_imported_table_set as usize;
743        ptrs[VMBuiltinFunctionIndex::get_func_ref_index().index() as usize] =
744            wasmer_vm_func_ref as usize;
745        ptrs[VMBuiltinFunctionIndex::get_table_fill_index().index() as usize] =
746            wasmer_vm_table_fill as usize;
747
748        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_wait32_index().index() as usize] =
749            wasmer_vm_memory32_atomic_wait32 as usize;
750        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_wait32_index().index() as usize] =
751            wasmer_vm_imported_memory32_atomic_wait32 as usize;
752        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_wait64_index().index() as usize] =
753            wasmer_vm_memory32_atomic_wait64 as usize;
754        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_wait64_index().index() as usize] =
755            wasmer_vm_imported_memory32_atomic_wait64 as usize;
756        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_notify_index().index() as usize] =
757            wasmer_vm_memory32_atomic_notify as usize;
758        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_notify_index().index() as usize] =
759            wasmer_vm_imported_memory32_atomic_notify as usize;
760        ptrs[VMBuiltinFunctionIndex::get_imported_throw_index().index() as usize] =
761            wasmer_vm_throw as usize;
762        ptrs[VMBuiltinFunctionIndex::get_imported_rethrow_index().index() as usize] =
763            wasmer_vm_rethrow as usize;
764
765        ptrs[VMBuiltinFunctionIndex::get_imported_alloc_exception_index().index() as usize] =
766            wasmer_vm_alloc_exception as usize;
767        ptrs[VMBuiltinFunctionIndex::get_imported_delete_exception_index().index() as usize] =
768            wasmer_vm_delete_exception as usize;
769        ptrs[VMBuiltinFunctionIndex::get_imported_read_exception_index().index() as usize] =
770            wasmer_vm_read_exception as usize;
771
772        ptrs[VMBuiltinFunctionIndex::get_imported_debug_usize_index().index() as usize] =
773            wasmer_vm_dbg_usize as usize;
774        ptrs[VMBuiltinFunctionIndex::get_imported_debug_str_index().index() as usize] =
775            wasmer_vm_dbg_str as usize;
776
777        debug_assert!(ptrs.iter().cloned().all(|p| p != 0));
778
779        Self { ptrs }
780    }
781}
782
/// The VM "context", which is pointed to by the `vmctx` arg in the compiler.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
///
/// TODO: We could move the globals into the `vmctx` allocation too.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {}
795
impl VMContext {
    /// Return a shared reference to the associated `Instance`.
    ///
    /// # Safety
    /// This is unsafe because it doesn't work on just any `VMContext`, it must
    /// be a `VMContext` allocated as part of an `Instance`.
    #[allow(clippy::cast_ptr_alignment)]
    #[inline]
    pub(crate) unsafe fn instance(&self) -> &Instance {
        unsafe {
            // The `VMContext` lives at a fixed offset inside its owning
            // `Instance`; walk back by that offset to recover the `Instance`.
            &*((self as *const Self as *mut u8).offset(-Instance::vmctx_offset())
                as *const Instance)
        }
    }

    /// Return a mutable reference to the associated `Instance`.
    ///
    /// # Safety
    /// Same contract as [`VMContext::instance`]: `self` must be the
    /// `VMContext` embedded in an `Instance` allocation.
    #[inline]
    pub(crate) unsafe fn instance_mut(&mut self) -> &mut Instance {
        unsafe {
            &mut *((self as *const Self as *mut u8).offset(-Instance::vmctx_offset())
                as *mut Instance)
        }
    }
}
819
/// The type for trampolines in the VM.
pub type VMTrampoline = unsafe extern "C" fn(
    *mut VMContext,        // callee vmctx
    *const VMFunctionBody, // function we're actually calling
    *mut RawValue,         // space for arguments and return values
);
826
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
///
/// `repr(C)` because compiled code reads `base`/`current_length` directly at
/// the offsets computed by `VMOffsets` (asserted in the test module below).
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address which is always valid, even if the memory grows.
    pub base: *mut u8,

    /// The current logical size of this linear memory in bytes.
    pub current_length: usize,
}

/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize.
unsafe impl Send for VMMemoryDefinition {}
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. And it's `Copy` so there's
/// really no difference between passing it by reference or by value as far as
/// correctness in a multi-threaded context is concerned.
unsafe impl Sync for VMMemoryDefinition {}
850
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Keep the Rust layout of `VMMemoryDefinition` in sync with the offsets
    /// the compiler backends assume via `VMOffsets`.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.vmmemory_definition_current_length())
        );
    }
}