use crate::global::VMGlobal;
use crate::instance::Instance;
use crate::memory::VMMemory;
use crate::store::InternalStoreHandle;
use crate::trap::{Trap, TrapCode};
use crate::VMFunctionBody;
use crate::VMTable;
use crate::{VMBuiltinFunctionIndex, VMFunction};
use std::convert::TryFrom;
use std::ptr::{self, NonNull};
use std::sync::atomic::{AtomicPtr, Ordering};
use wasmer_types::RawValue;
/// Union representing the first parameter passed when calling a function.
///
/// It holds either a pointer to a [`VMContext`] (field `vmctx`) or an opaque
/// pointer to a host-defined environment (field `host_env`). Both variants
/// are raw pointers with identical size and layout, so the impls below read
/// whichever field is convenient regardless of which one was written.
#[derive(Copy, Clone, Eq)]
#[repr(C)]
pub union VMFunctionContext {
    /// Pointer to a `VMContext`.
    pub vmctx: *mut VMContext,
    /// Opaque pointer to an arbitrary host environment.
    pub host_env: *mut std::ffi::c_void,
}
impl VMFunctionContext {
    /// Check whether the stored pointer is null.
    pub fn is_null(&self) -> bool {
        // SAFETY: both union fields are raw pointers with the same size and
        // layout, so reading `host_env` is valid whichever field was written.
        unsafe { self.host_env.is_null() }
    }
}
impl std::fmt::Debug for VMFunctionContext {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.debug_struct("VMFunctionContext")
            // SAFETY: both variants share the same pointer layout, so
            // printing `host_env` works regardless of which was stored;
            // the field name reflects that ambiguity.
            .field("vmctx_or_hostenv", unsafe { &self.host_env })
            .finish()
    }
}
impl std::cmp::PartialEq for VMFunctionContext {
    /// Two contexts are equal iff they hold the same raw pointer value.
    fn eq(&self, rhs: &Self) -> bool {
        // SAFETY: compares the address bits shared by both union variants.
        unsafe { self.host_env as usize == rhs.host_env as usize }
    }
}
impl std::hash::Hash for VMFunctionContext {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        // SAFETY: hashes the raw pointer bits. Consistent with `PartialEq`
        // above, which also compares the raw address, so the Hash/Eq
        // contract holds.
        unsafe {
            self.vmctx.hash(state);
        }
    }
}
/// An imported function: the code pointer, the context it must be called
/// with, and the store handle that keeps the underlying function alive.
/// `#[repr(C)]` because the layout is checked against `VMOffsets` (see the
/// test module below).
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// A pointer to the imported function body.
    pub body: *const VMFunctionBody,
    /// The context (vmctx or host env) to pass when calling `body`.
    pub environment: VMFunctionContext,
    /// Handle to the `VMFunction` in the store.
    pub handle: InternalStoreHandle<VMFunction>,
}
/// Layout tests: the Rust definition of `VMFunctionImport` must agree with
/// the sizes/offsets computed by `VMOffsets` for this pointer size.
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    use wasmer_types::VMOffsets;
    #[test]
    fn check_vmfunction_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, body),
            usize::from(offsets.vmfunction_import_body())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, environment),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}
/// The context passed to dynamic functions when they are called: the code
/// address plus a caller-defined payload `T`. Layout is checked against
/// `VMOffsets` in the test module below.
#[repr(C)]
pub struct VMDynamicFunctionContext<T> {
    /// A pointer to the function body to invoke.
    pub address: *const VMFunctionBody,
    /// Caller-defined state carried alongside the address.
    pub ctx: T,
}
// SAFETY: the payload's thread-safety is delegated to `T: Send + Sync`;
// `address` is a raw code pointer that is only read through this type.
// NOTE(review): soundness ultimately depends on how callers use `address` —
// confirm against the call sites.
unsafe impl<T: Sized + Send + Sync> Send for VMDynamicFunctionContext<T> {}
unsafe impl<T: Sized + Send + Sync> Sync for VMDynamicFunctionContext<T> {}
impl<T: Sized + Clone + Send + Sync> Clone for VMDynamicFunctionContext<T> {
    /// Duplicate the context: the code address is copied verbatim and the
    /// payload is cloned.
    fn clone(&self) -> Self {
        let ctx = self.ctx.clone();
        let address = self.address;
        Self { address, ctx }
    }
}
/// Layout tests: `VMDynamicFunctionContext` must match the offsets that
/// `VMOffsets` computes for it.
#[cfg(test)]
mod test_vmdynamicfunction_import_context {
    use super::VMDynamicFunctionContext;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    #[test]
    fn check_vmdynamicfunction_import_context_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMDynamicFunctionContext<usize>>(),
            usize::from(offsets.size_of_vmdynamicfunction_import_context())
        );
        assert_eq!(
            offset_of!(VMDynamicFunctionContext<usize>, address),
            usize::from(offsets.vmdynamicfunction_import_context_address())
        );
        assert_eq!(
            offset_of!(VMDynamicFunctionContext<usize>, ctx),
            usize::from(offsets.vmdynamicfunction_import_context_ctx())
        );
    }
}
/// The kind of a VM function; distinguishes the two calling styles the
/// runtime supports.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(C)]
pub enum VMFunctionKind {
    /// A function called directly with its native signature.
    Static,
    /// A function dispatched through `VMDynamicFunctionContext`.
    Dynamic,
}
/// The fields an imported table carries: a pointer to the table's runtime
/// definition plus the store handle that keeps the table alive.
#[derive(Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table's definition (base + length).
    pub definition: NonNull<VMTableDefinition>,
    /// Handle to the `VMTable` in the store.
    pub handle: InternalStoreHandle<VMTable>,
}
/// Layout tests: `VMTableImport` must match the offsets computed by
/// `VMOffsets`.
#[cfg(test)]
mod test_vmtable_import {
    use super::VMTableImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    #[test]
    fn check_vmtable_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, definition),
            usize::from(offsets.vmtable_import_definition())
        );
    }
}
/// The fields an imported linear memory carries: a pointer to the memory's
/// runtime definition plus the store handle that keeps it alive.
#[derive(Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory's definition (base + length).
    pub definition: NonNull<VMMemoryDefinition>,
    /// Handle to the `VMMemory` in the store.
    pub handle: InternalStoreHandle<VMMemory>,
}
/// Layout tests: `VMMemoryImport` must match the offsets computed by
/// `VMOffsets`.
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    #[test]
    fn check_vmmemory_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, definition),
            usize::from(offsets.vmmemory_import_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, handle),
            usize::from(offsets.vmmemory_import_handle())
        );
    }
}
/// The fields an imported global carries: a pointer to the global's storage
/// plus the store handle that keeps it alive.
#[derive(Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global's value storage.
    pub definition: NonNull<VMGlobalDefinition>,
    /// Handle to the `VMGlobal` in the store.
    pub handle: InternalStoreHandle<VMGlobal>,
}
// SAFETY: `NonNull` is not auto-Send/Sync, so these impls assert that the
// pointed-to `VMGlobalDefinition` may be shared across threads.
// NOTE(review): this is only sound if access to the global's storage is
// externally synchronized — confirm against the store's locking discipline.
unsafe impl Send for VMGlobalImport {}
unsafe impl Sync for VMGlobalImport {}
/// Layout tests: `VMGlobalImport` must match the offsets computed by
/// `VMOffsets`.
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    #[test]
    fn check_vmglobal_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, definition),
            usize::from(offsets.vmglobal_import_definition())
        );
    }
}
/// Perform the `memory.copy` operation: copy `len` bytes from offset `src`
/// to offset `dst` within the linear memory described by `mem`. Overlapping
/// ranges are handled (`ptr::copy` has memmove semantics).
///
/// # Errors
///
/// Returns a `HeapAccessOutOfBounds` trap if `src + len` or `dst + len`
/// overflows `u32` or exceeds the memory's current length.
///
/// # Safety
///
/// `mem.base` and `mem.current_length` must describe a live allocation of at
/// least `current_length` bytes, and no other code may be mutating the
/// affected range concurrently.
pub(crate) unsafe fn memory_copy(
    mem: &VMMemoryDefinition,
    dst: u32,
    src: u32,
    len: u32,
) -> Result<(), Trap> {
    // `map_or(true, ..)` maps a u32 overflow of offset+len to the same
    // out-of-bounds trap as a range past `current_length`.
    if src
        .checked_add(len)
        .map_or(true, |n| usize::try_from(n).unwrap() > mem.current_length)
        || dst
            .checked_add(len)
            .map_or(true, |m| usize::try_from(m).unwrap() > mem.current_length)
    {
        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
    }
    let dst = usize::try_from(dst).unwrap();
    let src = usize::try_from(src).unwrap();
    let dst = mem.base.add(dst);
    let src = mem.base.add(src);
    // memmove: correct even when src/dst ranges overlap.
    ptr::copy(src, dst, len as usize);
    Ok(())
}
/// Perform the `memory.fill` operation: write the low byte of `val` to
/// `len` consecutive bytes starting at offset `dst` of the linear memory
/// described by `mem`.
///
/// # Errors
///
/// Returns a `HeapAccessOutOfBounds` trap if `dst + len` overflows `u32` or
/// exceeds the memory's current length.
///
/// # Safety
///
/// `mem.base` and `mem.current_length` must describe a live allocation of at
/// least `current_length` bytes, and no other code may be mutating the
/// affected range concurrently.
pub(crate) unsafe fn memory_fill(
    mem: &VMMemoryDefinition,
    dst: u32,
    val: u32,
    len: u32,
) -> Result<(), Trap> {
    // `map_or(true, ..)` maps a u32 overflow of dst+len to the same
    // out-of-bounds trap as a range past `current_length`.
    if dst
        .checked_add(len)
        .map_or(true, |m| usize::try_from(m).unwrap() > mem.current_length)
    {
        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
    }
    // Use `usize` + `add`, matching `memory_copy`: the previous
    // `isize::try_from(dst)` could panic on 32-bit targets for in-bounds
    // offsets above `isize::MAX`.
    let dst = usize::try_from(dst).unwrap();
    let val = val as u8;
    let dst = mem.base.add(dst);
    ptr::write_bytes(dst, val, len as usize);
    Ok(())
}
/// Atomically load the 32-bit value at offset `dst` of the linear memory
/// described by `mem` and compare it with `val`, as required by
/// `memory.atomic.wait32`-style checks.
///
/// Returns `Ok(0)` when the loaded value equals `val`, `Ok(1)` otherwise.
///
/// # Errors
///
/// * `HeapAccessOutOfBounds` if the 4-byte access at `dst` does not fit in
///   the memory's current length.
/// * `UnalignedAtomic` if `dst` is not 4-byte aligned.
///
/// # Safety
///
/// `mem.base` and `mem.current_length` must describe a live allocation of at
/// least `current_length` bytes.
pub(crate) unsafe fn memory32_atomic_check32(
    mem: &VMMemoryDefinition,
    dst: u32,
    val: u32,
) -> Result<u32, Trap> {
    // Bounds check must cover the full 4-byte access, not just `dst`:
    // the previous `dst > current_length` let a read at `dst == len` through.
    if usize::try_from(dst)
        .unwrap()
        .checked_add(4)
        .map_or(true, |end| end > mem.current_length)
    {
        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
    }
    let dst = usize::try_from(dst).unwrap();
    if dst & 0b11 != 0 {
        return Err(Trap::lib(TrapCode::UnalignedAtomic));
    }
    // Atomically load the value *in memory*. (Wrapping the pointer in an
    // `AtomicPtr` only loads the pointer itself atomically; the subsequent
    // dereference was a plain, non-atomic read.)
    // SAFETY: in-bounds (checked above) and 4-byte aligned (checked above).
    let atomic = &*(mem.base.add(dst) as *const std::sync::atomic::AtomicU32);
    let read_val = atomic.load(Ordering::Acquire);
    let ret = if read_val == val { 0 } else { 1 };
    Ok(ret)
}
/// Atomically load the 64-bit value at offset `dst` of the linear memory
/// described by `mem` and compare it with `val`, as required by
/// `memory.atomic.wait64`-style checks.
///
/// Returns `Ok(0)` when the loaded value equals `val`, `Ok(1)` otherwise.
///
/// # Errors
///
/// * `HeapAccessOutOfBounds` if the 8-byte access at `dst` does not fit in
///   the memory's current length.
/// * `UnalignedAtomic` if `dst` is not 8-byte aligned.
///
/// # Safety
///
/// `mem.base` and `mem.current_length` must describe a live allocation of at
/// least `current_length` bytes.
pub(crate) unsafe fn memory32_atomic_check64(
    mem: &VMMemoryDefinition,
    dst: u32,
    val: u64,
) -> Result<u32, Trap> {
    // Bounds check must cover the full 8-byte access, not just `dst`:
    // the previous `dst > current_length` let a read at `dst == len` through.
    if usize::try_from(dst)
        .unwrap()
        .checked_add(8)
        .map_or(true, |end| end > mem.current_length)
    {
        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
    }
    let dst = usize::try_from(dst).unwrap();
    if dst & 0b111 != 0 {
        return Err(Trap::lib(TrapCode::UnalignedAtomic));
    }
    // Atomically load the value *in memory*. (Wrapping the pointer in an
    // `AtomicPtr` only loads the pointer itself atomically; the subsequent
    // dereference was a plain, non-atomic read.)
    // SAFETY: in-bounds (checked above) and 8-byte aligned (checked above).
    let atomic = &*(mem.base.add(dst) as *const std::sync::atomic::AtomicU64);
    let read_val = atomic.load(Ordering::Acquire);
    let ret = if read_val == val { 0 } else { 1 };
    Ok(ret)
}
/// The runtime definition of a table: base pointer plus current element
/// count. `#[repr(C)]` because its layout is checked against `VMOffsets`
/// (see the test module below).
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table's element storage.
    pub base: *mut u8,
    /// The current number of elements in the table.
    pub current_elements: u32,
}
/// Layout tests: `VMTableDefinition` must match the offsets computed by
/// `VMOffsets`.
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    #[test]
    fn check_vmtable_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}
/// The storage for a Wasm global's value: a single `RawValue`, 16-byte
/// aligned (large enough for any value representable in `RawValue`; the
/// alignment assertions live in the test module below).
#[derive(Debug, Clone)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    /// The current raw value of the global.
    pub val: RawValue,
}
/// Alignment and layout tests for `VMGlobalDefinition`: it must be aligned
/// strictly enough for every value type that can live in a global, and the
/// vmctx globals area must begin 16-byte aligned.
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use crate::{VMFuncRef, VMOffsets};
    use more_asserts::assert_ge;
    use std::mem::{align_of, size_of};
    use wasmer_types::ModuleInfo;
    #[test]
    fn check_vmglobal_definition_alignment() {
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<i32>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<i64>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<f32>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<f64>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<VMFuncRef>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<[u8; 16]>());
    }
    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        // Locals store a *pointer* to the definition, hence the pointer size.
        assert_eq!(
            size_of::<*const VMGlobalDefinition>(),
            usize::from(offsets.size_of_vmglobal_local())
        );
    }
    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }
}
impl VMGlobalDefinition {
    /// Construct a `VMGlobalDefinition` holding the default (zero) value.
    pub fn new() -> Self {
        let val = Default::default();
        Self { val }
    }
}
/// An index into a shared signature registry (a `u32` newtype); stored in
/// `VMCallerCheckedAnyfunc::type_index` for signature checks.
#[repr(C)]
#[cfg_attr(feature = "artifact-size", derive(loupe::MemoryUsage))]
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)]
pub struct VMSharedSignatureIndex(u32);
/// Layout tests: `VMSharedSignatureIndex` must match the size `VMOffsets`
/// expects, and agree with `TargetSharedSignatureIndex`.
#[cfg(test)]
mod test_vmshared_signature_index {
    use super::VMSharedSignatureIndex;
    use std::mem::size_of;
    use wasmer_types::{ModuleInfo, TargetSharedSignatureIndex, VMOffsets};
    #[test]
    fn check_vmshared_signature_index() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMSharedSignatureIndex>(),
            usize::from(offsets.size_of_vmshared_signature_index())
        );
    }
    #[test]
    fn check_target_shared_signature_index() {
        assert_eq!(
            size_of::<VMSharedSignatureIndex>(),
            size_of::<TargetSharedSignatureIndex>()
        );
    }
}
impl VMSharedSignatureIndex {
    /// Create a new `VMSharedSignatureIndex` wrapping the given raw index.
    ///
    /// `const` so indices can be built in constant contexts; this is a
    /// backward-compatible strengthening of the previous non-const `new`.
    pub const fn new(value: u32) -> Self {
        Self(value)
    }
}
impl Default for VMSharedSignatureIndex {
fn default() -> Self {
Self::new(u32::MAX)
}
}
/// The caller-checked "anyfunc" record: code pointer, signature index for
/// the caller's type check, calling context, and a call trampoline.
/// `#[repr(C)]` because the layout is checked against `VMOffsets` (see the
/// test module below).
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
#[repr(C)]
pub struct VMCallerCheckedAnyfunc {
    /// Pointer to the function body.
    pub func_ptr: *const VMFunctionBody,
    /// The function's signature in the shared signature registry.
    pub type_index: VMSharedSignatureIndex,
    /// The context (vmctx or host env) to call the function with.
    pub vmctx: VMFunctionContext,
    /// Trampoline for invoking the function with a uniform signature.
    pub call_trampoline: VMTrampoline,
}
/// Layout tests: `VMCallerCheckedAnyfunc` must match the offsets computed
/// by `VMOffsets`.
#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
    use super::VMCallerCheckedAnyfunc;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    #[test]
    fn check_vmcaller_checked_anyfunc_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMCallerCheckedAnyfunc>(),
            usize::from(offsets.size_of_vmcaller_checked_anyfunc())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, func_ptr),
            usize::from(offsets.vmcaller_checked_anyfunc_func_ptr())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, type_index),
            usize::from(offsets.vmcaller_checked_anyfunc_type_index())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, vmctx),
            usize::from(offsets.vmcaller_checked_anyfunc_vmctx())
        );
    }
}
/// An array holding the address of every builtin libcall, indexed by
/// `VMBuiltinFunctionIndex`, so they can be called indirectly.
#[repr(C)]
pub struct VMBuiltinFunctionsArray {
    // One `usize` function address per builtin; filled by `initialized()`.
    ptrs: [usize; Self::len()],
}
impl VMBuiltinFunctionsArray {
    /// Total number of builtin slots, as declared by
    /// `VMBuiltinFunctionIndex`.
    pub const fn len() -> usize {
        VMBuiltinFunctionIndex::builtin_functions_total_number() as usize
    }
    /// Build the array with every slot pointing at its libcall
    /// implementation from `crate::libcalls`.
    ///
    /// Each assignment pairs one `VMBuiltinFunctionIndex` accessor with the
    /// matching `wasmer_vm_*` function; the local/imported variants of a
    /// builtin are distinct slots.
    pub fn initialized() -> Self {
        use crate::libcalls::*;
        let mut ptrs = [0; Self::len()];
        // Memory grow/size (local and imported variants).
        ptrs[VMBuiltinFunctionIndex::get_memory32_grow_index().index() as usize] =
            wasmer_vm_memory32_grow as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory32_grow_index().index() as usize] =
            wasmer_vm_imported_memory32_grow as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory32_size_index().index() as usize] =
            wasmer_vm_memory32_size as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory32_size_index().index() as usize] =
            wasmer_vm_imported_memory32_size as usize;
        // Table and element-segment operations.
        ptrs[VMBuiltinFunctionIndex::get_table_copy_index().index() as usize] =
            wasmer_vm_table_copy as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_init_index().index() as usize] =
            wasmer_vm_table_init as usize;
        ptrs[VMBuiltinFunctionIndex::get_elem_drop_index().index() as usize] =
            wasmer_vm_elem_drop as usize;
        // Bulk-memory operations (copy/fill/init/data.drop).
        ptrs[VMBuiltinFunctionIndex::get_memory_copy_index().index() as usize] =
            wasmer_vm_memory32_copy as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_copy_index().index() as usize] =
            wasmer_vm_imported_memory32_copy as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_fill_index().index() as usize] =
            wasmer_vm_memory32_fill as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_fill_index().index() as usize] =
            wasmer_vm_imported_memory32_fill as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_init_index().index() as usize] =
            wasmer_vm_memory32_init as usize;
        ptrs[VMBuiltinFunctionIndex::get_data_drop_index().index() as usize] =
            wasmer_vm_data_drop as usize;
        // Trap raising.
        ptrs[VMBuiltinFunctionIndex::get_raise_trap_index().index() as usize] =
            wasmer_vm_raise_trap as usize;
        // Table size/grow/get/set (local and imported variants).
        ptrs[VMBuiltinFunctionIndex::get_table_size_index().index() as usize] =
            wasmer_vm_table_size as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_size_index().index() as usize] =
            wasmer_vm_imported_table_size as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_grow_index().index() as usize] =
            wasmer_vm_table_grow as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_grow_index().index() as usize] =
            wasmer_vm_imported_table_grow as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_get_index().index() as usize] =
            wasmer_vm_table_get as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_get_index().index() as usize] =
            wasmer_vm_imported_table_get as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_set_index().index() as usize] =
            wasmer_vm_table_set as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_set_index().index() as usize] =
            wasmer_vm_imported_table_set as usize;
        // funcref materialization and table.fill.
        ptrs[VMBuiltinFunctionIndex::get_func_ref_index().index() as usize] =
            wasmer_vm_func_ref as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_fill_index().index() as usize] =
            wasmer_vm_table_fill as usize;
        // Threads proposal: atomic wait/notify (local and imported variants).
        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_wait32_index().index() as usize] =
            wasmer_vm_memory32_atomic_wait32 as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_wait32_index().index() as usize] =
            wasmer_vm_imported_memory32_atomic_wait32 as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_wait64_index().index() as usize] =
            wasmer_vm_memory32_atomic_wait64 as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_wait64_index().index() as usize] =
            wasmer_vm_imported_memory32_atomic_wait64 as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_notify_index().index() as usize] =
            wasmer_vm_memory32_atomic_notify as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_notify_index().index() as usize] =
            wasmer_vm_imported_memory32_atomic_notify as usize;
        // Every slot must have been filled; a zero entry would mean a
        // builtin index with no libcall wired up.
        debug_assert!(ptrs.iter().cloned().all(|p| p != 0));
        Self { ptrs }
    }
}
/// The VM "context" marker type. The struct itself is empty: what matters
/// is its *address*, which is passed to Wasm code and from which the owning
/// `Instance` is recovered by subtracting `Instance::vmctx_offset()` (see
/// the impl below).
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMContext {}
impl VMContext {
    /// Return a reference to the `Instance` that owns this `VMContext`.
    ///
    /// # Safety
    ///
    /// `self` must be the vmctx embedded inside a live `Instance`: the
    /// instance is located by subtracting `Instance::vmctx_offset()` from
    /// `self`'s address, so calling this on any other `VMContext` is
    /// undefined behavior.
    #[allow(clippy::cast_ptr_alignment)]
    #[inline]
    pub(crate) unsafe fn instance(&self) -> &Instance {
        &*((self as *const Self as *mut u8).offset(-Instance::vmctx_offset()) as *const Instance)
    }
    /// Mutable variant of [`VMContext::instance`].
    ///
    /// # Safety
    ///
    /// Same contract as `instance`, plus the usual exclusivity requirement
    /// of producing a `&mut` reference.
    #[inline]
    pub(crate) unsafe fn instance_mut(&mut self) -> &mut Instance {
        &mut *((self as *const Self as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance)
    }
}
/// Signature of a trampoline that calls a function through a uniform ABI:
/// the callee's context, the function body to invoke, and a buffer holding
/// the raw arguments/results.
pub type VMTrampoline = unsafe extern "C" fn(
    *mut VMContext,        // callee vmctx
    *const VMFunctionBody, // function body to call
    *mut RawValue,         // raw args/results buffer
);
/// The runtime definition of a linear memory: base pointer plus current
/// length in bytes. `#[repr(C)]` because the layout is checked against
/// `VMOffsets` (see the test module below).
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address of the linear memory.
    pub base: *mut u8,
    /// The current size of the linear memory, in bytes.
    pub current_length: usize,
}
// SAFETY: this is a plain pointer+length pair; synchronizing access to the
// memory behind `base` is the responsibility of code that dereferences it
// (see the `memory_*` helpers above).
unsafe impl Send for VMMemoryDefinition {}
unsafe impl Sync for VMMemoryDefinition {}
/// Layout tests: `VMMemoryDefinition` must match the offsets computed by
/// `VMOffsets`.
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.vmmemory_definition_current_length())
        );
    }
}