pub struct MachineX86_64 {
    assembler: AssemblerX64,
    used_gprs: u32,
    used_simd: u32,
    trap_table: TrapTable,
    instructions_address_map: Vec<InstructionAddressMap>,
    src_loc: u32,
    unwind_ops: Vec<(usize, UnwindOps)>,
}

Fields§

§assembler: AssemblerX64

§used_gprs: u32

§used_simd: u32

§trap_table: TrapTable

§instructions_address_map: Vec<InstructionAddressMap>

Map from byte offset into wasm function to range of native instructions.

§src_loc: u32

The source location for the current operator.

§unwind_ops: Vec<(usize, UnwindOps)>

Vector of unwind operations with their code offsets.

Implementations§

source§

impl MachineX86_64

source

pub fn new(target: Option<Target>) -> Result<Self, CompileError>

source

pub fn emit_relaxed_binop( &mut self, op: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>, sz: Size, src: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

pub fn emit_relaxed_zx_sx( &mut self, op: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Size, _: Location<GPR, XMM>) -> Result<(), CompileError>, sz_src: Size, src: Location<GPR, XMM>, sz_dst: Size, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn emit_binop_i32( &mut self, f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

I32 binary operation with both operands popped from the virtual stack.
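
A hedged call-site sketch (the machine binding and operand locations are assumptions; AssemblerX64::emit_add is the (Size, src, dst) emitter referenced elsewhere in these docs): lowering a wasm i32.add through this shared helper.

machine.emit_binop_i32(
    AssemblerX64::emit_add,   // op: emits an add with (Size, src, dst) operands
    Location::GPR(GPR::RCX),  // loc_a: first operand
    Location::Imm32(1),       // loc_b: second operand
    Location::GPR(GPR::RAX),  // ret: where the result is placed
)?;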

source

fn emit_binop_i64( &mut self, f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

I64 binary operation with both operands popped from the virtual stack.

source

fn emit_cmpop_i64_dynamic_b( &mut self, c: Condition, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

I64 comparison where the second operand is dynamic (not a known constant).

source

fn emit_shift_i64( &mut self, f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

I64 shift with both operands popped from the virtual stack.

source

fn emit_relaxed_xdiv( &mut self, op: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>) -> Result<(), CompileError>, sz: Size, loc: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, ) -> Result<usize, CompileError>

Moves loc to a valid location for div/idiv (the divisor must be a register or memory operand on x86_64), returning the offset of the div opcode so it can be marked as trappable.

source

fn emit_cmpop_i32_dynamic_b( &mut self, c: Condition, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

I32 comparison where the second operand is dynamic (not a known constant).

source

fn emit_shift_i32( &mut self, f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

I32 shift with both operands popped from the virtual stack.

source

fn memory_op<F: FnOnce(&mut Self, GPR) -> Result<(), CompileError>>( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, check_alignment: bool, value_size: usize, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, cb: F, ) -> Result<(), CompileError>

source

fn emit_compare_and_swap<F: FnOnce(&mut Self, GPR, GPR) -> Result<(), CompileError>>( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, ret: Location<GPR, XMM>, memarg: &MemArg, value_size: usize, memory_sz: Size, stack_sz: Size, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, cb: F, ) -> Result<(), CompileError>

source

fn emit_f32_int_conv_check( &mut self, reg: XMM, lower_bound: f32, upper_bound: f32, underflow_label: DynamicLabel, overflow_label: DynamicLabel, nan_label: DynamicLabel, succeed_label: DynamicLabel, ) -> Result<(), CompileError>

source

fn emit_f32_int_conv_check_trap( &mut self, reg: XMM, lower_bound: f32, upper_bound: f32, ) -> Result<(), CompileError>

source

fn emit_f32_int_conv_check_sat<F1: FnOnce(&mut Self) -> Result<(), CompileError>, F2: FnOnce(&mut Self) -> Result<(), CompileError>, F3: FnOnce(&mut Self) -> Result<(), CompileError>, F4: FnOnce(&mut Self) -> Result<(), CompileError>>( &mut self, reg: XMM, lower_bound: f32, upper_bound: f32, underflow_cb: F1, overflow_cb: F2, nan_cb: Option<F3>, convert_cb: F4, ) -> Result<(), CompileError>

source

fn emit_f64_int_conv_check( &mut self, reg: XMM, lower_bound: f64, upper_bound: f64, underflow_label: DynamicLabel, overflow_label: DynamicLabel, nan_label: DynamicLabel, succeed_label: DynamicLabel, ) -> Result<(), CompileError>

source

fn emit_f64_int_conv_check_trap( &mut self, reg: XMM, lower_bound: f64, upper_bound: f64, ) -> Result<(), CompileError>

source

fn emit_f64_int_conv_check_sat<F1: FnOnce(&mut Self) -> Result<(), CompileError>, F2: FnOnce(&mut Self) -> Result<(), CompileError>, F3: FnOnce(&mut Self) -> Result<(), CompileError>, F4: FnOnce(&mut Self) -> Result<(), CompileError>>( &mut self, reg: XMM, lower_bound: f64, upper_bound: f64, underflow_cb: F1, overflow_cb: F2, nan_cb: Option<F3>, convert_cb: F4, ) -> Result<(), CompileError>

source

fn emit_relaxed_avx( &mut self, op: fn(_: &mut AssemblerX64, _: XMM, _: XMMOrMemory, _: XMM) -> Result<(), CompileError>, src1: Location<GPR, XMM>, src2: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>

Moves src1 and src2 to valid locations and possibly adds a layer of indirection for dst for AVX instructions.
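
A hedged sketch, assuming the assembler exposes an AVX emitter with the (XMM, XMMOrMemory, XMM) shape this helper expects (emit_vaddss and the Location::SIMD variant are assumptions here): lowering f32.add over SIMD locations.

machine.emit_relaxed_avx(
    AssemblerX64::emit_vaddss,  // assumed AVX emitter: dst = src1 + src2
    Location::SIMD(XMM::XMM1),  // src1
    Location::SIMD(XMM::XMM2),  // src2
    Location::SIMD(XMM::XMM0),  // dst
)?;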

source

fn emit_relaxed_avx_base<F: FnOnce(&mut Self, XMM, XMMOrMemory, XMM) -> Result<(), CompileError>>( &mut self, op: F, src1: Location<GPR, XMM>, src2: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>

Moves src1 and src2 to valid locations and possibly adds a layer of indirection for dst for AVX instructions.

source

fn convert_i64_f64_u_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i64_f64_u_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i64_f64_s_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i64_f64_s_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i32_f64_s_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i32_f64_s_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i32_f64_u_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i32_f64_u_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i64_f32_u_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i64_f32_u_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i64_f32_s_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i64_f32_s_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i32_f32_s_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i32_f32_s_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i32_f32_u_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn convert_i32_f32_u_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn emit_relaxed_atomic_xchg( &mut self, sz: Size, src: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>

source

fn used_gprs_contains(&self, r: &GPR) -> bool

source

fn used_simd_contains(&self, r: &XMM) -> bool

source

fn used_gprs_insert(&mut self, r: GPR)

source

fn used_simd_insert(&mut self, r: XMM)

source

fn used_gprs_remove(&mut self, r: &GPR) -> bool

source

fn used_simd_remove(&mut self, r: &XMM) -> bool

source

fn emit_unwind_op(&mut self, op: UnwindOps) -> Result<(), CompileError>

source

fn emit_illegal_op_internal( &mut self, trap: TrapCode, ) -> Result<(), CompileError>

Trait Implementations§

source§

impl Machine for MachineX86_64

source§

fn set_srcloc(&mut self, offset: u32)

Set the source location of the Wasm to the given offset.

source§

fn mark_address_range_with_trap_code( &mut self, code: TrapCode, begin: usize, end: usize, )

Marks each address in the code range [begin, end) with the given trap code.

source§

fn mark_address_with_trap_code(&mut self, code: TrapCode)

Marks one address as trappable with the given trap code.

source§

fn mark_instruction_with_trap_code(&mut self, code: TrapCode) -> usize

Marks the instruction as trappable with the given trap code, returning the “begin” offset.

source§

fn mark_instruction_address_end(&mut self, begin: usize)

Pushes the instruction to the address map, calculating the offset from a provided beginning address.
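
These two methods are meant to be paired; a minimal sketch (the trap code choice is illustrative):

let begin = machine.mark_instruction_with_trap_code(TrapCode::IntegerDivisionByZero);
// ... emit the single instruction that may trap ...
machine.mark_instruction_address_end(begin); // records the range starting at `begin`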

source§

fn insert_stackoverflow(&mut self)

Insert a StackOverflow (at offset 0)

source§

fn collect_trap_information(&self) -> Vec<TrapInformation>

Get all current TrapInformation

§

type GPR = GPR

§

type SIMD = XMM

source§

fn assembler_get_offset(&self) -> AssemblyOffset

Get current assembler offset
source§

fn index_from_gpr(&self, x: GPR) -> RegisterIndex

Convert a GPR register to a RegisterIndex
source§

fn index_from_simd(&self, x: XMM) -> RegisterIndex

Convert a SIMD register to a RegisterIndex
source§

fn get_vmctx_reg(&self) -> GPR

Get the GPR that holds vmctx
source§

fn get_used_gprs(&self) -> Vec<GPR>

Get all used GPRs
source§

fn get_used_simd(&self) -> Vec<XMM>

Get all used SIMD regs
source§

fn pick_gpr(&self) -> Option<GPR>

Picks an unused general purpose register for local/stack/argument use.
source§

fn pick_temp_gpr(&self) -> Option<GPR>

Picks an unused general purpose register for internal temporary use.
source§

fn acquire_temp_gpr(&mut self) -> Option<GPR>

Picks an unused general purpose register and marks it as used
source§

fn release_gpr(&mut self, gpr: GPR)

Releases a temporary GPR.
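
The acquire/release pair keeps the used_gprs bitmask accurate; a minimal sketch, assuming src is some operand location from the surrounding codegen:

if let Some(tmp) = machine.acquire_temp_gpr() {
    // stage `src` in the scratch register
    machine.move_location(Size::S64, src, Location::GPR(tmp))?;
    machine.release_gpr(tmp); // mark it free again
}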
source§

fn reserve_unused_temp_gpr(&mut self, gpr: GPR) -> GPR

Specify that a given register is in use.
source§

fn reserve_gpr(&mut self, gpr: GPR)

reserve a GPR
source§

fn push_used_gpr(&mut self, used_gprs: &[GPR]) -> Result<usize, CompileError>

Push used GPRs to the stack. Returns the number of bytes taken on the stack
source§

fn pop_used_gpr(&mut self, used_gprs: &[GPR]) -> Result<(), CompileError>

Pop used GPRs from the stack
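
A hedged sketch of the save/restore protocol around a call that may clobber registers:

let used = machine.get_used_gprs();
let saved_bytes = machine.push_used_gpr(&used)?; // spill live GPRs
// ... emit the call ...
machine.pop_used_gpr(&used)?;                    // restore them afterwards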
source§

fn pick_simd(&self) -> Option<XMM>

Picks an unused SIMD register.
source§

fn pick_temp_simd(&self) -> Option<XMM>

Picks an unused SIMD register for internal temporary use.
source§

fn acquire_temp_simd(&mut self) -> Option<XMM>

Acquires a temporary XMM register.
source§

fn reserve_simd(&mut self, simd: XMM)

reserve a SIMD register
source§

fn release_simd(&mut self, simd: XMM)

Releases a temporary XMM register.
source§

fn push_used_simd(&mut self, used_xmms: &[XMM]) -> Result<usize, CompileError>

Push used SIMD regs to the stack. Returns the number of bytes taken on the stack
source§

fn pop_used_simd(&mut self, used_xmms: &[XMM]) -> Result<(), CompileError>

Pop used SIMD regs from the stack
source§

fn instructions_address_map(&self) -> Vec<InstructionAddressMap>

source§

fn local_on_stack(&mut self, stack_offset: i32) -> Location<GPR, XMM>

Memory location for a local on the stack, like Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)) for x86_64
source§

fn round_stack_adjust(&self, value: usize) -> usize

Return a rounded stack adjustment value (must be a multiple of 16 bytes on ARM64, for example)
source§

fn adjust_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>

Adjust stack for locals, like assembler.emit_sub(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
source§

fn restore_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>

restore stack, like assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
source§

fn pop_stack_locals( &mut self, delta_stack_offset: u32, ) -> Result<(), CompileError>

Pop stack of locals, like assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
source§

fn move_location_for_native( &mut self, _size: Size, loc: Location<GPR, XMM>, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>

push a value on the stack for a native call
source§

fn zero_location( &mut self, size: Size, location: Location<GPR, XMM>, ) -> Result<(), CompileError>

Zero a location that is 32bits
source§

fn local_pointer(&self) -> GPR

GPR Reg used for local pointer on the stack
source§

fn is_local_on_stack(&self, idx: usize) -> bool

Determine whether a local should be allocated on the stack.
source§

fn get_local_location( &self, idx: usize, callee_saved_regs_size: usize, ) -> Location<GPR, XMM>

Determine a local’s location.
source§

fn move_local( &mut self, stack_offset: i32, location: Location<GPR, XMM>, ) -> Result<(), CompileError>

Move a local to the stack, like emit_mov(Size::S64, location, Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)))
source§

fn list_to_save( &self, calling_convention: CallingConvention, ) -> Vec<Location<GPR, XMM>>

List of registers to save, depending on the CallingConvention
source§

fn get_param_location( &self, idx: usize, _sz: Size, stack_location: &mut usize, calling_convention: CallingConvention, ) -> Location<GPR, XMM>

Get param location (to build a call, using SP for stack args)
source§

fn get_call_param_location( &self, idx: usize, _sz: Size, _stack_location: &mut usize, calling_convention: CallingConvention, ) -> Location<GPR, XMM>

Get call param location (from a call, using FP for stack args)
source§

fn get_simple_param_location( &self, idx: usize, calling_convention: CallingConvention, ) -> Location<GPR, XMM>

Get simple param location
source§

fn move_location( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>

move a location to another
source§

fn move_location_extend( &mut self, size_val: Size, signed: bool, source: Location<GPR, XMM>, size_op: Size, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>

move a location to another, with zero or sign extension
source§

fn load_address( &mut self, size: Size, reg: Location<GPR, XMM>, mem: Location<GPR, XMM>, ) -> Result<(), CompileError>

Load a memory value into a register, zero extending to 64bits. Panics if reg is not a Location::GPR or if mem is not a memory location
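
A hedged example: zero-extend a 32-bit value at [RBP - 8] into RAX.

machine.load_address(
    Size::S32,
    Location::GPR(GPR::RAX),        // reg: must be a Location::GPR
    Location::Memory(GPR::RBP, -8), // mem: must be a memory operand
)?;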
source§

fn init_stack_loc( &mut self, init_stack_loc_cnt: u64, last_stack_loc: Location<GPR, XMM>, ) -> Result<(), CompileError>

Init the stack loc counter
source§

fn restore_saved_area( &mut self, saved_area_offset: i32, ) -> Result<(), CompileError>

Restore save_area
source§

fn pop_location( &mut self, location: Location<GPR, XMM>, ) -> Result<(), CompileError>

Pop a location
source§

fn new_machine_state(&self) -> MachineState

Create a new MachineState with default values.
source§

fn assembler_finalize(self) -> Result<Vec<u8>, CompileError>

Finalize the assembler
source§

fn get_offset(&self) -> AssemblyOffset

get_offset of Assembler
source§

fn finalize_function(&mut self) -> Result<(), CompileError>

finalize a function
source§

fn emit_function_prolog(&mut self) -> Result<(), CompileError>

emit native function prolog (depending on the calling convention, like “PUSH RBP / MOV RBP, RSP”)
source§

fn emit_function_epilog(&mut self) -> Result<(), CompileError>

emit native function epilog (depending on the calling convention, like “MOV RSP, RBP / POP RBP”)
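
Prolog and epilog bracket the emitted body; a minimal sketch of the expected pairing:

machine.emit_function_prolog()?; // save RBP and establish the frame pointer
// ... emit the function body ...
machine.emit_function_epilog()?; // tear the frame down again
machine.emit_ret()?;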
source§

fn emit_function_return_value( &mut self, ty: WpType, canonicalize: bool, loc: Location<GPR, XMM>, ) -> Result<(), CompileError>

handle the return value, with optional NaN canonicalization if wanted
source§

fn emit_function_return_float(&mut self) -> Result<(), CompileError>

Handle copy to SIMD register from ret value (if needed by the arch/calling convention)
source§

fn arch_supports_canonicalize_nan(&self) -> bool

Is NaN canonicalization supported
source§

fn canonicalize_nan( &mut self, sz: Size, input: Location<GPR, XMM>, output: Location<GPR, XMM>, ) -> Result<(), CompileError>

Canonicalize a NaN (or panic if not supported)
source§

fn emit_illegal_op(&mut self, trap: TrapCode) -> Result<(), CompileError>

emit an Illegal Opcode, associated with a trapcode
source§

fn get_label(&mut self) -> DynamicLabel

create a new label
source§

fn emit_label(&mut self, label: DynamicLabel) -> Result<(), CompileError>

emit a label
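
Labels come from get_label and are bound with emit_label; a minimal sketch of a forward branch:

let skip = machine.get_label();
machine.jmp_on_equal(skip)?; // branch over the fallback path when equal
// ... emit the fallback path ...
machine.emit_label(skip)?;   // bind the label here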
source§

fn get_grp_for_call(&self) -> GPR

get the GPR used for calls, like RAX on x86_64
source§

fn emit_call_register(&mut self, reg: GPR) -> Result<(), CompileError>

Emit a call using the value in a register
source§

fn emit_call_label(&mut self, label: DynamicLabel) -> Result<(), CompileError>

Emit a call to a label
source§

fn get_gpr_for_ret(&self) -> GPR

get the gpr for the return of generic values
source§

fn get_simd_for_ret(&self) -> XMM

get the simd for the return of float/double values
source§

fn arch_requires_indirect_call_trampoline(&self) -> bool

Whether a trampoline is needed for indirect calls
source§

fn arch_emit_indirect_call_with_trampoline( &mut self, location: Location<GPR, XMM>, ) -> Result<(), CompileError>

indirect call with trampoline
source§

fn emit_debug_breakpoint(&mut self) -> Result<(), CompileError>

Emit a debug breakpoint
source§

fn emit_call_location( &mut self, location: Location<GPR, XMM>, ) -> Result<(), CompileError>

emit a call to a location
source§

fn location_address( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>

load the address of a memory location (will panic if source is not a memory operand), like the LEA opcode on x86_64
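
A hedged example: materialize the address RBP - 16 into RAX, LEA-style.

machine.location_address(
    Size::S64,
    Location::Memory(GPR::RBP, -16), // source: must be a memory location
    Location::GPR(GPR::RAX),         // dest: receives the computed address
)?;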
source§

fn location_and( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, _flags: bool, ) -> Result<(), CompileError>

And src & dst -> dst (with or without flags)
source§

fn location_xor( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, _flags: bool, ) -> Result<(), CompileError>

Xor src ^ dst -> dst (with or without flags)
source§

fn location_or( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, _flags: bool, ) -> Result<(), CompileError>

Or src | dst -> dst (with or without flags)
source§

fn location_test( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>

Test src & dst and set flags
source§

fn location_add( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, _flags: bool, ) -> Result<(), CompileError>

Add src+dst -> dst (with or without flags)
source§

fn location_sub( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, _flags: bool, ) -> Result<(), CompileError>

Sub dst-src -> dst (with or without flags)
source§

fn location_cmp( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>

Cmp src - dst and set flags
source§

fn jmp_unconditionnal( &mut self, label: DynamicLabel, ) -> Result<(), CompileError>

jmp without condition
source§

fn jmp_on_equal(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on equal (src==dst) like Equal set on x86_64
source§

fn jmp_on_different(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on different (src!=dst) like NotEqual set on x86_64
source§

fn jmp_on_above(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on above (src>dst) like Above set on x86_64
source§

fn jmp_on_aboveequal(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on above or equal (src>=dst) like Above or Equal set on x86_64
source§

fn jmp_on_belowequal(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on below or equal (src<=dst) like Below or Equal set on x86_64
source§

fn jmp_on_overflow(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on overflow like Carry set on x86_64
source§

fn emit_jmp_to_jumptable( &mut self, label: DynamicLabel, cond: Location<GPR, XMM>, ) -> Result<(), CompileError>

jmp using a jump table at label with cond as the index
source§

fn align_for_loop(&mut self) -> Result<(), CompileError>

Align for Loop (may do nothing, depending on the arch)
source§

fn emit_ret(&mut self) -> Result<(), CompileError>

ret (from a Call)
source§

fn emit_push( &mut self, size: Size, loc: Location<GPR, XMM>, ) -> Result<(), CompileError>

Stack push of a location
source§

fn emit_pop( &mut self, size: Size, loc: Location<GPR, XMM>, ) -> Result<(), CompileError>

Stack pop of a location
source§

fn emit_memory_fence(&mut self) -> Result<(), CompileError>

Emit a memory fence. Can be nothing for x86_64 or a DMB on ARM64 for example
source§

fn location_neg( &mut self, size_val: Size, signed: bool, source: Location<GPR, XMM>, size_op: Size, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>

-src -> dst
source§

fn emit_imul_imm32( &mut self, size: Size, imm32: u32, gpr: GPR, ) -> Result<(), CompileError>

Multiply location with immediate
source§

fn emit_relaxed_mov( &mut self, sz: Size, src: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>

relaxed mov: move from anywhere to anywhere
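
x86_64 has no memory-to-memory MOV encoding, so the relaxed form is expected to route through a scratch register when needed; a hedged sketch:

machine.emit_relaxed_mov(
    Size::S64,
    Location::Memory(GPR::RBP, -8),  // src in memory
    Location::Memory(GPR::RBP, -16), // dst in memory: needs a temp GPR
)?;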
source§

fn emit_relaxed_cmp( &mut self, sz: Size, src: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>

relaxed cmp: compare operands from anywhere
source§

fn emit_relaxed_zero_extension( &mut self, sz_src: Size, src: Location<GPR, XMM>, sz_dst: Size, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>

relaxed move with zero extension
source§

fn emit_relaxed_sign_extension( &mut self, sz_src: Size, src: Location<GPR, XMM>, sz_dst: Size, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>

relaxed move with sign extension
source§

fn emit_binop_add32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Add with location directly from the stack
source§

fn emit_binop_sub32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Sub with location directly from the stack
source§

fn emit_binop_mul32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Multiply with location directly from the stack
source§

fn emit_binop_udiv32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
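
A hedged sketch of how the returned offset can be consumed (loc_a, loc_b, ret, and the two labels are assumptions supplied by the surrounding codegen):

let div_offset = machine.emit_binop_udiv32(
    loc_a, loc_b, ret,
    integer_division_by_zero, // stub jumped to on a zero divisor
    integer_overflow,
)?;
machine.mark_address_range_with_trap_code(
    TrapCode::IntegerDivisionByZero,
    div_offset,
    machine.assembler_get_offset().0, // end of the emitted sequence
);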
source§

fn emit_binop_sdiv32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
source§

fn emit_binop_urem32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Unsigned Remainder (of a division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
source§

fn emit_binop_srem32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Signed Remainder (of a Division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
source§

fn emit_binop_and32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

And with location directly from the stack
source§

fn emit_binop_or32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Or with location directly from the stack
source§

fn emit_binop_xor32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Xor with location directly from the stack
source§

fn i32_cmp_ge_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Signed Greater or Equal Compare 2 i32, result in a GPR
source§

fn i32_cmp_gt_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Signed Greater Than Compare 2 i32, result in a GPR
source§

fn i32_cmp_le_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Signed Less or Equal Compare 2 i32, result in a GPR
source§

fn i32_cmp_lt_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Signed Less Than Compare 2 i32, result in a GPR
source§

fn i32_cmp_ge_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Unsigned Greater or Equal Compare 2 i32, result in a GPR
source§

fn i32_cmp_gt_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Unsigned Greater Than Compare 2 i32, result in a GPR
source§

fn i32_cmp_le_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Unsigned Less or Equal Compare 2 i32, result in a GPR
source§

fn i32_cmp_lt_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Unsigned Less Than Compare 2 i32, result in a GPR
source§

fn i32_cmp_ne( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Not Equal Compare 2 i32, result in a GPR
source§

fn i32_cmp_eq( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Equal Compare 2 i32, result in a GPR
source§

fn i32_clz( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Count Leading 0 bits of an i32
source§

fn i32_ctz( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Count Trailing 0 bits of an i32
source§

fn i32_popcnt( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Count the number of 1 bits of an i32
source§

fn i32_shl( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

i32 Logical Shift Left
source§

fn i32_shr( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

i32 Logical Shift Right
source§

fn i32_sar( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

i32 Arithmetic Shift Right
source§

fn i32_rol( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

i32 Rotate Left
source§

fn i32_ror( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

i32 Rotate Right
source§

fn i32_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 load
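
A hedged call-site sketch (addr, memarg, the vmctx offset, and both trap labels are assumptions supplied by the surrounding codegen):

machine.i32_load(
    addr,                    // computed linear-memory address operand
    &memarg,                 // align/offset immediate of the wasm load
    Location::GPR(GPR::RAX), // ret: destination of the loaded value
    true,                    // need_check: emit the bounds check
    false,                   // imported_memories
    vmctx_memory_offset,
    heap_access_oob,         // label of the out-of-bounds trap stub
    unaligned_atomic,        // label of the unaligned-atomic trap stub
)?;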
source§

fn i32_load_8u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 load of an unsigned 8bits
source§

fn i32_load_8s( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 load of a signed 8bits
source§

fn i32_load_16u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 load of an unsigned 16bits
source§

fn i32_load_16s( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 load of a signed 16bits
source§

fn i32_atomic_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic load
source§

fn i32_atomic_load_8u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic load of an unsigned 8bits
source§

fn i32_atomic_load_16u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic load of an unsigned 16bits
source§

fn i32_save( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 save
source§

fn i32_save_8( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 save of the lower 8bits
source§

fn i32_save_16( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 save of the lower 16bits
source§

fn i32_atomic_save( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic save
source§

fn i32_atomic_save_8( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic save of the lower 8bits
source§

fn i32_atomic_save_16( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic save of the lower 16bits
source§

fn i32_atomic_add( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Add with i32
source§

fn i32_atomic_add_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Add with unsigned 8bits
source§

fn i32_atomic_add_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Add with unsigned 16bits
source§

fn i32_atomic_sub( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Sub with i32
source§

fn i32_atomic_sub_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Sub with unsigned 8bits
source§

fn i32_atomic_sub_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Sub with unsigned 16bits
source§

fn i32_atomic_and( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic And with i32
source§

fn i32_atomic_and_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic And with unsigned 8bits
source§

fn i32_atomic_and_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic And with unsigned 16bits
source§

fn i32_atomic_or( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Or with i32
source§

fn i32_atomic_or_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Or with unsigned 8bits
source§

fn i32_atomic_or_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Or with unsigned 16bits
source§

fn i32_atomic_xor( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Xor with i32
source§

fn i32_atomic_xor_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Xor with unsigned 8bits
source§

fn i32_atomic_xor_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Xor with unsigned 16bits
source§

fn i32_atomic_xchg( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Exchange with i32
source§

fn i32_atomic_xchg_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Exchange with u8
source§

fn i32_atomic_xchg_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Exchange with u16
source§

fn i32_atomic_cmpxchg( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Compare and Exchange with i32
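
A hedged sketch (operand locations and labels are assumptions): the observed memory value lands in ret, so comparing it against cmp tells the caller whether the exchange happened.

machine.i32_atomic_cmpxchg(
    new_value,               // value to store on success
    expected,                // value compared against memory
    target_addr,             // linear-memory address operand
    &memarg,
    Location::GPR(GPR::RAX), // ret: the value observed in memory
    true, false, vmctx_memory_offset,
    heap_access_oob, unaligned_atomic,
)?;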
source§

fn i32_atomic_cmpxchg_8u( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Compare and Exchange with u8
source§

fn i32_atomic_cmpxchg_16u( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Compare and Exchange with u16
source§

fn emit_call_with_reloc( &mut self, _calling_convention: CallingConvention, reloc_target: RelocationTarget, ) -> Result<Vec<Relocation>, CompileError>

emit a move of the function address to a GPR ready for call, using the appropriate relocation
source§

fn emit_binop_add64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Add with location directly from the stack
source§

fn emit_binop_sub64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Sub with location directly from the stack
source§

fn emit_binop_mul64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Multiply with location directly from the stack
source§

fn emit_binop_udiv64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
source§

fn emit_binop_sdiv64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
source§

fn emit_binop_urem64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Unsigned Remainder (of a division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
source§

fn emit_binop_srem64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Signed Remainder (of a Division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
source§

fn emit_binop_and64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

And with location directly from the stack
source§

fn emit_binop_or64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Or with location directly from the stack
source§

fn emit_binop_xor64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Xor with location directly from the stack
source§

fn i64_cmp_ge_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Signed Greater or Equal Compare 2 i64, result in a GPR
source§

fn i64_cmp_gt_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Signed Greater Than Compare 2 i64, result in a GPR
source§

fn i64_cmp_le_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Signed Less or Equal Compare 2 i64, result in a GPR
source§

fn i64_cmp_lt_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Signed Less Than Compare 2 i64, result in a GPR
source§

fn i64_cmp_ge_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Unsigned Greater or Equal Compare 2 i64, result in a GPR
source§

fn i64_cmp_gt_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Unsigned Greater Than Compare 2 i64, result in a GPR
source§

fn i64_cmp_le_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Unsigned Less or Equal Compare 2 i64, result in a GPR
source§

fn i64_cmp_lt_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Unsigned Less Than Compare 2 i64, result in a GPR
source§

fn i64_cmp_ne( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Not Equal Compare 2 i64, result in a GPR
source§

fn i64_cmp_eq( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Equal Compare 2 i64, result in a GPR
source§

fn i64_clz( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Count Leading 0 bits of an i64
source§

fn i64_ctz( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Count Trailing 0 bits of an i64
source§

fn i64_popcnt( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Count the number of 1 bits of an i64
source§

fn i64_shl( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

i64 Logical Shift Left
source§

fn i64_shr( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

i64 Logical Shift Right
source§

fn i64_sar( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

i64 Arithmetic Shift Right
source§

fn i64_rol( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

i64 Rotate Left
source§

fn i64_ror( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

i64 Rotate Right
source§

fn i64_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load
source§

fn i64_load_8u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of an unsigned 8bits
source§

fn i64_load_8s( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of a signed 8bits
source§

fn i64_load_16u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of an unsigned 16bits
source§

fn i64_load_16s( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of a signed 16bits
source§

fn i64_load_32u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of an unsigned 32bits
source§

fn i64_load_32s( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of a signed 32bits
source§

fn i64_atomic_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic load
source§

fn i64_atomic_load_8u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic load from unsigned 8bits
source§

fn i64_atomic_load_16u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic load from unsigned 16bits
source§

fn i64_atomic_load_32u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic load from unsigned 32bits
source§

fn i64_save( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 save
source§

fn i64_save_8( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 save of the lower 8bits
source§

fn i64_save_16( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 save of the lower 16bits
source§

fn i64_save_32( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 save of the lower 32bits
source§

fn i64_atomic_save( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic save
source§

fn i64_atomic_save_8( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic save of the lower 8bits
source§

fn i64_atomic_save_16( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic save of the lower 16bits
source§

fn i64_atomic_save_32( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic save of the lower 32bits
source§

fn i64_atomic_add( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Add with i64
source§

fn i64_atomic_add_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Add with unsigned 8bits
source§

fn i64_atomic_add_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Add with unsigned 16bits
source§

fn i64_atomic_add_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Add with unsigned 32bits
source§

fn i64_atomic_sub( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Sub with i64
source§

fn i64_atomic_sub_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Sub with unsigned 8bits
source§

fn i64_atomic_sub_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Sub with unsigned 16bits
source§

fn i64_atomic_sub_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Sub with unsigned 32bits
source§

fn i64_atomic_and( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic And with i64
source§

fn i64_atomic_and_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic And with unsigned 8bits
source§

fn i64_atomic_and_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic And with unsigned 16bits
source§

fn i64_atomic_and_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic And with unsigned 32bits
source§

fn i64_atomic_or( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Or with i64
source§

fn i64_atomic_or_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Or with unsigned 8bits
source§

fn i64_atomic_or_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Or with unsigned 16bits
source§

fn i64_atomic_or_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Or with unsigned 32bits
source§

fn i64_atomic_xor( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Xor with i64
source§

fn i64_atomic_xor_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Xor with unsigned 8bits
source§

fn i64_atomic_xor_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Xor with unsigned 16bits
source§

fn i64_atomic_xor_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Xor with unsigned 32 bits
source§

fn i64_atomic_xchg( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Exchange with i64
source§

fn i64_atomic_xchg_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Exchange with u8
source§

fn i64_atomic_xchg_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Exchange with u16
source§

fn i64_atomic_xchg_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Exchange with u32
source§
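
Exchange is the one read-modify-write operation x86-64 provides directly: `xchg` with a memory operand is implicitly locked and already yields the old value. Semantically, with the narrow variants zero-extending the old value just like the And/Or/Xor families above:

use std::sync::atomic::{AtomicU64, Ordering};

// Semantics of `i64.atomic.rmw.xchg`: store `new`, return the previous value.
fn atomic_xchg(cell: &AtomicU64, new: u64) -> u64 {
    cell.swap(new, Ordering::SeqCst)
}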

fn i64_atomic_cmpxchg( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Compare and Exchange with i64
source§

fn i64_atomic_cmpxchg_8u( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Compare and Exchange with u8
source§

fn i64_atomic_cmpxchg_16u( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Compare and Exchange with u16
source§

fn i64_atomic_cmpxchg_32u( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Compare and Exchange with u32
source§
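
Compare-and-exchange takes two value operands, `new` and `cmp`, and maps naturally onto `lock cmpxchg`: the cell is compared against the expected value, conditionally replaced, and the old value comes back either way. In safe Rust the semantics look like:

use std::sync::atomic::{AtomicU64, Ordering};

// Semantics of `i64.atomic.rmw.cmpxchg`: if the cell holds `cmp`, store
// `new`; in both cases the result is the value the cell held before.
fn atomic_cmpxchg(cell: &AtomicU64, cmp: u64, new: u64) -> u64 {
    match cell.compare_exchange(cmp, new, Ordering::SeqCst, Ordering::SeqCst) {
        Ok(old) | Err(old) => old,
    }
}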

fn f32_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

Load an F32
source§

fn f32_save( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, canonicalize: bool, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

Save an F32
source§

fn f64_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

Load an F64
source§

fn f64_save( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, canonicalize: bool, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

Save an F64
source§
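
The save methods take a `canonicalize` flag; when set, a NaN being stored is first rewritten to the canonical quiet NaN so that nondeterministic NaN payloads never reach linear memory. A sketch of that normalization, assuming the standard canonical encodings (0x7fc00000 for f32, 0x7ff8000000000000 for f64):

// Replace any NaN with the canonical quiet NaN before storing.
fn canonicalize_f32(x: f32) -> f32 {
    if x.is_nan() { f32::from_bits(0x7fc0_0000) } else { x }
}

fn canonicalize_f64(x: f64) -> f64 {
    if x.is_nan() { f64::from_bits(0x7ff8_0000_0000_0000) } else { x }
}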

fn convert_f64_i64( &mut self, loc: Location<GPR, XMM>, signed: bool, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Convert an F64 from an I64, signed or unsigned
source§

fn convert_f64_i32( &mut self, loc: Location<GPR, XMM>, signed: bool, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Convert an F64 from an I32, signed or unsigned
source§

fn convert_f32_i64( &mut self, loc: Location<GPR, XMM>, signed: bool, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Convert an F32 from an I64, signed or unsigned
source§

fn convert_f32_i32( &mut self, loc: Location<GPR, XMM>, signed: bool, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Convert an F32 from an I32, signed or unsigned
source§

fn convert_i64_f64( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, signed: bool, sat: bool, ) -> Result<(), CompileError>

Convert an F64 to an I64, signed or unsigned, with or without saturation
source§

fn convert_i32_f64( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, signed: bool, sat: bool, ) -> Result<(), CompileError>

Convert an F64 to an I32, signed or unsigned, with or without saturation
source§

fn convert_i64_f32( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, signed: bool, sat: bool, ) -> Result<(), CompileError>

Convert an F32 to an I64, signed or unsigned, with or without saturation
source§

fn convert_i32_f32( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, signed: bool, sat: bool, ) -> Result<(), CompileError>

Convert an F32 to an I32, signed or unsigned, with or without saturation
source§
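
The `sat` flag selects between the trapping opcodes (e.g. i32.trunc_f64_s, which must trap on NaN or an out-of-range input) and the saturating trunc_sat family. Rust's `as` cast already saturates, so the contrast can be sketched as follows (the generated code jumps to a trap label where this sketch returns Err):

// Saturating semantics (`i32.trunc_sat_f64_s`): NaN becomes 0 and
// out-of-range values clamp to i32::MIN / i32::MAX.
fn trunc_sat_i32_f64(x: f64) -> i32 {
    x as i32
}

// Trapping semantics (`i32.trunc_f64_s`): NaN and out-of-range inputs
// are errors; only values whose truncation fits in i32 pass through.
fn trunc_i32_f64(x: f64) -> Result<i32, &'static str> {
    if x.is_nan() {
        return Err("invalid conversion to integer");
    }
    if x <= -2_147_483_649.0 || x >= 2_147_483_648.0 {
        return Err("integer overflow");
    }
    Ok(x as i32)
}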

fn convert_f64_f32( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Convert an F32 to an F64
source§

fn convert_f32_f64( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Convert an F64 to an F32
source§

fn f64_neg( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Negate an F64
source§

fn f64_abs( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Get the Absolute Value of an F64
source§

fn emit_i64_copysign( &mut self, tmp1: GPR, tmp2: GPR, ) -> Result<(), CompileError>

Copy the sign bit from GPR tmp1 to GPR tmp2
source§
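
Copysign needs no floating-point arithmetic, which is why this helper operates on two GPRs holding the raw bits: one operand's sign bit is spliced onto the other's magnitude. On raw u64 bits (a free-standing sketch, not the helper's actual register protocol):

// f64.copysign on raw bits: keep `mag`'s magnitude, take `sign`'s sign bit.
fn copysign_bits(mag: u64, sign: u64) -> u64 {
    const SIGN_BIT: u64 = 1 << 63;
    (mag & !SIGN_BIT) | (sign & SIGN_BIT)
}

For example, copysign_bits((1.5f64).to_bits(), (-0.0f64).to_bits()) yields the bit pattern of -1.5.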

fn f64_sqrt( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Get the Square Root of an F64
source§

fn f64_trunc( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Truncate an F64 (round toward zero)
source§

fn f64_ceil( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Ceil of an F64
source§

fn f64_floor( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Floor of an F64
source§

fn f64_nearest( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Round an F64 to the nearest integer, ties to even
source§
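
Wasm's nearest is roundTiesToEven, which `roundsd`/`roundss` provide directly with the round-to-nearest immediate; it is not the same as Rust's f64::round, which sends ties away from zero:

fn demo_nearest() {
    // wasm f64.nearest: ties go to the even neighbor.
    assert_eq!(2.5f64.round_ties_even(), 2.0);
    assert_eq!((-3.5f64).round_ties_even(), -4.0);
    // f64::round differs: ties go away from zero.
    assert_eq!(2.5f64.round(), 3.0);
}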

fn f64_cmp_ge( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Greater or Equal Compare 2 F64, result in a GPR
source§

fn f64_cmp_gt( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Greater Than Compare 2 F64, result in a GPR
source§

fn f64_cmp_le( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Less or Equal Compare 2 F64, result in a GPR
source§

fn f64_cmp_lt( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Less Than Compare 2 F64, result in a GPR
source§

fn f64_cmp_ne( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Not Equal Compare 2 F64, result in a GPR
source§

fn f64_cmp_eq( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Equal Compare 2 F64, result in a GPR
source§
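
Each comparison produces a wasm i32 (0 or 1) rather than a float, hence "result in a GPR": the XMM operands are compared and the resulting flag is materialized into an integer register. Semantically, with any NaN operand making the ordered comparisons false:

// f64.lt as wasm defines it: an i32 boolean. Rust's `<` on floats
// already implements the same IEEE ordered compare (NaN gives false).
fn f64_lt(a: f64, b: f64) -> i32 {
    (a < b) as i32
}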

fn f64_min( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Get Min for 2 F64 values
source§

fn f64_max( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Get Max for 2 F64 values
source§
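
Wasm's min/max are stricter than a bare `minsd`/`maxsd` (which simply return the second operand when either input is NaN): NaN must propagate, and -0.0 orders below +0.0, so the SSE instruction has to be wrapped in extra checks. The target semantics:

// wasm f64.min: NaN-propagating, and min(-0.0, +0.0) == -0.0.
fn wasm_f64_min(a: f64, b: f64) -> f64 {
    if a.is_nan() || b.is_nan() {
        f64::NAN
    } else if a == b {
        // a and b may be +0.0 and -0.0 here; bit-OR keeps the negative sign.
        f64::from_bits(a.to_bits() | b.to_bits())
    } else if a < b {
        a
    } else {
        b
    }
}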

fn f64_add( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Add 2 F64 values
source§

fn f64_sub( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Sub 2 F64 values
source§

fn f64_mul( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Multiply 2 F64 values
source§

fn f64_div( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Divide 2 F64 values
source§

fn f32_neg( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Negate an F32
source§

fn f32_abs( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Get the Absolute Value of an F32
source§

fn emit_i32_copysign( &mut self, tmp1: GPR, tmp2: GPR, ) -> Result<(), CompileError>

Copy the sign bit from GPR tmp1 to GPR tmp2
source§

fn f32_sqrt( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Get the Square Root of an F32
source§

fn f32_trunc( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Truncate an F32 (round toward zero)
source§

fn f32_ceil( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Ceil of an F32
source§

fn f32_floor( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Floor of an F32
source§

fn f32_nearest( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Round an F32 to the nearest integer, ties to even
source§

fn f32_cmp_ge( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Greater or Equal Compare 2 F32, result in a GPR
source§

fn f32_cmp_gt( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Greater Than Compare 2 F32, result in a GPR
source§

fn f32_cmp_le( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Less or Equal Compare 2 F32, result in a GPR
source§

fn f32_cmp_lt( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Less Than Compare 2 F32, result in a GPR
source§

fn f32_cmp_ne( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Not Equal Compare 2 F32, result in a GPR
source§

fn f32_cmp_eq( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Equal Compare 2 F32, result in a GPR
source§

fn f32_min( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Get Min for 2 F32 values
source§

fn f32_max( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Get Max for 2 F32 values
source§

fn f32_add( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Add 2 F32 values
source§

fn f32_sub( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Sub 2 F32 values
source§

fn f32_mul( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Multiply 2 F32 values
source§

fn f32_div( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>

Divide 2 F32 values
source§

fn gen_std_trampoline( &self, sig: &FunctionType, calling_convention: CallingConvention, ) -> Result<FunctionBody, CompileError>

Standard function trampoline generation
source§
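
A standard trampoline adapts the host calling convention to a compiled wasm function: the host hands it a vmctx pointer, the callee's address, and a buffer of raw argument slots; the trampoline unpacks the slots into registers per the signature, calls the body, and writes the results back into the same buffer. A hypothetical Rust rendering of that shape for a (i64) -> i64 signature (the real trampoline is emitted as machine code, and the opaque VMContext and 128-bit slot layout here are assumptions for illustration):

#[repr(C)]
struct VMContext; // opaque to the trampoline

unsafe extern "C" fn trampoline_i64_i64(
    vmctx: *mut VMContext,
    body: *const u8,
    args: *mut u128,
) {
    unsafe {
        // Reinterpret the body pointer as the native signature.
        let callee: unsafe extern "C" fn(*mut VMContext, i64) -> i64 =
            std::mem::transmute(body);
        // One 128-bit slot per parameter; results reuse the buffer.
        let a0 = *args as i64;
        *args = callee(vmctx, a0) as u64 as u128;
    }
}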

fn gen_std_dynamic_import_trampoline( &self, vmoffsets: &VMOffsets, sig: &FunctionType, calling_convention: CallingConvention, ) -> Result<FunctionBody, CompileError>

Generates a dynamic import function call trampoline for a function type.
source§

fn gen_import_call_trampoline( &self, vmoffsets: &VMOffsets, index: FunctionIndex, sig: &FunctionType, calling_convention: CallingConvention, ) -> Result<CustomSection, CompileError>

Singlepass calls import functions through a trampoline.
source§

fn gen_dwarf_unwind_info( &mut self, code_len: usize, ) -> Option<UnwindInstructions>

Generate eh_frame instructions (or None if not possible / supported)
source§

fn gen_windows_unwind_info(&mut self, _code_len: usize) -> Option<Vec<u8>>

Generate Windows unwind instructions (or None if not possible / supported)

Auto Trait Implementations§

Blanket Implementations§

source§

impl<T> Any for T
where T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
§

impl<T> ArchivePointee for T

§

type ArchivedMetadata = ()

The archived version of the pointer metadata for this type.
§

fn pointer_metadata( _: &<T as ArchivePointee>::ArchivedMetadata, ) -> <T as Pointee>::Metadata

Converts some archived metadata to the pointer metadata for itself.
source§

impl<T> Borrow<T> for T
where T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

source§

impl<T, U> Into<U> for T
where U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

source§

impl<T> IntoEither for T

source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
§

impl<T> LayoutRaw for T

§

fn layout_raw(_: <T as Pointee>::Metadata) -> Result<Layout, LayoutError>

Returns the layout of the type.
§

impl<T, N1, N2> Niching<NichedOption<T, N1>> for N2
where T: SharedNiching<N1, N2>, N1: Niching<T>, N2: Niching<T>,

§

unsafe fn is_niched(niched: *const NichedOption<T, N1>) -> bool

Returns whether the given value has been niched. Read more
§

fn resolve_niched(out: Place<NichedOption<T, N1>>)

Writes data to out indicating that a T is niched.
§

impl<T> Pointable for T

§

const ALIGN: usize = _

The alignment of the pointer.
§

type Init = T

The type for initializers.
§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a value with the given initializer. Read more
§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
§

impl<T> Pointee for T

§

type Metadata = ()

The metadata type for pointers and references to this type.
source§

impl<T> Same for T

§

type Output = T

Should always be Self
source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<T> Upcastable for T
where T: Any + Send + Sync + 'static,

§

fn upcast_any_ref(&self) -> &(dyn Any + 'static)

upcast ref
§

fn upcast_any_mut(&mut self) -> &mut (dyn Any + 'static)

upcast mut ref
§

fn upcast_any_box(self: Box<T>) -> Box<dyn Any>

upcast boxed dyn